diff --git "a/0eedd8efcf3fab683389f91e77a829502b5c698130c328f60b9e1bf314f4cc29/metadata.json" "b/0eedd8efcf3fab683389f91e77a829502b5c698130c328f60b9e1bf314f4cc29/metadata.json" new file mode 100644--- /dev/null +++ "b/0eedd8efcf3fab683389f91e77a829502b5c698130c328f60b9e1bf314f4cc29/metadata.json" @@ -0,0 +1,181 @@ +{ + "title": "Single-step retrosynthesis prediction via multitask graph representation learning", + "pre_title": "Retro-MTGR: Molecule Retrosynthesis Prediction via Multi-Task Graph Representation Learning", + "journal": "Nature Communications", + "published": "18 January 2025", + "supplementary_0": [ + { + "label": "Supplementary Information", + "link": "https://static-content.springer.com/esm/art%3A10.1038%2Fs41467-025-56062-y/MediaObjects/41467_2025_56062_MOESM1_ESM.pdf" + }, + { + "label": "Description Of Additional Supplementary File", + "link": "https://static-content.springer.com/esm/art%3A10.1038%2Fs41467-025-56062-y/MediaObjects/41467_2025_56062_MOESM2_ESM.pdf" + }, + { + "label": "Supplementary Data 1", + "link": "https://static-content.springer.com/esm/art%3A10.1038%2Fs41467-025-56062-y/MediaObjects/41467_2025_56062_MOESM3_ESM.xlsx" + }, + { + "label": "Supplementary Data 2", + "link": "https://static-content.springer.com/esm/art%3A10.1038%2Fs41467-025-56062-y/MediaObjects/41467_2025_56062_MOESM4_ESM.pptx" + }, + { + "label": "Supplementary Data 3", + "link": "https://static-content.springer.com/esm/art%3A10.1038%2Fs41467-025-56062-y/MediaObjects/41467_2025_56062_MOESM5_ESM.xls" + }, + { + "label": "Reporting Summary", + "link": "https://static-content.springer.com/esm/art%3A10.1038%2Fs41467-025-56062-y/MediaObjects/41467_2025_56062_MOESM6_ESM.pdf" + }, + { + "label": "Transparent Peer Review file", + "link": "https://static-content.springer.com/esm/art%3A10.1038%2Fs41467-025-56062-y/MediaObjects/41467_2025_56062_MOESM7_ESM.pdf" + } + ], + "supplementary_1": [ + { + "label": "Source Data", + "link": 
"https://static-content.springer.com/esm/art%3A10.1038%2Fs41467-025-56062-y/MediaObjects/41467_2025_56062_MOESM8_ESM.xlsx" + } + ], + "supplementary_2": null, + "source_data": [ + "/articles/s41467-025-56062-y#ref-CR36", + "/articles/s41467-025-56062-y#ref-CR16", + "https://doi.org/10.5281/zenodo.14346324", + "/articles/s41467-025-56062-y#ref-CR69", + "/articles/s41467-025-56062-y#Sec25" + ], + "code": [ + "https://github.com/zpczaizheli/Retro-MTGR", + "/articles/s41467-025-56062-y#ref-CR69", + "https://doi.org/10.5281/zenodo.14346324", + "/articles/s41467-025-56062-y#ref-CR69" + ], + "subject": [ + "Cheminformatics", + "Computational chemistry" + ], + "license": "http://creativecommons.org/licenses/by-nc-nd/4.0/", + "preprint_pdf": "https://www.researchsquare.com/article/rs-3205328/v1.pdf?c=1737292018000", + "research_square_link": "https://www.researchsquare.com/article/rs-3205328/v1", + "nature_pdf": "https://www.nature.com/articles/s41467-025-56062-y.pdf", + "preprint_posted": "05 Sep, 2023", + "research_square_content": [ + { + "section_name": "Abstract", + "section_text": "It is a vital bridging step to infer appropriate synthesis reaction routes (i.e., retrosynthesis) of newly-designed molecules. Unlike classical experience-based retrosynthesis approaches, artificial intelligence enables a cheap and fast retrosynthesis approach. Template-based models, limited in known synthesis templates, leverage substructure searching to infer candidate reaction centers (i.e., bonds). In contrast, both translation-based models (TransMs) and discriminative methods (DiscMs) are free to synthesis templates. TransM regards retrosynthesis as a translation from the target molecule to its reactants by generative algorithms. DiscM, directly inspired by chemical synthesis, performs reaction center recognition and leaving group identification in turn. 
Nevertheless, TransMs are redundant and weakly interpretable, while existing DiscMs neglect the associations between reaction centers and leaving groups. To address these issues, this paper elaborates a novel discriminative Multi-Task Graph Representation learning model of Retrosynthesis prediction (Retro-MTGR). It solves two major supervised discriminative tasks (i.e., the reaction center recognition and the leaving group identification respectively), and an auxiliary self-supervised task (i.e., atom embedding enhancer) simultaneously. The comparison with various state-of-the-art methods first demonstrates the superiority of Retro-MTGR. Then, the ablation studies reveal how its crucial components contribute to the prediction respectively, including the atom embedding enhancer, bond energies, and the leaving group co-occurrence graph. More importantly, comprehensive investigations validate its chemical interpretability by answering two questions: why a bond can be the reaction center or not, and what leaving groups are appropriate to given synthons. The answers demonstrate that Retro-MTGR can reflect five underlying chemical synthesis rules by characterizing molecule structures alone. Finally, two case studies demonstrate that the inferred retrosynthesis routes by Retro-MTGR are significantly consistent with those achieved by performed chemical synthesis assays. It\u2019s anticipated that our Retro-MTGR can provide prior guidance for real retrosynthesis route planning. 
The code and data underlying this article are freely available at https://github.com/zpczaizheli/Retro-MTGR.Biological sciences/Drug discovery/Medicinal chemistry/Computational chemistryBiological sciences/Drug discovery/Medicinal chemistry/Drug discovery and development", + "section_image": [] + }, + { + "section_name": "Additional Declarations", + "section_text": "There is NO Competing Interest.", + "section_image": [] + }, + { + "section_name": "Supplementary Files", + "section_text": "Data1.xlsxData 1Data2.xlsxData 2Data3.xlsxData 3Supplementary.pdfSupplementary", + "section_image": [] + } + ], + "nature_content": [ + { + "section_name": "Abstract", + "section_text": "Inferring appropriate synthesis reaction (i.e., retrosynthesis) routes for newly designed molecules is vital. Recently, computational methods have produced promising single-step retrosynthesis predictions. However, template-based methods are limited by the known synthesis templates; template-free methods are weakly interpretable; and semi template-based methods are deficient with regard to utilizing the associations between chemical entities. To address these issues, this paper leverages the intra-associations between synthons, the inter-associations between synthons and leaving groups (LGs), and the intra-associations between LGs. It develops a multitask graph representation learning model for single-step retrosynthesis prediction (Retro-MTGR) to solve reaction centre deduction and LG identification simultaneously. A comparison with 16 state-of-the-art methods first demonstrates the superiority of Retro-MTGR. Then, its robustness and scalability and the contributions of its crucial components are validated. More importantly, it can determine whether a bond can be a reaction centre and what LGs are appropriate for a given synthon, respectively. 
The answers reflect underlying chemical synthesis rules, especially opposite electrical properties between chemical entities (e.g., reaction sites, synthons, and LGs). Finally, case studies demonstrate that the retrosynthesis routes inferred by Retro-MTGR are promising for single-step synthesis reactions. The code and data of this study are freely available at https://doi.org/10.5281/zenodo.14346324.", + "section_image": [] + }, + { + "section_name": "Introduction", + "section_text": "In diverse modern drug design tasks (e.g., target screening1, molecule generation2, and ADMET prediction3), artificial intelligence (AI) technologies have achieved promising progress with significant cost and time reductions4,5. Once the chemical structure of a small molecule is determined in silico, retrosynthesis inference can determine the available reactants for synthesizing the target molecule6. This retrosynthesis process works as a bridge from in silico to in vitro settings. Compared to the synthesis reaction, retrosynthesis is an inverse inference process7,8. A complete retrosynthesis process is a tree-like route9, where the root node (the target molecule) is recursively decomposed into its descendant nodes (reactants) until reaching reactants that are commercially available. Each decomposition stage is called a single-step retrosynthesis process. However, even inferring a single-step retrosynthesis relies heavily on the individual domain experiences of chemists under costly trial-and-error assays10.\n\nIn recent years, both the accumulation of chemical synthesis data and the growth of deep learning methods have accelerated the rapid development of computer-assisted synthesis processes (CASPs) for single-step retrosynthesis. The existing single-step retrosynthesis methods can be roughly grouped into template-based, template-free, and semi template-based methods11. 
Template-based methods infer the single-step retrosynthesis process of a newly given molecule by finding appropriate reaction templates. These techniques can be categorized into similarity-based, classification-based, and embedding-based approaches. Similarity-based approaches directly leverage similarity metrics to match the target molecule to pre-collected reaction templates. For example, Coley et al. utilized the Tanimoto similarity on Morgan molecular fingerprints to search for suitable retrosynthesis templates12. Dai et al. proposed a conditional graphical model constructed upon graph learning networks (GLNs) and performed subgraph pattern matching to acquire candidate templates13. Classification-based approaches, which treat templates as class labels and target molecules as samples, train multiclass classifiers to determine candidate templates for a given target molecule. For example, Segler et al. trained a multiclass deep neural network with extended-connectivity fingerprints (ECFP4) to infer appropriate templates14. After organizing templates into reaction type-specific groups, Baylon et al. trained a set of reaction type-specific deep highway networks with Morgan fingerprints to match the correct templates in each template group15. Embedding-based approaches map both target molecules and templates into a common embedding space, where a good match is declared if a template is near a target molecule. For instance, Chen et al. designed an MPNN variant (LocalRetro) to encode templates and target molecules16. Seidl et al. proposed a modern Hopfield network (MHN) to measure the relevance between templates and the target molecule (initialized by ECFPs)17. However, template-based methods cannot predict the retrosynthesis results of target molecules with novel synthesis patterns that are outside the synthesis rules contained in the utilized template library. In addition, it is tedious to update template libraries when new synthesis knowledge is discovered18. 
In contrast, both template-free and semi template-based methods can predict single-step retrosynthesis results without a prebuilt template library.\n\nInspired by machine translation, template-free methods directly convert the input target molecule into its reactants in the form of strings. The simplified molecular input line entry system (SMILES) is a string notation strategy for representing molecules and reactions. It is a true chemical notation language with a simple vocabulary, including atom symbols, bond symbols, branches, cyclic structures, disconnected structures, and no space19. Each unit in the SMILES can be regarded as a word in a machine translation model. This paradigm has boosted the development of template-free methods. In earlier years, Liu et al. proposed an attention-enhanced long short-term memory (LSTM) model20 to convert the target molecule into its reactants under a sequence-to-sequence encoder-decoder architecture21. As new superstars in the natural language processing domain, transformers22 have been directly applied to retrosynthesis prediction in recent years since a SMILES string is equivalent to a chemical notation sentence23,24. However, these methods have led to new issues in which the generated reactants are probably invalid in terms of chemistry. More efforts have been made to address this issue. Zheng et al. designed an extra syntax postchecker based on a transformer to satisfy the chemical validity requirements of generated reactants25. Ucak et al. treated molecular substructures (capturing their local atomic environments) as words in a transformer to guarantee the validity of the generated reactants26. Similarly, Fang et al. defined a vocabulary of atom-localized substructures to convert a SMILES string into a word sequence under the transformer architecture27. 
However, these methods using SMILES representations cannot effectively capture the rich information hidden in molecular chemical structures (e.g., atomic properties, bond features, and adjacent structure matrices28). The subsequently developed methods integrate structural information into transformer-based retrosynthesis prediction frameworks. Mao et al. proposed a graph attention network (GAT) to encode atoms on molecular graphs, enhancing the atom embeddings in transformers29. Wan et al. integrated the adjacent matrix of a molecular graph into the calculating self-attention values. Lin et al. enhanced a self-attention module via node centrality and node position encodings derived from bipartite molecular graphs30. Tu et al. utilized a directed message passing neural network (D-MPNN) on a molecular graph to encode atoms and directly used the decoder of a transformer to generate reactants31. However, since the process of generating SMILES strings in template-free approaches sequentially outputs individual symbols, the interpretability of the resulting predictions is limited32.\n\nInspired by the retrosynthesis inference process employed by chemists, semi template-based methods perform two-phase retrosynthesis prediction tasks, including reaction centre prediction and leaving group (LG) prediction. The former subtask finds the reaction centre (i.e., a bond consisting of two reaction sites) where the molecule of interest is split into two synthons (i.e., incomplete reactants)33. The latter infers appropriate functional groups (i.e., LGs) that attach to two synthons to form reactants. Analogous to template-free methods, some semi template-based methods still adopt translation models to perform two-phase retrosynthesis prediction. For example, Wang et al. 
formulated the reaction centre prediction and LG prediction tasks as two sequence-to-sequence problems (i.e., target molecules to synthons and synthons to reactants, respectively), which were solved by two independent transformers respectively34. To characterize the rich information possessed by molecular structures, some methods treat bonds as samples and utilize graph neural networks to achieve an improved reaction centre identification effect via bond classification. For instance, Shi et al. utilized relational graph convolutional networks (R-GCNs) to recognize reaction centres and converted completed synthons into reactants via a variational graph translation model35. Yan et al. applied a GAT variant to predict reaction centres and a sequence-based transformer to convert synthons into reactants36. To obtain improved reaction centre predictions with richer chemical meanings, graph editing operations (e.g., node/edge additions and deletions) were leveraged in the follow-up methods. Somnath et al. constructed MPNNs to encode atoms/bonds into embedding spaces, where predefined graph editing operations implemented on atoms/bonds were discriminated to determine the reaction centre, and candidate LGs autoregressively picked from a discrete LG vocabulary were subsequently utilized for determining the corresponding synthons37. Similarly, Chen et al. designed a graph message passing network (GMPN) to predict graph editing operations on product molecular graphs to obtain synthons38. Furthermore, they found appropriate LGs from a predefined LG list in an autoregressive manner38. Moreover, considering the connection between reaction centre prediction and LG prediction, some methods have integrated these tasks into a unified autoregressive reasoning process under graph editing operations. For example, by adopting the encoder-decoder architecture in a transformer22, Sacha et al. 
iteratively started multiple graph editing operations from the target molecule until a STOP operation was encountered (i.e., until its reactants were reached)39. Zhong et al. adopted a similar architecture but employed more detailed graph editing operations32. However, the existing methods are deficient in terms of utilizing the occurrence tendencies of LGs w.r.t. synthons and their co-occurring associations.\n\nWe consider three underlying associations between chemical entities in a chemical synthesis reaction, such as a broad-sense coupling reaction \u2018\\({{{\\rm{R}}}}_{1}{{\\rm{C}}}\\left(={{\\rm{O}}}\\right){{\\rm{OH}}}+{{{\\rm{R}}}}_{2}{{\\rm{COH}}}\\to {{{\\rm{R}}}}_{1}{{\\rm{C}}}\\left(={{\\rm{O}}}\\right){{\\rm{OC}}}{{{\\rm{R}}}}_{2}+{{{\\rm{H}}}}_{2}{{\\rm{O}}}\\)\u2019. Broad-sense coupling reactions are dominant in multi-step retrosynthesis40. The above case generates one product molecule from two reactant molecules, except for H2O. The first association accounts for the two synthons (i.e., \\({{{\\rm{R}}}}_{1}{{\\rm{C}}}\\left(={{\\rm{O}}}\\right)\\) and \\({{{\\rm{R}}}}_{2}{{\\rm{CO}}}\\)) forming the product (i.e., \\({{{\\rm{R}}}}_{1}{{\\rm{C}}}\\left(={{\\rm{O}}}\\right){{\\rm{OC}}}{{{\\rm{R}}}}_{2}\\)). The second is the association between the two LGs (i.e., \\({{\\rm{OH}}}\\) and \\({{\\rm{H}}}\\)) that form the side product (i.e., \\({{{\\rm{H}}}}_{2}{{\\rm{O}}}\\)). The third is the association between a synthon and its LG, which are dissociated from the corresponding reactant, such as the pair of \\({{{\\rm{R}}}}_{1}{{\\rm{C}}}\\left(={{\\rm{O}}}\\right)\\) and \\({{\\rm{OH}}}\\) forming \\({{{\\rm{R}}}}_{1}{{\\rm{C}}}\\left(={{\\rm{O}}}\\right){{\\rm{OH}}}\\). Moreover, as stated by Ayers41, an excellent LG tends to be structurally simple (even monatomic) and to exhibit strong electron affinity and low bond dissociation energies (e.g., H, Cl, Br, and OH). We believe that structurally simple LGs are dominant w.r.t. 
the occurrence frequencies observed across molecules. In addition, we believe that bonds with strong energies tend to be ordinary bonds rather than reaction centres since it is difficult to synthesize high-energy bonds. Appropriately utilizing these associations would improve the interpretability of retrosynthesis prediction.\n\nFollowing the abovementioned consideration in reactions, this work, treating products, reactants, and synthons as molecule graphs to obtain atom embeddings, aims to measure the associations among synthons and LGs in a chemical embedding space. To achieve this goal, it develops a Multi-Task Graph Representation learning framework for Retrosynthesis prediction (Retro-MTGR), which performs two tasks (i.e., reaction centre recognition and leaving group identification) in retrosynthesis (shown in Fig.\u00a01). Its fundamental ideas are listed in the following:\n\nMolecules in the form of graphs are first represented by an MPNN-based atom encoder to learn initial atom embeddings. The AEE further boosts the atom embeddings by performing contrastive learning on the molecules and their synthons w.r.t. the associated molecule embeddings. The RCP implements a bond-level readout on the enhanced atom embeddings to learn bond embeddings, which are sequentially augmented by extra bond energies, and then recognize the reaction centres among the bonds. 
Afterwards, the LGP learns LG embeddings based on an LG co-occurrence graph and measures the proximity between them and the synthon embeddings (involving the atoms and bonds in the reaction centres) to predict appropriate LGs for the given synthons.\n\nTo make reaction sites (i.e., atoms in reaction centres) significant, Retro-MTGR elaborates an atom embedding enhancer (AEE), which captures the structural commonness and the differences between a product and the combination of its two synthons by graph-contrastive learning (GCL)42.\n\nTo highlight the difference between ordinary bonds and reaction centres, it constructs a reaction centre predictor (RCP), which generates bond embeddings augmented by easily accessed chemical knowledge (i.e., theoretical bond energies43).\n\nTo reveal the intra-associations/inter-associations between chemical entities (synthons and LGs) in reactions, the framework designs a leaving group predictor (LGP), which characterizes synthons by the embeddings of reaction sites (atoms) and reaction centres (bonds), represents LGs through embedding an LG co-occurrence network, and constructs a joint embedding space of synthons and LGs to measure their proximity.\n\nBased on the clustering over the learned bond embeddings, the bond embedding space illustrates why a bond can be the reaction centre in synthesis reactions (based on both USPTO-50K and USPTO-480K). 
The observations include (1) double bonds (C\u2009=\u2009C, C\u2009=\u2009O, and C\u2009=\u2009N), triple bonds (C#C and C#N), and aromatic bonds (c\u2009~\u2009c and c\u2009~\u2009n) having high energy (>=360\u2009kJ/mol) are ordinary bonds and belong to unique communities; (2) single bonds (i.e., C-C, C-c, c-c, C\u2019-C\u2019, C\u2019-O\u2019, c-O, C\u2019-N\u2019, C-S, C-N and C-O) having low bond energies (<360\u2009kJ/mol) fall into one community accounting for reaction centres and others accounting for ordinary bonds due to the diverse molecular structure topologies of their corresponding synthons44; (3) the electrical property distributions of the atom pairs contained in bonds demonstrate that a bond is the coupling reaction centre in a molecule if its member atoms tend to have opposite electrical properties (reflected by local substructures)45; otherwise, it is an ordinary bond.\n\nThe joint embedding space of chemical entities in reaction illustrates the distributions and the intra-associations/inter-associations of synthons and LGs. Four findings are concluded: (1) two synthons in a reaction always have opposite EPs (electrical properties) and are distant from each other; (2) a synthon and an LG belonging to a reactant usually have opposite EPs and are close to each other; (3) two LGs co-occurring in a reaction regularly have opposite EPs and are distant from each other; (4) in addition, reaction-common LGs tend to be spread around and are occurrence-dominant, monatomic or structurally simple (e.g., H, Cl, Br, I, and OH), while reaction-specific LGs tend to gather w.r.t. 
their reaction types and are similar to each other in terms of structures.", + "section_image": [ + "https://media.springernature.com/lw685/springer-static/image/art%3A10.1038%2Fs41467-025-56062-y/MediaObjects/41467_2025_56062_Fig1_HTML.png" + ] + }, + { + "section_name": "Results", + "section_text": "As commonly done in the existing methods26, we collect our benchmark dataset from the USPTO-50K dataset, which was derived from an open-source patent database containing 50,016 atom-mapped reactions46. The USPTO-50K dataset contains 10 types of reactions, including six kinds of broad-sense coupling reactions and four types of other reactions. By forming the molecule scaffold, broad-sense coupling reactions are the majority in the multi-step synthesis of a product molecule47. For example, as we counted, they account for >80% of 1771 reactions in the multi-step retrosynthetic routes provided in the study of Kevin40. In contrast, deprotections, protections, reductions, and oxidations modify atoms in branches or small functional groups to increase yield rate and raw material availability48,49,50. Thus, in the field of retrosynthesis inference, our Retro-MTGR pays attention to the retrosynthesis of broad-sense coupling reactions, which are involved in the disconnection of the molecular scaffold. After discarding deprotection, protection, reduction, and oxidation reactions, our dataset contains 35,682 reaction entries of broad-sense coupling reactions, which are further divided into 6 categories according to their reaction types (Supplementary Table\u00a01). 
Furthermore, we adopt the same training-validation-testing (TVTS) split as that used in the research of Coley et al.12; the reactions are decomposed into a training set, a validation set, and a testing set at an 8:1:1 ratio.\n\nIn the atom encoder, as suggested by the existing methods3, each atom is initially represented by a 28-dimensional (28-d) atom feature vector \\(({{{\\bf{x}}}}_{i}^{0}\\in {R}^{28})\\), including the atom type (23-d), the number of hydrogens (1-d), the number of linking neighbours (degree, 1-d), whether the atom is aromatic (1-d), its formal charge (1-d), and its atomic mass (1-d). See Supplementary Table\u00a02 for details.\n\nDue to the one-hot f atom type coding process, the initial atom representation \\({{{\\bf{x}}}}_{i}^{0}\\) is semisparse. To learn better embeddings, as suggested by Cheng et al.51, an extra three-layer MLP maps the atom to a dense form (\\({{{\\bf{x}}}}_{i}\\in {R}^{32}\\)). We empirically set 64 and 32 neurons in the hidden and output layers, respectively.\n\nMoreover, bonds are represented as a binary adjacent matrix \\({{\\bf{E}}}\\), in which \\({e}_{{ij}}=1\\) indicates the occurrence of a bond between two atoms (\\({a}_{i}\\) and \\({a}_{j}\\)); otherwise, no bond occurs. Both \\(\\left\\{{{{\\bf{x}}}}_{i}\\right\\}\\) and \\({{\\bf{E}}}\\) are input into a multilayer MPNN to obtain atom embeddings with the same dimensions as those of \\(\\left\\{{{{\\bf{x}}}}_{k}\\right\\}\\). In particular, we investigate how the number of MPNN layers (#layers = 1, 2, 4, 6) influences the performance of Retro-MTGR under the selected TVTS split. 
As shown in Supplementary Fig.\u00a01, the investigation (measured by top-1, top-3, and top-5 accuracy) reveals that the case with two layers yields the best performance in both the reaction-type-unknown (RTU) and reaction-type-known (RTK) scenarios.\n\nIn the RCP module, the MLP accounting for reaction centre identification also contains an input layer, a hidden layer, and an output layer. The input layer includes 33 neurons, where 32 neurons are responsible for the resulting embeddings derived from the bond-level readout, and the last neuron is responsible for the bond energy. The number of neurons is empirically set to 16. The unique neuron in the output layer accounts for the confidence score of a potential reaction centre.\n\nIn the LGP module, the nodes in the LGCoG are initially represented as \\(n\\)-dimensional one-hot coding vectors \\(\\left\\{{{{\\bf{k}}}}_{i}^{0}\\in {R}^{\\left|K\\right|}\\right\\}\\), where \\(n=\\left|K\\right|\\) is the cardinality of the LG set. Then, they are mapped to LG embeddings \\(\\left\\{{{{\\bf{k}}}}_{i}\\in {R}^{\\left|K\\right|}\\right\\}\\) by another two-layer MPNN without undergoing dimensional changes. Additionally, the adaptor maps the synthon embedding space (the concatenation of 32-d atom embeddings and 33-d bond embeddings) to the LG embedding space. This mechanism is implemented by a three-layer MLP, which contains an input layer accounting for synthon embeddings (\\({{{\\bf{s}}}}_{u}\\in {R}^{65}\\)), a hidden layer empirically possessing 128 neurons, and an output layer with \\(n\\) neurons. Thus, \\({{{\\bf{s}}}}_{u}^{*}\\,\\)=\u2009\\({{\\rm{M}}}{{\\rm{LP}}}\\left({{{\\bf{s}}}}_{u}\\right)\\in {R}^{n}\\), where \\(n\\) (i.e., \\(\\left|K\\right|\\)) is scenario-specific since the type-known scenario and the type-unknown scenario have different types of LGs.\n\nFinally, to investigate how the task weights influence the prediction results, we perform multiple rounds of parameter tuning in the RTU scenario. 
All the prediction performances attained under these combinations are shown in Supplementary Fig.\u00a02, where the combination accounting for the best prediction performance is highlighted (i.e., \\({w}_{1}\\)\u2009=\u20090.6, \\({w}_{2}\\)\u2009=\u20090.2, and \\({w}_{3}\\)\u2009=\u20090.2).\n\nTo evaluate the effectiveness of Retro-MTGR, we compare it with 16 state-of-the-art single-step retrosynthesis methods, including 4 template-based methods (MHN17, LocalRetro16, a GLN13, and RetroSim12), 6 template-free methods (G2GT30, Graph2SMILES31, RetroTRAE26, Retroformer28, SCROP25, and Seq2Seq20), and 6 semi template-based methods (G2Retro38, Graph2Edits32, R-SMILES52, RetroPrime34, MEGAN39, and G2Gs35). Since the employed dataset is slightly different from the datasets used in the original studies, we rerun these models by tuning their hyperparameters to conduct a fair comparison. During the tuning, we fix their neural network architectures. However, we tune the regular hyperparameters, such as the number of attention heads in the transformer framework, the dimensions of the GNN embedding layers, and the maximum number of iterations. The detailed tuning processes of these methods are provided in Supplementary Table\u00a03. In addition, two retrosynthesis scenarios, namely, RTU and RTK cases, are considered when testing the performance of these methods. In the RTU case, we have no information about the potential reaction types. In the RTK case, we must perform retrosynthesis for a molecule after being given its possible reaction type.\n\nThe results of all the methods validate that the RTU task is more difficult than RTK task since the extra type information contained in the RTK case helps the prediction process (Table\u00a01). The comparison demonstrates that our Retro-MTGR produces excellent predictions. 
Specifically, Retro-MTGR achieves accuracies of 54.3%, 76.7%, and 90.1% in the RTU case but achieves accuracies of 72.2%, 88.2%, and 92.8% in the RTK case in terms of the top-1, top-3 and top-5 metrics, respectively. Retro-MTGR attains the best top-5 RTU performance, the best top-1 and top-5 RTK performance, and the second-best performance over the remaining cases. A set of paired-sample t-tests demonstrates that Retro-MTGR performs similarly to R-SMILES but is superior to the 15 other state-of-the-art methods across the two retrosynthesis scenarios (i.e., p-value\u2009<\u20090.05).\n\nAdditionally, we employed paired-sample t-tests to individually investigate the differences between our reported results and those provided in the original papers. Detailed results can be found in Supplementary Table\u00a04. The results indicate that almost all methods exhibit no significant differences (p-value>\u20090.05). Although significant differences are observed for the Seq2Seq data, our reported results are better than those presented in the original papers. Thus, our reported results for the state-of-the-art methods are reliable after tuning their parameters.\n\nIn addition, we investigate the scalability of Retro-MTGR by running it on a more extensive and diverse dataset, USPTO-480K (i.e., USPTO-MIT)13. We compare it with R-SMILES52 in the RTU scenario. Specifically, R-SMILES achieves values of 60.3%, 78.2%, and 83.2%, while our Retro-MTGR approach achieves 61.3%, 80.3%, and 88.2% values in terms of the top-1, top-3, and top-5 accuracy metrics, respectively. Note: With the aid of Indigo Toolkit, we excluded the reaction entries belonging to oxidation, reduction, protection, or deprotection from USPTO-480k.\n\nIn this section, we investigate how the crucial components of our Retro-MTGR method contribute to the retrosynthesis prediction process through ablation studies. We construct 7 variants of our original model by masking one block of Retro-MTGR each time. 
They are briefly depicted as follows. First, we create two variants to assess how the multitask learning framework influences the performance of Retro-MTGR. The first variant, denoted as w/o MTL, trains the RCP and the LGP in separate tasks, while the other variant, denoted as w/o AEE, removes the AEE module. Subsequently, we evaluate how bond properties, including bond energies and bond types, affect the performance of Retro-MTGR. Two variants, denoted as w/o BE and w/o BT, remove these properties from the bond embeddings. More importantly, we discard the LGCoG to investigate how well it contributes to the performance of Retro-MTGR (denoted as w/o LGCoG). Finally, we form two additional variants to assess how well two calculation tricks affect the performance of Retro-MTGR. The first variant (denoted as w/o MLP) removes the MLP accounting for semisparse atom representations, while the second variant (denoted as w/o Norm) deletes the normalization operation imposed on LG co-occurrences.\n\nAll these variants are run under the TVTS split in the unknown and known reaction type scenarios (Table\u00a02). The significance of their differences from Retro-MTGR is measured by the p-values obtained under paired-sample t-tests. Overall, the superiority of Retro-MTGR to all its variants demonstrates that all these blocks play significant roles (p-values\u2009<\u20090.05) in the retrosynthesis prediction process.\n\nNotably, the multitask learning framework plays the most crucial role in Retro-MTGR. Specifically, Retro-MTGR with the joint learning method involving the RCP and LGP improves the top-1, top-3, and top-5 accuracies by 7.5%, 10.1%, and 17.8%, respectively, when the reaction types are unknown and by 10.1%, 10.8%, and 11.2%, respectively, when the reaction types are known. The improvements reveal that joint learning captures the tendency of LGs to undergo bond synthesis.\n\nThe LGCoG plays the second-most crucial role in Retro-MTGR. 
Specifically, Retro-MTGR with the LGCoG improves the top-1, top-3, and top-5 accuracies by 6.0%, 5.4%, and 13.3%, respectively, when the reaction types are unknown and by 8.7%, 5.1%, and 3.8%, respectively, when the reaction types are known. The essential reason for such improvements is that the embedding process of the LGCoG captures both the individual occurrence tendencies and co-occurrence associations of LGs.\n\nIn addition, the AEE, the third-most crucial component of Retro-MTGR, improves the top-k accuracies by 4.8%, 3.0%, and 7.6%, respectively, in the case with unknown reaction types and by 7.9%, 3.0%, and 3.7%, respectively, in the case with known reaction types. The results also show that GCL boosts atom embeddings.\n\nFurthermore, the comparison shows that bond properties improve the performance of Retro-MTGR. Bond energy, even when working as a single dimension, particularly provides a nontrivial contribution to bond embeddings since bonds with strong energies tend to be ordinary bonds. A detailed analysis can be found in Supplementary Fig.\u00a03.\n\nIn addition, the technical enhancement achieved by both the MLP and the normalization operation demonstrates that the two numeric tricks (i.e., the utilization of dense atom representations and the elimination of the large absolute variance of co-occurrences) enable a better learning effect.\n\nFinally, we perform additional ablation studies to explore how bond energy affects the first main task (reaction centre prediction), how the LGCoG affects the second task (LG prediction), and how the AEE affects both main tasks separately. The investigation significantly demonstrates that bond energy helps the method recognize reaction centres with a\u2009>\u20093% improvement in terms of the top-1 accuracy metric (Supplementary Table\u00a05). 
As expected, the LGCoG significantly boosts the LG prediction effect of the model, with a\u2009>\u200910% improvement in terms of the top-1 accuracy (Supplementary Table\u00a06). The AEE enhances both tasks with >4% improvements in terms of the top-1 accuracy metric. The significance of the improvements achieved by these modules is measured by the p-values obtained from paired-sample t-tests.\n\nIn general, all these variants play indispensable roles in retrosynthesis prediction. More detailed investigations indicate why the important modules work (conducted in the section Retrosynthesis Rule Discovery).\n\nTaking an esterification reaction (i.e., broad-sense coupling reaction) as an example, we investigate the process of a synthesis reaction (shown in Fig.\u00a02). For simplicity, we denote the electron-withdrawing property as a positive electrical property (+) and the electron-donating property as a negative electrical property (\u2212). Three cases of pairwise chemical entities (e.g., synthons and LGs) with opposite electrical properties (EPs) can be observed. The first case includes the pair of synthons that form the product (i.e., \\({{{\\rm{R}}}}_{1}{{\\rm{C}}}\\left(={{\\rm{O}}}\\right){{\\rm{OC}}}{{{\\rm{R}}}}_{2}\\)). The second case involves the pair of LGs that form the side product (i.e., \\({{{\\rm{H}}}}_{2}{{\\rm{O}}}\\)). The third case contains pairs consisting of synthons and their LGs, which are dissociated from a reactant, such as the pair including \\({{{{\\rm{R}}}}_{1}{{\\rm{C}}}}^{+}\\left(={{\\rm{O}}}\\right)\\) and \\({{{\\rm{OH}}}}^{-}\\) and that including \\({{{{\\rm{R}}}}_{2}{{\\rm{CO}}}}^{-}\\) and \\({{{\\rm{H}}}}^{+}\\). These cases reveal the underlying EP associations between chemical entities.\n\nThe detachment of LGs (i.e., \\({{{\\rm{OH}}}}^{-}\\) and \\({{{\\rm{H}}}}^{+}\\)) from two reactants results in corresponding synthons. 
Then, two synthons (i.e.,\\({{{{\\rm{R}}}}_{1}{{\\rm{C}}}}^{+}\\left(={{\\rm{O}}}\\right)\\) and \\({{{{\\rm{R}}}}_{2}{{\\rm{CO}}}}^{-}\\)) attract each other to form the target molecule (i.e., \\({{{\\rm{R}}}}_{1}{{\\rm{C}}}\\left(={{\\rm{O}}}\\right){{\\rm{OC}}}{{{\\rm{R}}}}_{2}\\)) according to Coulomb\u2019s law. Furthermore, the reaction illustrates that the LGs (i.e., \\({{{\\rm{OH}}}}^{-}\\) and \\({{{\\rm{H}}}}^{+}\\)) form a side product molecule (i.e., \\({{{\\rm{H}}}}_{2}{{\\rm{O}}}\\)) since they also exhibit opposite EPs when the target molecule is forming.\n\nThese observations inspire us to uncover underlying retrosynthesis rules via Retro-MTGR. Specifically, we reveal why a bond can be the reaction centre via the bond embedding space in Section Bond View. Meanwhile, we explore which LGs are appropriate for generating synthons via the joint embedding space of the LGs and synthons in Section 'Joint View of Synthons and Leaving Groups'.\n\nTo interpret why a bond can be the reaction centre, we consider the following three bond-derived questions.\n\nCan bond energies alone determine the reaction centre in a molecule?\n\nFor a chemical bond, its bond energy (i.e., the minimum energy required to break it down) measures its stability44. The greater the bond energy is, the more stable the bond is, and the more difficult the bond is to synthesize from the point of view of chemical retrosynthesis. Thus, we assume that the reaction centre is a low-energy bond.\n\nTo validate this hypothesis, we first generate a statistical distribution of the bond energies across all molecular bonds in a histogram (Supplementary Fig.\u00a03). Note that the bond energy used here refers to the theoretical bond energy of the target chemical bond, which is used for two reasons. First, it is challenging to measure real bond energies (e.g., \\({\\triangle H}_{298}^{o}\\) in the gas phase43). 
Additionally, the bond energy of a bond varies due to the influences of its neighbouring bonds or near atoms in diverse molecular conformations44. Thus, we consider only the theoretical breaking energy of each chemical bond. We collect the theoretical values of the bond energies from the textbook \u2018Handbook of chemistry and physics\u201945. The list of theoretical bond energies can be found in Supplementary Data\u00a01. The bonds are sorted into 20 equally spaced bins along the bond energy axis between the minimum and maximum energy values (kJ/mol). Due to the difference between the numbers of reaction centres and ordinary bonds, the heights of the bins (i.e., the number of bonds falling in the bins) are normalized by the total number of bonds to conduct a convenient comparison.\n\nAs illustrated in Supplementary Fig.\u00a03, both the USPTO-50K and USPTO-480K datasets possess similar distributions, and no significant difference is found (p-value\u2009=\u20090.999 under a paired-sample t-test). The underlying reason for this finding is that USPTO-50K guarantees the representativeness of the most common reaction types used in medicinal chemistry43. Overall, most bonds fall into four bins: [270, 315], [315, 360], [450, 495], and [495, 540]. Specifically, the bond energies of reaction centres are usually located in the lower bond energy range (i.e., 95.26% of reaction centres have bond energies <360\u2009kJ/mol). In contrast, 45.33% of the ordinary bonds have bond energies <360\u2009kJ/mol, and 54.65% have bond energies \u2265360\u2009kJ/mol. Thus, a na\u00efve decision can be made: bonds with bond energies >360\u2009kJ/mol are usually ordinary bonds.\n\nSuch a finding can be used to filter out ordinary bonds with large breaking energies during the reaction centre identification process. This is why bond energies significantly contribute to the identification of reaction centres. 
However, bond energies cannot determine the reaction centre in a molecule alone since ordinary bonds (45.33%) still overlap with the reaction centres in cases with low breaking energies (<360\u2009kJ/mol). Based on the bond embedding space, this issue can be further investigated by answering the second question.\n\nWhat is the underlying chemical rule captured by bond embeddings such that reaction centres can be distinguished from ordinary bonds?\n\nOne of the core contributions of our model (Retro-MTGR) is the discrimination of reaction centres from ordinary bonds in cases with low bond energy. Since bond embedding representations (Formula 4) characterize bond features based on molecular graph topologies, we utilize them to determine the difference between reaction centres and ordinary bonds. Principal component analysis (PCA) is used to visualize bonds in 2-dimensional space, where each point represents a bond.\n\nSuch a bond space is rendered (Fig.\u00a03). Figure\u00a03A clearly separates the reaction centres (red points) and ordinary bonds (blue points), except for a small overlapping area. This separation result demonstrates that our model can effectively characterize the difference between reaction centres and ordinary bonds. More importantly, both reaction centres and ordinary bonds can be split into communities, which are strongly specific to bond types. Figure\u00a03B indicates that bond communities are consistent with their bond types. We find that 7 bonds of the same type, namely, C#N (triple bond with a theoretical bond energy of 891\u2009kJ/mol), C#C (triple bond, 837\u2009kJ/mol), c\u2009~\u2009c (aromatic bond, 533\u2009kJ/mol), C\u2009=\u2009O (double bond, 728\u2009kJ/mol), C\u2009=\u2009N (double bond, 615\u2009kJ/mol), c\u2009~\u2009n (aromatic bond, 483\u2009kJ/mol), and C\u2009=\u2009C (double bond, 611\u2009kJ/mol), form unique communities. 
Commonly, they are ordinary bonds with high bond energies (>\u2009480\u2009kJ/mol).\n\nIn contrast, single bonds with lower bond energies (<360\u2009kJ/mol) usually fall into two communities, such as C-C, C-c, c-c, C\u2019-C\u2019, C\u2019-O\u2019, c-O, C\u2019-N\u2019, and C-S. One of the communities is labelled the reaction centre, and the other is labelled an ordinary bond. As shown in Fig.\u00a03C, a C-C bond is the reaction centre (shown by the brown dashed line) if one of the carbon atoms connects with an oxygen atom via double bonds. Otherwise, it is just an ordinary bond (shown as a solid brown line). Significantly, two types of single bonds (i.e., C-N and C-O) fall into >2 communities. For example, C-O bonds are split into 4 groups, of which 3 groups are annotated as reaction centres, and the remaining group is annotated as an ordinary bond group. As in previous cases, the connection of the carbon atom in a C-O bond with diverse atoms (e.g., oxygen or Cl) is the key to being a reaction centre. Again, the connection of the oxygen atom in a C-O bond with a sulphone group pushes the C-O bond to act as a reaction centre. In contrast, a C-O bond is usually an ordinary bond if its carbon and oxygen atoms have no extra connections beyond alkyl groups or hydrogen atoms (Fig.\u00a03D).\n\nIn brief, as illustrated in the bond embedding space, bonds (e.g., double bonds, triple bonds, and aromatic bonds) with high energy (\u2265360\u2009kJ/mol) are always ordinary bonds and gather in their communities. Moreover, most single bonds with lower bond energies can be reaction centres or ordinary bonds, which are grouped into two or more communities. The local structures around the atoms in bonds determine whether a bond is a reaction centre. In particular, compared with other single bonds, both C-N and C-O bonds are involved in more diverse molecular structures, resulting in >2 reaction centre communities. 
The details can be found in the Supplementary Data\u00a02.\n\nWhy can a bond be the reaction centre in one molecule but not in another molecule?\n\nA Reaction centres and ordinary bonds. Red dots indicate reaction centres, while blue dots indicate ordinary bonds. B Bond types. Different colours represent different bond types. C C-C bond communities. The left reaction template indicates an ordinary C-C bond while the right shows a reaction centre. D C-O bond communities. The left three reaction templates indicate C-O reaction centres while the right shows an ordinary C-O bond. Regarding the chemical symbols in (B), the symbol C stands for a carbon atom, c denotes a carbon atom in an aromatic bond, and C\u2019 represents a carbon atom in a general ring. Moreover, N stands for a nitrogen atom, n signifies a nitrogen atom in an aromatic bond, and N\u2019 represents a nitrogen atom in a general ring. In addition, O denotes an oxygen atom, O\u2019 represents an oxygen atom in a general ring, and S signifies a sulphur atom. Four specific symbols, including \u2018~\u2019, \u2018-\u2019, \u2018=\u2019, and \u2018#\u2019, denote aromatic, single, double, and triple bonds, respectively. In (C) and (D), \u2018R1\u2019 and \u2018R2\u2019 denote alkyl groups. All atom individuals are numbered while hydrogen atoms are omitted. Reaction centres and ordinary bonds are highlighted by brown dashed lines and brown solid lines respectively. Source data are provided as a Source Data file.\n\nConsidering that bond energy cannot determine the reaction centre in a molecule alone, our model (Retro-MTGR) leverages molecular graph topologies to capture the differences between reaction centres and ordinary bonds, even those with the same bond types. We investigate what hidden chemical rule is captured by atom/bond embeddings. 
It is anticipated that this inherent law will help identify reaction centres and ordinary bonds, especially in cases with both common bond types and similar bond energies.\n\nOur investigation is inspired by the chemical knowledge that the electrical property (denoted as p) of an atom in a molecule is determined by the conjugation effect of the motion of its electrons as well as the union of its spatially neighbouring atoms. When exhibiting an attractive impact on electrons, the examined atom is considered an electron-withdrawing atom. Otherwise, it is called an electron-donating atom. Since it is challenging to quantify the EPs of atoms due to complicated interatom influences, we first propose a qualitative method for determining the strengths and weaknesses of atomic EPs based on expert knowledge. Then, when enumerating atom-centred substructures, we identify 356 substructures, which are categorized into four groups in terms of their EP strengths. Specifically, the atoms exhibiting strong electron-withdrawing/donating properties are labelled p++ and p\u2212\u2212, respectively. Furthermore, the atoms exhibiting weak electron-withdrawing/donating properties are denoted as p+ and p\u2212, respectively. As a result, the atom pairs forming bonds show ten possible pairs of EPs (e.g., (p++ | p\u2212\u2212) and (p+| p\u2212)) in total.\n\nFirst, we count the percentages of all types of EP pairs in the case involving both reaction centres and ordinary bonds (Supplementary Fig.\u00a04). This observation illustrates that the atom pairs that form reaction centres have opposite dominant EPs (97.0% with \u2018p+| p\u2212\u2019, \u2018p+|p\u2212\u2212\u2019, \u2018p++ | p\u2212\u2019, and \u2018p++ |p\u2212\u2212\u2019 pairs), while those pairs that form ordinary bonds have the same or similar EPs (80.1%). Some ordinary bonds with opposite EPs are also reaction centres in the deeper steps of the retrosynthesis process. 
See also Section Case Study.\n\nTo validate whether our bond embeddings can capture the underlying rule, we form an EP distribution of the atom pairs in bonds w.r.t. the bond embeddings of the training dataset. Specifically, the bond embeddings are first mapped to one-dimensional values by an MLP. Then, a histogram EP plot is drawn (Fig.\u00a04), where the horizontal axis indicates the values of the 1-D bond embeddings and the vertical axis indicates the proportion of bonds falling into specific bins of bond embedding values. Moreover, we illustrate a set of histograms produced w.r.t. pairwise EP patterns, where four patterns account for reaction centres and six patterns account for ordinary bonds (Supplementary Fig.\u00a05). In conclusion, all these histograms show that bonds with opposite EPs tend to have larger bond embedding values, and vice versa. Therefore, our bond embeddings reflect the underlying EPs.\n\nThe total number of atom pairs is 274,389, where the number of atom pairs having opposite electrical properties is 78,666 and that of atom pairs having same or similar electrical properties is 195,723. The horizontal axis indicates the values of 1-D bond embeddings, while the vertical axis indicates the proportions of bonds falling into specific bins of bond embedding values. Source data are provided as a Source Data file.\n\nFinally, from the viewpoint of EPs, we review the cases with low bond energy from the previous section (Fig.\u00a03C). When a C-C bond is a reaction centre, except for alkyl groups (R-groups) or hydrogen atoms, its carbon atom connects with an extra oxygen atom, which causes two carbons in the bond to have opposite EPs. Specifically, the left panel shows that both the electronegativity of the oxygen atom and its conjugated double bond with the No. 2 carbon atom cause the electron-withdrawing property of the No. 2 carbon atom. Moreover, the R2 group results in the electron-donating property of the No. 3 carbon atom. Thus, the No. 
2 and No. 3 carbon atoms have opposite EPs. In contrast, in the right panel, since the No. 1 and No. 2 carbon atoms have similar EPs, the C-C bond is an ordinary bond. A similar reason explains why C-O bonds are reaction centres or ordinary bonds. In addition to oxygen atoms, halogen atoms connected to carbon atoms and sulphone groups linked to oxygen atoms in C-O bonds cause opposite EPs between the carbon (electron-donating) and oxygen (electron-withdrawing) atoms in C-O bonds.\n\nIn brief, the structural difference between the neighbouring bonds of an atom determines its electrical properties. Atom pairs in reaction centres tend to have opposite EPs, while those in ordinary bonds tend to have the same or similar EPs.\n\nThus far, we have leveraged bond embeddings generated by Retro-MTGR to determine why a bond can be a reaction centre by embedding molecule topologies. Bonds with high bond energies are non-single bonds (e.g., double bonds, triple bonds, and aromatic bonds), which are always ordinary bonds. Bonds with lower bond energies are single bonds, which can be reaction centres in some molecules and ordinary bonds in others. A bond is a reaction centre if its member atoms have opposite EPs and is an ordinary bond otherwise. The EP of an atom depends on its neighbouring bonds.\n\nA 3D embedding space. B PC1-PC2 plane. C PC2-PC3 plane. D PC1-PC3 plane. For simplicity, the electron-withdrawing property is denoted as a positive EP (+), while the electron-donating property is denoted as a negative EP (\u2212). Synthons with positive EPs and negative EPs (denoted as \\({{Syn}}^{+}\\) and \\({{Syn}}^{-}\\)) are rendered as blue and red points, respectively. LGs possessing positive EPs and negative EPs (denoted as \\({{LG}}^{+}\\) and \\({{LG}}^{-}\\)) are marked by triangles and circles, respectively. In addition, the marker sizes represent LG occurrence frequencies (degrees), while the filled colours account for the types of reactions where LGs occur. 
Source data are provided as a Source Data file.\n\nThe LGP in Retro-MTGR enables the construction of a joint embedding space for synthons and LGs that helps interpret which LGs are appropriate for particular synthons. For a vivid visualization, we map the high-dimensional embedding space to its 3D form via PCA (Fig.\u00a05). In this space, we consider the following four questions.\n\nWhat is the association between the two synthons in a reaction?\n\nAs observed, synthons exhibit a valley-shaped surface, where blue and red points account for synthons possessing positive EPs and negative EPs (denoted as \\({{Syn}}^{+}\\) and \\({{Syn}}^{-}\\)), respectively. Several crucial characteristics of synthons can be found. First, their projecting plane expanded by the first two principal components (PCs) illustrates a considerable degree of separation between \\({{Syn}}^{+}\\) and \\({{Syn}}^{-}\\) even with just the first principal component (PC1). For example, 94.61% of synthons can be correctly separated at PC1\u2009=\u2009\u22120.375, where \\({{Syn}}^{+}\\) and \\({{Syn}}^{-}\\) tend to have smaller and larger PC1 values, respectively (Fig.\u00a05B). Moreover, the average distances of the synthons in the same reactions and those of the synthons in different reactions are investigated. As illustrated in Fig.\u00a06A, the comparison shows that the former average distance (3.609\u2009\u00b1\u20091.47) is significantly greater than the latter distance (2.683\u2009\u00b1\u20091.49) according to a univariate analysis of variance (p-value\u2009=\u20090.0059\u2009<\u20090.05). Overall, the embedding space illustrates that two synthons in the same reaction always have opposite EPs and are distant from each other, and vice versa.\n\nA Pairwise distance distributions between synthons. The blue line represents the distance distribution of synthons presenting in the same reactions, while the red line represents that of synthons in different reactions. 
B Pairwise distance distributions between synthons and LGs. The blue line represents the distance distribution of synthons and LGs belonging to the same reactants, while the red line represents the distribution in the case with different reactants. C Distance distributions between LGs in the embedding space. The blue line represents the distance distribution of LGs co-occurring in the same reactions, while the red line represents the distribution of LGs in different reactions. The comparisons in (A), (B) and (C) demonstrate the significant difference between two distance distributions under a univariate analysis of variance with p-value = 0.0059, p-value = 0.0082 and p-value = 0.0014 respectively. Source data are provided as a Source Data file.\n\nWhat is the association between a synthon and an LG in a reactant?\n\nMore importantly, LGs with positive EPs (denoted as \\({{LG}}^{+}\\) and marked by triangles) also exhibit separation from LGs with negative EPs (denoted as \\({{LG}}^{-}\\) and marked by circles) along with the first principal component in terms of EPs (Fig.\u00a05B). For example, 77.05% of the LGs can be correctly distinguished even with just a linear separation at the position of PC1\u2009=\u2009\u22120.375, where\\(\\,{{LG}}^{+}\\) and \\({{LG}}^{-}\\) tend to have larger and smaller PC1 values, respectively, than synthons. Furthermore, \\({{LG}}^{+}\\) tends to occur in the \\({{Syn}}^{-}\\) zone, while \\({{LG}}^{-}\\) tends to appear in the \\({{Syn}}^{+}\\) zone. To determine the underlying association between synthons and LGs, we calculate the distance between a synthon and an LG in the same reactant (\\({d}_{s,l}^{{same}}\\)) and that between a synthon and an LG in different reactants (\\({d}_{s,l}^{{diff}}\\)). 
As shown in Fig.\u00a06B, \\({d}_{s,l}^{{same}}\\) (1.22\u2009\u00b1\u20090.82) for the same reactants is significantly lower than \\({d}_{s,l}^{{diff}}\\) (2.59\u2009\u00b1\u20091.15) for different reactants according to a univariate analysis of variance (p-value\u2009=\u20090.0082\u2009<\u20090.05). In general, the embedding space illustrates that a synthon and an LG tend to form a reactant if they are close to each other and follow the matching rule of positive and negative EPs.\n\nWhat is the tendency for LGs to occur?\n\nSimilar to synthons, LGs also scatter on the valley-shaped surface (Fig.\u00a05A). First, we find that several LGs occur many times, while many LGs occur a few times. The node sizes denote their occurrence frequencies in Fig.\u00a05. The former are simple groups (e.g., H, OH, or halogens), while the latter are usually chemical substructures. To determine the underlying reason for this finding, we investigate the number of LG occurrences according to different reaction types (Supplementary Data\u00a03). The investigation reveals that LGs can be split into two categories according to the number of reaction types they participate in. The first category, \u2018reaction-common\u2019 LGs, contains LGs that appear in equal to or more than half the total number of reaction types (i.e., >3). They are usually simple LGs, occur frequently, and have many matching partner LGs. Specifically, \u2018H\u2019, \u2018OH\u2019, \u2018Cl\u2019, and \u2018Br\u2019 appear in 6, 6, 6, and 5 categories, respectively. In particular, \u2018H\u2019 is involved in almost all reactions, occurring 27623 times and having 37 kinds of matching partner LGs (denoted by the node degrees in the graph). The embedding space shows that reaction-common LGs with opposite EPs are far from each other. For example, the distances between H and OH, between H and Cl, and between H and Br are 4.74, 4.89, and 4.03, respectively. 
These distances are greater than the average distance between all pairs of LGs (2.19). This finding is consistent with the statement that an excellent LG tends to be structurally simple (even monatomic) and to exhibit strong electron affinity and lower bond dissociation energies (e.g., H, Cl, Br, and OH)40. The second category, named \u2018reaction-specific\u2019 LGs, includes LGs occurring only in specific types of reactions. They are usually composed of chemical substructure groups, such as \u2018-CC\u2019, \u2018-OCC(Cl)(Cl)Cl\u2019, and \u2018-OCC(F)(F)F\u2019. Reaction-specific LGs gather into clusters on the valley-shaped surface. Some clusters contain many reaction-specific LGs. For example, in the case with 16 LGs exclusively occurring in Type-2 reactions, the average distance within Type-2 reactions (0.82) is less than that of all the LGs (2.19). Similarly, the average distance of LGs exclusively occurring within Type-3 reactions is 1.31. Usually, LGs involved in the same type of cluster have similar chemical structures, such as \u2018CCOC(\u2009=\u2009O)O\u2019 and \u2018CC(C)(C)OC(\u2009=\u2009O)O\u2019 in Type-2 reactions (0.783) and \u2018B1OCCO1\u2019 and \u2018B1OCCCO1\u2019 (0.754) in Type-3 reactions, where the similarity is calculated by MACCS fingerprints in terms of Jaccard similarity. The chemical structural similarity of LGs implies their potential for substitution, which is determined by costs or the reaction conditions available in synthesis reaction routes. In addition, each of several clusters contains one or a couple of reaction-specific LGs; these occur only a few times and tend to be near reaction-common LGs having the same EPs or far from reaction-common LGs possessing opposite EPs. For example, \u2018CCC[SnH](CCC)CCC\u2019, which has negative EPs and occurs 1 time, is near \u2018OH\u2019 (distance = 0.61) but far from \u2018H\u2019, which has a positive EP in the embedding space (distance = 4.96). 
In total, reaction-common LGs tend to spread widely, while reaction-specific LGs tend to gather w.r.t. their reaction types and tend to be clustered similarly in terms of structure. In addition, the LGs are rendered in terms of their reaction types in the LGCoG to provide a clear visualization (Fig.\u00a07A).\n\nA Reaction type map. LGs occurring in single reaction types, multiple reaction types (\u22643), and many types (>3, reaction-common) are highlighted in different colours. B EP map. LGs with positive and negative EPs are rendered in blue and red, respectively.\n\nWhat is the association between two LGs in a reaction?\n\nFinally, we consider the occurrence of LG pairs. Overall, 94.3% of the LG pairs in the same reactions have opposite EPs, which cause two LGs in a reaction to be apart from each other in the embedding space. In particular, we investigate the pairwise distances between the LGs in the embedding space. Figure\u00a06C shows that the pairwise distances between co-occurring LGs (3.92\u2009\u00b1\u20090.99) in reactions are greater than those between non-co-occurring LGs (2.14\u2009\u00b1\u20091.11) according to a univariate analysis of variance (p-value\u2009=\u20090.0014\u2009<\u20090.05). Specifically, we find that LG pairs consisting of two simple groups (e.g., H, OH, or halogens) usually occur frequently. In particular, the pairs \u2018H, OH\u2019 (27.8%), \u2018H, Cl\u2019 (27.4%), \u2018H, Br\u2019 (12.9%), and \u2018H, I\u2019 (8.1%) are the most frequent LG pairs. Moreover, LG pairs including simple groups and chemical substructures, such as \u2018Br, CC1(C)OBOC1(C)C\u2019 (1.8%), occur at low frequencies. Few LG pairs (0.386%) are composed of chemical substructures only, such as \u2018O\u2009=\u2009S(\u2009=\u2009O)(O)C(F)(F)F-B(OH)2, O\u2009=\u2009S(\u2009=\u2009O)(O)C(F)(F)F-CC1(C)OBOC1(C)C\u2019. In addition, the LGs are rendered in terms of their EPs in the LGCoG to provide a straightforward visualization (Fig.\u00a07B). 
Thus, the embedding space illustrates that the more distant two LGs with opposite EPs are, the greater their co-occurrence, or the more likely they are to occur in the same reaction.\n\nIn summary, the union of synthons and LG co-occurrences results in an embedding space, which captures the underlying association rules between the synthons and LGs. First, the two synthons in a reaction always have opposite EPs and are distant from each other in the embedding space. Second, a synthon and an LG belonging to a reactant usually have opposite EPs and are close to each other in the embedding space. Finally, two LGs co-occurring in a reaction regularly have opposite EPs and are distant from each other. Therefore, the union of synthons and the co-occurrence graphs of LGs contains rich retrosynthesis information, including the intra-associations between synthons, the inter-associations between LGs and synthons, and the intra-associations between LGs. Our Retro-MTGR method can capture such rich information to achieve an enhanced retrosynthesis prediction effect.\n\nTo evaluate the retrosynthesis prediction ability of our Retro-MTGR method in a real scenario, we collect two drugs (i.e., Sonidegib53 and Acotiamide54) that are not included in our dataset as the study cases. We infer their retrosynthesis routes with Retro-MTGR and then validate them via chemical assays.\n\nThe two selected drugs are briefly summarized as follows. The first drug, Sonidegib, is a Hedgehog signalling pathway inhibitor (via smoothened antagonism) that was developed as an anticancer agent by Novartis and approved by the FDA in 2015 for treating basal cell carcinoma. Currently, it is commonly used for the treatment of locally advanced recurrent basal cell carcinoma (BCC) following surgery and radiation therapy or in cases where surgery or radiation therapy is not appropriate (DrugBank ID: DB09143)52. 
The second drug, Acotiamide, is a medication manufactured and approved in Japan for treating postprandial fullness, upper abdominal bloating, and early satiation due to functional dyspepsia. It acts as an acetylcholinesterase inhibitor (DrugBank ID: DB12482) 53.\n\nSince Retro-MTGR is a single-step retrosynthesis prediction model, we iteratively apply it to infer a complete retrosynthesis route for a given drug molecule. In the first iteration, Retro-MTGR splits the complete molecule into two synthons under the top-1 criterion (the first candidate reaction centre), which are further converted into smaller intermediate molecules by appending appropriate LGs. The intermediate molecules are then split into smaller molecules by Retro-MTGR in a similar manner unless the intermediate molecules are reactants that can be easily bought on the market.\n\nThe prediction results of Sonidegib are shown in Fig.\u00a08A. The predicted retrosynthesis route of Sonidegib (marked as \u20181\u2019) illustrates that it can be split into two intermediate molecules (marked as \u20182\u2019 and \u20183\u2019). Furthermore, they are split into two pairs of reactants, where one pair is marked as \u20184\u2019 and \u20185\u2019 and the other is marked as \u20186\u2019 and \u20187\u2019. In each retrosynthesis step, the attention scores of the bonds are labelled, and the highest (top-1) score is considered the reaction centre. During the retrosynthesis route prediction process, both the reaction centres and LGs are correctly predicted.\n\nA Retrosynthesis prediction route of Sonidegib. Sonidegib (\u20181\u2019) is split at the reaction centre into intermediate molecules \u20182\u2019 and \u20183\u2019, where the highest attention score generated by our Retro-MTGR method is highlighted. Then, \u20182\u2019 and \u20183\u2019 are further decomposed into four reactants (\u20184\u2019, \u20185\u2019, \u20186\u2019 and \u20187\u2019) according to their reaction centres. 
B Chemical synthesis route of Sonidegib. First, the Suzuki cross-coupling reaction between \u20184\u2019 and \u20185\u2019 is executed to obtain intermediate molecule \u20182\u2019. Furthermore, \u20186\u2019 is converted to \u20188\u2019 with the help of m-Chloroperbenzoic acid (m-CPBA) to prevent its intrareaction. Then, \u20188\u2019 is combined with \u20187\u2019 in the presence of DIEA (N, N-diisopropylethylamine) to form another intermediate molecule \u20189\u2019, which is further reduced by H2 in the presence of Pd/C to generate \u20183\u2019. In the last step, the coupling of compound \u20182\u2019 with \u20183\u2019 in the presence of HATU (2-(7-Azabenzotriazol-1-yl)-N, N, N\u2019, N\u2019-tetramethyluronium hexafluorophosphate) and DIEA (N, N-Diisopropylethylamine) in DMF (N, N-Dimethylformamide) generates Sonidegib (\u20181\u2019). C Retrosynthesis prediction route of Acotiamide. Acotiamide (\u201810\u2019) is split at the reaction centre into intermediate molecules \u201811\u2019 and reactant \u201812\u2019, where the highest attention score generated by our Retro-MTGR method is highlighted as well. Then, \u201811\u2019 is further decomposed into two reactants (\u201813\u2019 and \u201814\u2019) according to their reaction centres. D Chemical synthesis route of Acotiamide. First, reactant \u201814\u2019 is esterified with methanol to form \u201815\u2019. Then, compound 13 is coupled with 15 in the presence of HATU and DIEA to obtain intermediate molecule \u201816\u2019, which is sequentially hydrolysed to another intermediate molecule \u201811\u2019 by NaOH and HCl. Finally, the coupling between the intermediate molecule \u201811\u2019 and the reactant \u201812\u2019 in the presence of HATU and DIEA in DMF generates the Acotiamide molecule (\u201810\u2019). More details (e.g., reaction conditions and instruments) about the above chemical synthesis reactions can be found in Supplementary Note\u00a01. 
Note: DIEA, HATU, m-CPBA, and Pd/C are catalysts used in the abovementioned chemical reactions.\n\nThe chemical synthesis assay of Sonidegib is shown in Fig.\u00a08B. Furthermore, according to the prediction results, a series of chemical synthesis reactions starting from reactants (\u20184\u2019, \u20185\u2019, \u20186\u2019, and \u20187\u2019) is performed to validate the predicted retrosynthesis route. Remarkably, due to the potential intrareaction among the \u20186\u2019 molecules triggered by their chlorine (-Cl) and amino groups (-NH2), the expected reaction between \u20186\u2019 and \u20187\u2019 generates fewer \u20183\u2019 molecules. To guarantee a high production rate of \u20183\u2019 in the real synthesis process, we convert \u20186\u2019 to \u20188\u2019 with a nitration reaction, which alters the amino group to a nitro group (-NO2). Then, the reaction of \u20188\u2019 and \u20187\u2019 produces \u20189\u2019, where the nitro group is further converted back into the amino group in a reduction reaction to obtain \u20183\u2019 with the desired product. Moreover, the reaction of \u20184\u2019 and \u20185\u2019 generates \u20182\u2019. Finally, we perform an amidation reaction by combining \u20182\u2019 and \u20183\u2019 to form the product molecule Sonidegib (\u20181\u2019).\n\nThe prediction results of Acotiamide are shown in Fig.\u00a08C. Similarly, the retrosynthesis route of Acotiamide (marked as \u201810\u2019) is correctly predicted in terms of both its reaction centres and LGs and validated by chemical synthesis reactions. Specifically, the first retrosynthesis step generates an intermediate molecule (\u201811\u2019) and a reactant (\u201812\u2019). Then, the former molecule is split into two reactants (\u201813\u2019 and \u201814\u2019) in the second step.\n\nThe chemical synthesis assay of Acotiamide is shown in Fig.\u00a08D. 
Since an intrareaction issue is also encountered in \u201814\u2019, a similar strategy is adopted to guarantee the final production of \u201810\u2019. In brief, the carboxylic acid group (-OH) of \u201814\u2019 is altered to a methoxy group (-OCH3) to generate \u201815\u2019 through an esterification reaction. As with the substitution of \u201814\u2019, \u201815\u2019 is combined with \u201813\u2019 to generate a new intermediate molecule, \u201816\u2019, which is sequentially hydrolysed to an intermediate molecule \u201811\u2019 by both NaOH and HCl. Finally, a similar amidation reaction combining \u201811\u2019 and \u201812\u2019 is performed to form Acotiamide (\u201810\u2019).\n\nTo summarize, the ability of Retro-MTGR to predict retrosynthesis routes is consistent with chemical assays. Thus, this study can provide clear guidance for performing retrosynthesis route planning under extra reaction conditions.", + "section_image": [ + "https:////media.springernature.com/lw685/springer-static/image/art%3A10.1038%2Fs41467-025-56062-y/MediaObjects/41467_2025_56062_Fig2_HTML.png", + "https:////media.springernature.com/lw685/springer-static/image/art%3A10.1038%2Fs41467-025-56062-y/MediaObjects/41467_2025_56062_Fig3_HTML.png", + "https:////media.springernature.com/lw685/springer-static/image/art%3A10.1038%2Fs41467-025-56062-y/MediaObjects/41467_2025_56062_Fig4_HTML.png", + "https:////media.springernature.com/lw685/springer-static/image/art%3A10.1038%2Fs41467-025-56062-y/MediaObjects/41467_2025_56062_Fig5_HTML.png", + "https:////media.springernature.com/lw685/springer-static/image/art%3A10.1038%2Fs41467-025-56062-y/MediaObjects/41467_2025_56062_Fig6_HTML.png", + "https:////media.springernature.com/lw685/springer-static/image/art%3A10.1038%2Fs41467-025-56062-y/MediaObjects/41467_2025_56062_Fig7_HTML.png", + "https:////media.springernature.com/lw685/springer-static/image/art%3A10.1038%2Fs41467-025-56062-y/MediaObjects/41467_2025_56062_Fig8_HTML.png" + ] + }, + { + 
"section_name": "Discussions", + "section_text": "Aiming at developing a highly interpretable discriminative model to uncover chemical synthesis mechanisms, this paper presents a Retro-MTGR framework. Based on molecular graphs, three related tasks are simultaneously considered in Retro-MTGR, where two major supervised discriminative tasks account for recognizing reaction centres and identifying LGs, and an auxiliary self-supervised task accounts for generating better atom embeddings.\n\nFirst, a comparison with 16 state-of-the-art methods demonstrates the superiority of Retro-MTGR. Additionally, its robustness and scalability are validated by different training-testing strategies and datasets of different sizes in both RTU and RTK scenarios.\n\nThen, an ablation study demonstrates the contributions of its modules to the prediction process, as follows. (1) As the most crucial module in Retro-MTGR, the multitask learning framework learns an RCP and the LGP simultaneously to comprehensively characterize synthesis reactions from their reaction centres to LGs, including the bond energy tendencies in reaction centres, the occurrence tendencies of LGs, and the associations between chemical entities in synthesis reactions. (2) An LGCoG, which plays the second-most crucial role in Retro-MTGR, captures both the individual occurrence tendencies and co-occurring associations of LGs. (3) An AEE based on GCL, which plays the third-most crucial role, enhances atom embeddings by leveraging chemical structural redundancy and the differences between a molecule and its synthons. (4) Bond energy, even when working as a single dimension, provides a nontrivial contribution to bond embeddings since bonds with high energies tend to be ordinary bonds. 
(5) Two numeric tricks (i.e., the utilization of dense atom representations and the elimination of large absolute co-occurrence variances) enable a better learning effect.\n\nMore importantly, multiple comprehensive investigations validate the interpretability of the chemical synthesis process of Retro-MTGR by answering two questions: why can a bond be the reaction centre, and what LGs are appropriate for a given synthon? The results demonstrate that Retro-MTGR can capture and illustrate the underlying chemical synthesis rules.\n\nSpecifically, the embedding space of bonds illustrates why a bond can be the reaction centre or not based on both USPTO-50K and USPTO-480K. (1) Bonds (e.g., double bonds, triple bonds, and aromatic bonds) with high energy (\u2265360\u2009kJ/mol) are always ordinary bonds and fall into unique communities. (2) Most single bonds with lower bond energies (\u2009<\u2009360\u2009kJ/mol) usually fall into two communities (reaction centres and ordinary bonds). (3) Two types of single bonds (i.e., C-N and C-O) with lower bond energies (\u2009<\u2009360\u2009kJ/mol) fall into >2 communities due to the diverse molecular structural topologies of their corresponding synthons. (4) The EP distributions of the atom pairs contained in bonds demonstrate that a bond is the reaction centre in a molecule if its member atoms tend to have opposite EPs (as reflected by their local substructures); otherwise, it is an ordinary bond.\n\nMoreover, the joint embedding space of synthons and LGs is appropriate for generating synthons because of the associations between chemical entities. 
In the embedding space, (1) two synthons in a reaction always have opposite EPs and are distant from each other; (2) a synthon and an LG belonging to a reactant usually have opposite EPs and are close to each other; (3) two LGs co-occurring in a reaction regularly have opposite EPs and are distant from each other; and (4) reaction-common LGs tend to spread widely and are occurrence-dominant, monatomic or structurally simple (e.g., H, Cl, Br, I, and OH), while reaction-specific LGs tend to gather w.r.t. their reaction types and are similar to each other in terms of structures. In addition, occurrence-dominant LG pairs always consist of two simple groups (e.g., \u2018H, OH\u2019, \u2018H, Cl\u2019, and \u2018H, Br\u2019).\n\nFinally, the practical capabilities of Retro-MTGR are evaluated in cases involving two novel drugs. The results reveal that the retrosynthesis routes inferred by Retro-MTGR are consistent with those achieved by chemical synthesis assays. In brief, our Retro-MTGR approach can provide prior guidance for retrosynthesis route planning.\n\nHowever, the widespread application of Retro-MTGR still faces the following limitations. We believe that an extended version of Retro-MTGR with the integration of additional synthetic factors (e.g. reaction yields, conditions, and reagents) can be a multistep retrosynthesis route planning strategy for the future.\n\nImbalanced LGs. An extreme imbalance is observed among diverse LGs in terms of occurrence. For example, \u2018H\u2019 occurs 27624 times, while \u2018CCCCCCCCCCCCO\u2019 occurs only once in USPTO-50K. Although the individual occurrence tendencies of LGs assist with the LGP task, Retro-MTGR has insufficient training data for minority LGs, resulting in bias towards the majority LGs. Generative algorithms (e.g., autoregressive models and generative adversarial networks) are promising approaches for generating more reactions involving the minority LGs to address this issue. 
Moreover, since the list of all possible LGs is enumerated from the training data, some LGs in the testing dataset are not fully covered. Retro-MTGR is at risk of failing in these cases. It is feasible to build a list of LGs with high coverage based on a larger dataset (e.g., USPTO-Full or PubChem).\n\nIncomplete multistep retrosynthesis. Retro-MTGR provides a core single-step retrosynthesis prediction tool and even an initial multistep retrosynthesis route by iteratively decomposing the scaffold of a product into commercial reactants. However, Retro-MTGR only accounts for popular broad-sense coupling reactions (generating one product molecule from two reactant molecules) and can\u2019t modify atoms in branches or small functional groups. The latter case involves four types of minority reactions, including deprotection, protection, reduction, and oxidation reactions. Although they do not need to modify molecule scaffolds, they play nontrivial roles (e.g., increasing yield rate and raw material availability) in multistep retrosynthesis. Therefore, Retro-MTGR should be extended to cover these reactions to attain a complete multistep retrosynthesis planning effect.\n\nLack of real bond energies. Similar to atom properties and bond types, theoretical bond energies are easily collected and surely improve bond embeddings. Although they are defined as the standard enthalpy changes exhibited by reactions, bond energies are limited in reflecting the real circumstances of reactions. The appropriate measurement of real bond energies, especially moderate energies, would yield improved bond embeddings.\n\nLack of numerical EPs. Our results demonstrate that the opposition of EPs plays an essential role in determining the interactions between reaction sites and LGs. Nevertheless, we are currently limited to personal expert experience for qualitatively estimating EPs. 
We believe that assay-determined quantitative EPs would significantly improve the effects of retrosynthesis prediction.", + "section_image": [] + }, + { + "section_name": "Methods", + "section_text": "Given a set of \\(n\\) chemical reactions \\(R=\\left\\{{r}_{1}^{i}+{r}_{2}^{i}={c}_{i}|i=1,\\ldots,n\\right\\}\\), the task is to find a retrosynthesis strategy for a newly designed target molecule \\(c\\) (i.e., to recommend reactants \\(\\left({r}_{1},{r}_{2}\\right)\\) for \\(c\\)), where the reactants \\({r}_{1}^{i}\\) and \\({r}_{2}^{i}\\) are two reactant molecules for the synthesis of the target molecule \\({c}_{i}\\). Remarkably, this task only focuses on the reactions involved in the scaffold decomposition process of the target molecule (i.e., broad-sense coupling reactions), which is the core of retrosynthesis planning. However, reactions (e.g., deprotections, protections, reductions, and oxidations), which involve modifying atoms in branches or bonds in small functional groups, are discarded.\n\nTo implement a chemist-like retrosynthesis process, we develop a Retro-MTGR framework, which contains an RCP module, an AEE module and an LGP. They account for two major tasks and one auxiliary task. The first major task, implemented by the RCP, is modelled as a binary discrimination problem, which recognizes the reaction centre \\({b}_{{u}^{*}{v}^{*}}\\) among all the bonds \\(\\left\\{{b}_{{uv}}\\right\\}\\) of the target molecule. Additionally, \\({b}_{{u}^{*}{v}^{*}}\\) is broken down to obtain two synthons \\({s}_{{u}^{*}}\\) and \\({s}_{{v}^{*}}\\), where \\(u,v\\) are two bonding atoms in \\(c\\). To support the first major task in terms of the associated atom embeddings, the auxiliary task (implemented by the AEE) is modelled as a self-supervised contrastive learning problem that characterizes the structural commonalities and differences between \\(c\\) and its synthons \\(\\left({s}_{{u}^{*}},{s}_{{v}^{*}}\\right)\\). 
The second major task is modelled as a multiclass discrimination problem that assigns appropriate LG \\(\\left({k}_{{u}^{*}},{k}_{{v}^{*}}\\right)\\) to the synthons \\(\\left({s}_{{u}^{*}},{s}_{{v}^{*}}\\right)\\) to form complete reactants \\(\\left({r}_{{u}^{*}},{r}_{{v}^{*}}\\right)\\) under enhanced LG dependence. All the symbols used are listed in Supplementary Table\u00a07.\n\nReaction centre identification is the first step in the retrosynthesis inference process. Inspired by the existing semi-template-based approaches, we primarily attempt to recognize the reaction centre among all the bonds of a given target molecule. In retrosynthesis, the bond at the reaction centre is broken. Thus, the task of reaction centre recognition can be naturally modelled as a binary discrimination problem, which recognizes the reaction centre \\({b}_{{u}^{*}{v}^{*}}\\) among all the bonds \\(\\left\\{{b}_{{uv}}\\right\\}\\) of the target molecule.\n\nFor this task, we design an RCP module, which includes an atom encoder, a bond-level readout layer, and a multilayer perceptron (MLP). The atom encoder is implemented by a multilayer MPNN to convert molecular graphs \\(G\\) into atom embeddings \\(\\left\\{{{{\\bf{a}}}}_{i}\\right\\}\\), which are further refined by an AEE. The bond-level readout layer generates bond embeddings \\(\\left\\{{{{\\bf{b}}}}_{{uv}}\\right\\}\\), which are further boosted by concatenating them with bond energy \\(g\\) and bond type embeddings \\(d\\). The MLP accounts for the discrimination of bonds through \\(y=F\\left({{{\\bf{b}}}}_{{uv}}\\right)\\), where \\(y=1\\) if \\({b}_{{uv}}\\) is the reaction centre and \\(y=0\\) otherwise.\n\nAccording to its chemical structure, each compound \\(m\\) is represented as a molecular graph \\(G=\\left(A,B\\right)\\), where \\(A\\) is the set of its atoms\\(\\,\\left\\{{a}_{i}\\right\\}\\), \\(B\\) is the set of its bonds \\(\\left\\{{b}_{{ij}}\\right\\}\\), and \\(i,j={\\mathrm{1,2}},\\ldots,\\left|A\\right|\\). 
Let \\({{\\bf{Q}}}\\in {R}^{N\\times N}\\left(N=\\left|A\\right|\\right)\\) be its adjacency matrix, in which \\({q}_{{ij}}=1\\) indicates the bond occurring \\(\\left({b}_{{ij}}\\in B\\right)\\) between two atoms (i.e., \\({a}_{i}\\) and \\({a}_{j}\\)) and\\(\\,{q}_{{ij}}=0\\) indicates the lack of a bond. Suppose that \\({{{\\bf{x}}}}_{i}^{0}\\in {R}^{n}\\) is the initial feature vector of atom \\({a}_{i}\\), which is usually coded into a vector containing one-hot-shaped atom types, the number of hydrogen atoms, and other attributes55. Usually, the one-hot encoding attributes in \\({{{\\bf{x}}}}^{0}\\) are sparse, while other attributes are dense (e.g., nonzero integers (numbers) of atom or degrees). Thus, \\({{{\\bf{x}}}}^{0}\\) is partially sparse or semi-sparse. An extra MLP maps it to a dense form (\\({{{\\bf{x}}}}_{i}^{0}\\in {R}^{n}\\to {{{\\bf{x}}}}_{i}\\in {R}^{p}\\)) to learn better embeddings, as suggested by Cheng et al.51\n\nBoth \\({{\\bf{Q}}}\\) and \\({{\\bf{x}}}\\) are input into a multilayer MPNN to generate atom embeddings \\(\\left\\{{{{\\bf{a}}}}_{i}\\right\\}\\) for molecule \\(c\\). The MPNN updates the embedding \\({{{\\bf{a}}}}_{i}\\) of each atom \\({a}_{i}\\) by aggregating those of its neighbouring atoms in a layer as follows:\n\nwhere \u2018\\({||}\\)\u2019 formally represents the vector concatenation operation32, \\({{{\\bf{a}}}}_{i}^{t}\\in {R}^{q}\\) denotes the embedding of atom \\(i\\) in the t-th layer of the MPNN, \\(N\\left({a}_{i}\\right)\\) denotes the neighbours of atom \\({a}_{i}\\) in the molecular graph \\(G\\), \\(\\sigma \\left(\\bullet \\right)\\) is a nonlinear activation function (e.g., \\({ReLU}\\)), and \\({{{\\bf{b}}}}^{t}\\) denotes a learnable bias. 
Moreover, \\({{{\\bf{w}}}}_{1}^{t}\\) indicates the weight across the neighbours of \\({a}_{i}\\), \\({{{\\bf{w}}}}_{2}^{t}\\) indicates the weight of \\({a}_{j}\\) in \\(N\\left({a}_{i}\\right)\\), and \\({{{\\bf{w}}}}_{3}^{t}\\) indicates the weight of \\({a}_{i}\\) w.r.t. layer t. Inspired by Gilmer56 and Kipf57, we believe that the weights \u2018\\({{\\bf{w}}}\\)\u2019 are specific to different layers. In addition, \\({{{\\bf{d}}}}_{{ij}}\\) is the one-hot coding vector of bond types, within which four bits denote the presence of single bonds, double bonds, triple bonds, and aromatic bonds. In short, the atom embedding update rules are defined as follows:\n\nAfter passing through the MLP and the MPNN in order, the initial feature vector \\({{{\\bf{x}}}}_{i}^{0}\\in {R}^{n}\\) of atom \\({a}_{i}\\) is mapped to its p-dimensional dense representation \\({{{\\bf{x}}}}_{i}\\in {R}^{p}\\) and its embedding \\({{{\\bf{a}}}}_{i}\\in {R}^{q}\\) in turn. Then, the atom embeddings \\(\\left\\{{{{\\bf{a}}}}_{i}\\right\\}\\) are further refined by the AEE module, which characterizes the structural commonalities and differences between the molecule and its synthons. This embedding is used by RCP to identify the reaction centre. Meanwhile, it is utilized by the LGP module to help find appropriate leaving groups for the synthons. All three tasks are associated together by shared atom embeddings.\n\nThe refined atom embeddings \\(\\left\\{{{{\\bf{a}}}}_{i}\\right\\}\\) are subsequently used to generate bond embeddings via the bond-level readout process. Let \\({b}_{{ij}}\\) be the bond connecting atoms \\({a}_{i}\\) and \\({a}_{j}\\). Unlike the ordinary molecule-level readout method (e.g., combining all atoms), the RCP model defines a bond-level readout function \\({R}_{B}\\left({a}_{i},{a}_{j}\\right)\\), which is augmented by bond energy because bonds with strong energies tend to be ordinary bonds43. 
The bond embedding \\({{{\\bf{b}}}}_{{ij}}\\) is obtained as follows:\n\nwhere \\({g}_{{ij}}\\) is the theoretical bond energy and \u2018\\({||}\\)\u2019 indicates the concatenation of atom embeddings and the corresponding bond energy.\n\nFinally, the RCP identifies the reaction centre among all the bonds of \\(m\\). \\(Y\\) is defined as the set of bond flags \\(\\left\\{{y}_{{ij}}\\right\\}\\) w.r.t. molecule \\(m\\), where \\({y}_{{ij}}=1\\) if \\({b}_{{ij}}\\) is the reaction centre (a positive sample); otherwise, \\({y}_{{ij}}=0\\) (a negative sample). Based on the abovementioned bond embeddings \\(\\{{{{\\bf{b}}}}_{{ij}}\\}\\), an MLP (denoted as \\({F}_{b}\\)) is constructed as the classifier to achieve such a bond identification effect (i.e., \\({y}_{{ij}}^{*}={F}_{b}\\left({{{\\bf{b}}}}_{{ij}}\\right)\\)). To train the model, the cross-entropy loss function calculated over all the training molecules is defined as follows:\n\nwhere \\(M\\) is the set of all the training molecules and \\({B}_{m}\\) is the bond set of \\(m\\).\n\nGCL has been applied in diverse areas (e.g., image clustering58, drug-target interaction prediction59, and Parkinsonian assessment60) because it can augment node representations via node dropping, edge perturbation, attribute masking or subgraph extraction42. In particular, recent works on molecular property prediction have demonstrated that molecular GCL (MGCL) can obtain better atom representations and molecular representations than other methods61. As we observed, GCL and retrosynthesis are highly analogous in terms of graph change (regarding a molecular structure as a graph). Inspired by this observation, we designed an AEE module based on GCL to boost atom embeddings.\n\nOur AEE is just an implementation of GCL that involves atom masking, bond deletion, and substructure removal. 
Akin to the existing GCL methods, our approach determines a contrastive positive sample based on a target molecule (product) and another different molecule. However, unlike these methods, we do not deliberately design contrastive positive samples via bond perturbation (i.e., node connectivity variation). In contrast, we directly use the target molecule (product) and the combination of its two synthons as a contrastive positive sample because the difference (the reaction centre) between the product and its combined synthons naturally plays a role in bond perturbation. The AEE module is expected to augment atom embeddings to better identify reaction centres.\n\nFormally, for a target molecule \\(m\\), we treat its synthons (\\({s}_{1}\\) and \\({s}_{2}\\)) as a new perturbed molecule \\(s\\). The \u2018perturbation\u2019 concept is taken from Liang et al.62. Perturbed molecules were originally referred to as incomplete molecules generated by randomly removing atoms or bonds from a target molecule63. In our work, the perturbed molecule \\(s\\) is specifically defined as the combined structure of two synthons. Compared to \\(m\\), \\(s\\) has no reaction centre. During contrastive learning, a positive sample is defined as the pair consisting of \\(m\\) and its perturbed molecule \\(s\\) since they are nearly the same. In contrast, a negative sample is defined as the pair consisting of \\(m\\) and another molecule \\(\\bar{s}\\) different from \\(m\\). Here, \\(\\bar{s}\\) refers to a molecule randomly selected from the training set. Contrastive learning pushes \\(m\\) and \\(s\\) as close together as possible while pushing \\(m\\) and \\(\\bar{s}\\) as far apart as possible, resulting in better atom embeddings.\n\nLet \\({{{\\bf{h}}}}_{m}\\), \\({{{\\bf{h}}}}_{s}\\), and \\({{{\\bf{h}}}}_{\\widetilde{s}}\\) be the respective embeddings of the above molecules. 
These molecule embeddings are generated by an atom encoder and a molecule-level readout function \\({R}_{M}\\left(\\cdot \\right)\\). The former shares parameters with the atom encoder used in the RCP module. For a given molecule, \\({R}_{M}\\left(\\cdot \\right)\\) aggregates the embeddings of all its atoms to generate a molecule-level embedding via average pooling: \\({{\\bf{h}}}=\\frac{1}{\\left|A\\right|}{\\sum }_{i=1}^{\\left|A\\right|}{{{\\bf{a}}}}_{i}\\).\n\nIn terms of contrastive learning, the molecule embedding pair of \\({{{\\bf{h}}}}_{m}\\) and \\({{{\\bf{h}}}}_{s}\\) is regarded as a positive sample, while the pair of \\({{{\\bf{h}}}}_{m}\\) and \\({{{\\bf{h}}}}_{\\widetilde{s}}\\) is taken as a negative sample. Our goal is to train a contrastive learning model that pushes \\({{{\\bf{h}}}}_{m}\\) and \\({{{\\bf{h}}}}_{s}\\) as close together as possible (similar) while pushing \\({{{\\bf{h}}}}_{m}\\) and \\({{{\\bf{h}}}}_{\\widetilde{s}}\\) as far apart as possible (different). For this purpose, we design a contrastive loss function as follows:\n\nThus, the AEE module enables an ingenious process for utilizing the chemical structural commonness and differences between a molecule and its synthons to produce enhanced atom embeddings for other tasks.\n\nOnce synthons are determined based on the reaction centre, chemists can obtain the corresponding reactants by attaching appropriate LGs to them. Thus, the LG recognition task can be modelled as a multiclass classification problem. More importantly, we believe that LGs are not independent but rather associated in terms of chemical synthesis. First, an excellent LG tends to be structurally simple (even monatomic) and to exhibit strong electron affinity and lower bond dissociation energies64. 
Moreover, two LGs in a synthesis reaction are associated since they form the side product (e.g., \\({{{\\rm{H}}}}_{2}{{\\rm{O}}}\\)) accompanying the target molecule.\n\nBased on these considerations, we propose an elaborate LGP based on multiclass classification to identify LG. Formally, let \\(K=\\{{k}_{i}{|i}=1,\\ldots,\\left|K\\right|\\}\\) be the list of all possible LGs, \\({b}_{u,v}\\) be the reaction centre of molecule \\(m\\), \\({a}_{u},{a}_{v}\\) be the reaction sites (i.e., the atoms forming \\({b}_{u,v}\\)), and \\({s}_{u},{s}_{v}\\) be the corresponding synthons. The task is to assign \\({s}_{u}\\) to an appropriate LG (i.e., \\({k}_{u}=F\\left({s}_{u}\\right)\\in K\\)) to form its corresponding reactants \\({r}_{u}\\) (i.e., \\(\\left\\{{r}_{u}={k}_{u}+{s}_{u}\\right\\}\\)), where \\({{\\mathscr{F}}}\\) indicates a mapping function from \\({s}_{u}\\) to \\({k}_{u}\\). Note that the list of all possible LGs is enumerated from the training data.\n\nTo capture the individual occurrence tendencies and co-occurrence associations of LGs, we construct an LG co-occurrence graph (LGCoG) \\({G}_{k}=\\{K,E\\}\\), where \\(K=\\{{k}_{i}{|i}=1,\\ldots,\\left|K\\right|\\}\\) denotes the set of nodes (LG) and \\(E=\\left\\{{e}_{{ij}}\\right\\}\\) denotes the set of weighted edges (normalized co-occurrences between the LGs). Each LG is a small chemical substructure (e.g., -OH or -B(OH)2) or an individual atom/ion (e.g., -Cl, -Br, or -H). The popular one-hot coding method is used to determine the initial node features \\(\\left\\{{{{\\bf{k}}}}_{i}^{0}\\right\\}\\). The edge building process consists of two steps. \\({{\\bf{U}}}=\\{{u}_{{ij}}\\}\\in {R}^{\\left|K\\right|\\times \\left|K\\right|}\\) is defined as the LG co-occurrence matrix, where \\({u}_{{ij}}\\) denotes the number of pairwise co-occurrences between \\({k}_{i}\\) and \\({k}_{j}\\). The co-occurrence of two LGs is counted if they are involved in the same reaction. 
For example, the co-occurrence of \u2018-H\u2019 and \u2018-OH\u2019 is counted once in the reaction \\({{{\\rm{R}}}}_{1}{{\\rm{C}}}\\left(={{\\rm{O}}}\\right){{\\rm{OH}}}+{{{\\rm{R}}}}_{2}{{\\rm{COH}}}\\to {{{\\rm{R}}}}_{1}{{\\rm{C}}}\\left(={{\\rm{O}}}\\right){{\\rm{OC}}}{{{\\rm{R}}}}_{2}+{{{\\rm{H}}}}_{2}{{\\rm{O}}}\\). After enumerating all the possible LG pairs, their co-occurrences \\(\\{{u}_{{ij}}\\}\\) are counted over all the reactions in the training dataset. Then, a probability matrix \\({{\\bf{P}}}\\) can be calculated via \\({{\\bf{U}}}\\). Therefore, \\({p}_{{ij}}\\) is calculated as follows:\n\nRemarkably, the normalization step in Formula (7) should be considered because the values of \\(\\{{u}_{{ij}}\\}\\) vary greatly. For example, the co-occurrence between \u2018-H\u2019 and \u2018-OH\u2019 is 9451, while that between \u2018-H\u2019 and \u2018-CC(\u2009=\u2009O)O\u2019 is only 1. Since small co-occurrence values are overwhelmed by large values during training, a trained model with a massive absolute co-occurrence variance would decrease retrosynthesis prediction performance. The ablation study conducted in the Results section demonstrates the effectiveness of the normalization step.\n\nWe set \\({p}_{{ij}}\\) as the weight of the edge from \\({k}_{j}\\) to \\({k}_{i}\\) (i.e., \\({e}_{{ij}}={p}_{{ij}}\\)). Thus, the embedding (\\({{{\\bf{k}}}}_{i}\\)) of LG \\({k}_{i}\\) can be represented by implementing an MPNN on \\({G}_{k}\\) as follows:\n\nwhere \\(\\sigma (\\bullet )\\) is a nonlinear activation function (i.e., ReLU), \\(j\\in N\\left({k}_{i}\\right)\\) is the neighbourhood of \\({k}_{i}\\), \\({{{\\bf{w}}}}_{1}^{t}\\) indicates the weight across the neighbours of LG \\({k}_{i}\\), \\({{{\\bf{w}}}}_{2}^{t}\\) indicates the weight of \\({k}_{j}\\) belonging to \\({{\\mathscr{N}}}\\left({k}_{i}\\right)\\), and\\(\\,{{{\\bf{w}}}}_{3}^{t}\\) indicates the weight of \\({k}_{i}\\) w.r.t. 
layer t.\n\nMoreover, inspired by chemists, we believe that the reaction centre and the local substructures around the reaction sites are crucial factors for determining the corresponding LGs. To characterize synthons, the reaction centre \\({b}_{{uv}}\\) is represented by its bond embedding \\({{{\\bf{b}}}}_{{uv}}\\) in the RCP, while the local substructure around reaction site \\({a}_{u}\\) is represented by the atom embedding \\({{{\\bf{a}}}}_{u}\\), which already aggregates its neighbours due to the MPNN contained in the RCP. Thus, the embedding of the synthon \\({s}_{u}\\) containing \\({a}_{u}\\) can be defined as their concatenation \\({{{\\bf{s}}}}_{u}=\\left[{{{\\bf{a}}}}_{u}{||}{{{\\bf{b}}}}_{{uv}}\\right]\\). Similarly, we can define the embedding of \\({s}_{v}\\) by \\({{{\\bf{s}}}}_{v}=\\left[{{{\\bf{a}}}}_{v}{||}{{{\\bf{b}}}}_{{uv}}\\right]\\).\n\nAfter obtaining the synthon and LG embeddings, we can directly determine the candidate LG that should be attached to a synthon. We assume that in a synthon-LG association, the synthon and LG tend to form a reactant if they are close to each other in their embedding space. As suggested by MLGL-MP65, we measure the proximity between a given synthon \\({s}_{u}\\) and a given LG \\({k}_{i}\\) as follows:\n\nThe proximity \\({\\hat{y}}_{{ui}}\\) is the prediction score regarding the attachment of the given synthon to the \\(i\\)-th LG in the LG set \\(K\\), and it reflects how likely \\({s}_{u}\\) is to be attached to \\({k}_{i}\\). It is anticipated that the proximity measure captures the underlying association between synthons and LGs.\n\nHowever, such a direct proximity measure would be senseless since the synthon embedding and LG embedding spaces are different vector spaces. To address this issue, we design an adaptor to map \\(\\left\\{{{{\\bf{s}}}}_{u}\\right\\}\\) to \\(\\left\\{{{{\\bf{k}}}}_{i}\\right\\}\\). 
The adaptor can be implemented by an MLP containing an input layer, a hidden layer, and an output layer. Thus, the final compound representation feature is defined as \\({{{\\bf{s}}}}_{u}^{*}\\,\\)=\u2009\\({{\\rm{M}}}{{\\rm{LP}}}\\left({{{\\bf{s}}}}_{u}\\right)\\in {R}^{s}\\), where \\(s\\) is the dimensionality of \\({{{\\bf{k}}}}_{i}\\). It is remarkable that such an adaptor enables the formation of a joint embedding space for synthons and LGs, where the inter-associations between synthons and LGs, the intra-associations between synthons, and the intra-associations between LGs can be characterized simultaneously.\n\nFinally, the mean squared error (MSE) loss function is used when training the LGP as follows:\n\nwhere \\({y}_{{ui}}\\in \\{0,1\\}\\) is the true label indicating whether a synthon \\({s}_{u}\\) is attached to an LG \\({k}_{i}\\), \\({\\hat{y}}_{{ui}}\\) is the corresponding score output by the LGP, \\(M\\) is the total number of training molecules, and \\(2M\\) represents the number of corresponding synthons.\n\nTo learn the bond energy tendencies of reaction centres, the occurrence tendencies of LGs, and the associations between chemical entities in synthesis reactions, we simultaneously train the reaction centre recognition and LG identification modules under the framework of multitask learning66,67. During training, Retro-MTGR learns three related tasks, including two major tasks (reaction centre recognition and leaving group prediction) and one auxiliary task (atom embedding enhancement via GCL). Accordingly, we jointly optimize the three loss functions formed w.r.t. these tasks into a linear combination as follows:\n\nwhere \\({\\sum }_{i=1}^{3}{w}_{i}=1\\) denotes the normalized hyperparameters for adjusting the task weights. 
The tuning process for finding the optimal values of \\(\\left\\{{w}_{i}\\right\\}\\) can be found in Supplementary Fig.\u00a02.\n\nNotably, the training process of Retro-MTGR is different from its testing procedure because the contrastive learning-based AEE should be removed from the testing process. A similar strategy can be found in Liu et al.68. Technically, the training process of Retro-MTGR involves the atom encoder, AEE, RCP, and LGP modules. After training, a set of neural networks is constructed, including the MLP and the MPNN in the atom encoder, the MLP in the RCP, and the adaptor and the MPNN in the LGP. Note that the AEE contains no neural network but instead has a molecule-level readout and a contrastive loss, which help obtain better atom embeddings during training. During the testing process performed for a given target molecule, Retro-MTGR leverages the trained neural networks in the atom encoder, RCP, and LGP modules to determine its reaction centre and the LGs of its two synthons.\n\nTo measure the retrosynthesis prediction effects of the tested methods, we adopt the top-k accuracy, which is a prediction metric that is popularly used in the existing works32,38.\n\nTechnically, for a given target molecule, the trained model (i.e., Retro-MTGR) infers the bond candidates for its reaction centre, which are ordered by their prediction scores. A higher score indicates a greater probability of the corresponding bond candidate being the reaction centre. After ranking the bond candidates, we pay attention to the first k bond candidates, such as the top-1, top-3, and top-5 results. Furthermore, each bond candidate accounts for a pair of synthons, which are assigned two LGs to form a predicted reactant pair. Here, Retro-MTGR provides predictions for only the top-1 LGs. Thus, we obtain the top-k predicted reactant pairs based on the top-k bond candidates. 
Among the top-k predicted reactant pairs, a pair of reactants is counted as a correct prediction if they are the same as the real reactants of the target molecule. In this case, we regard the target molecule as a correctly predicted sample in terms of the top-k prediction process. Therefore, the top-k accuracy is defined as the ratio of the number of correctly predicted target molecules out of top-k predicted candidates to the total number of target molecules.\n\nModel trainings use the Adam optimizer for gradient descent optimization with an initial learning rate set to 0.01. The training process lasted approximately 73.5\u2009h (USPTO-50K). All experiments were conducted on a hardware setup comprising four NVIDIA A800 GPUs (each with 64 GB memory) and an Intel Xeon Gold 6348 CPU @ 2.60\u2009GHz.\n\nFurther information on research design is available in the\u00a0Nature Portfolio Reporting Summary linked to this article.", + "section_image": [] + }, + { + "section_name": "Data availability", + "section_text": "We used the benchmark datasets USPTO-50K, USPTO-MIT, and USPTO-FULL for all our experiments. For a fair comparison, we used the same version and splits as those provided by Yan et al.36 for USPTO-50K and USPTO-FULL. The USPTO-MIT dataset is provided by Chen et al.16. The source data can be found at https://doi.org/10.5281/zenodo.1434632469.\u00a0Source data are provided with this paper.", + "section_image": [] + }, + { + "section_name": "Code availability", + "section_text": "All code of the manuscript, alongside a description of necessary steps for reproducing results, can be found in a GitHub repository accompanying this manuscript: https://github.com/zpczaizheli/Retro-MTGR69. Additionally, the source code is also available at Zenodo repertory through https://doi.org/10.5281/zenodo.1434632469.", + "section_image": [] + }, + { + "section_name": "References", + "section_text": "Zhao, P. et al. 
Targets preliminary screening for the fresh natural drug molecule based on Cosine-correlation and similarity-comparison of local network. J. Transl. Med. 20, 67 (2022).\n\nArticle\u00a0\n CAS\u00a0\n PubMed\u00a0\n PubMed Central\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nLi, J. N., Yang, G., Zhao, P. C., Wei, X. X. & Shi, J. Y. CProMG: controllable protein-oriented molecule generation with desired binding affinity and drug-like properties. Bioinformatics 39, i326\u2013i336 (2023).\n\nArticle\u00a0\n PubMed\u00a0\n PubMed Central\u00a0\n \n Google Scholar\u00a0\n \n\nDu, B.-X., Xu, Y., Yiu, S.-M., Yu, H., Shi, J.-Y. MTGL-ADMET: A Novel Multi-task Graph Learning Framework for ADMET Prediction Enhanced by Status-Theory and Maximum Flow. In: Research in Computational Molecular Biology: 27th Annual International Conference, RECOMB 2023, Istanbul, Turkey, April 16\u201319, 2023, Proceedings (Springer, 2023).\n\nGupta, R. et al. Artificial intelligence to deep learning: machine intelligence approach for drug discovery. Mol. Divers 25, 1315\u20131360 (2021).\n\nArticle\u00a0\n CAS\u00a0\n PubMed\u00a0\n PubMed Central\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nVatansever, S. et al. Artificial intelligence and machine learning-aided drug discovery in central nervous system diseases: State-of-the-arts and future directions. Med. Res. Rev. 41, 1427\u20131473 (2021).\n\nArticle\u00a0\n PubMed\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nLee, A. A. et al. Molecular transformer unifies reaction prediction and retrosynthesis across pharma chemical space. Chem. Commun. 55, 12152\u201312155 (2019).\n\nArticle\u00a0\n CAS\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nDong, J., Zhao, M., Liu, Y., Su, Y., Zeng, X. Deep learning in retrosynthesis planning: datasets, models and tools. Brief Bioinform, 23, bbab391 (2022).\n\nColey, C. W., Green, W. H. & Jensen, K. F. Machine learning in computer-aided synthesis planning. Acc. Chem. 
Res 51, 1281\u20131289 (2018).\n\nArticle\u00a0\n CAS\u00a0\n PubMed\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nJiang, Y. et al. Artificial intelligence for retrosynthesis prediction. Engineering 25, 32\u201350 (2023).\n\nArticle\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nYan, M. & Baran, P. S. Drug discovery: fighting evolution with chemical synthesis. Nature 533, 326\u2013327 (2016).\n\nArticle\u00a0\n ADS\u00a0\n CAS\u00a0\n PubMed\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nJiang, Y., Ying, W., Wu, F., Huang, Z., Kuang, K., Wang, Z. Learning chemical rules of retrosynthesis with pre-training. In: Proceedings of the AAAI Conference on Artificial Intelligence) (2023).\n\nColey, C. W., Rogers, L., Green, W. H. & Jensen, K. F. Computer-Assisted Retrosynthesis Based on Molecular Similarity. ACS Cent. Sci. 3, 1237\u20131245 (2017).\n\nArticle\u00a0\n CAS\u00a0\n PubMed\u00a0\n PubMed Central\u00a0\n \n Google Scholar\u00a0\n \n\nDai, H., Li, C., Coley, C., Dai, B., Song, L. Retrosynthesis prediction with conditional graph logic network. Advances in Neural Information Processing Systems 32 (NeurIPS, 2019).\n\nSegler, M. H. S. & Waller, M. P. Neural-symbolic machine learning for retrosynthesis and reaction prediction. Chemistry 23, 5966\u20135971 (2017).\n\nArticle\u00a0\n CAS\u00a0\n PubMed\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nBaylon, J. L., Cilfone, N. A., Gulcher, J. R. & Chittenden, T. W. Enhancing retrosynthetic reaction prediction with deep learning using multiscale reaction classification. J. Chem. Inf. Model 59, 673\u2013688 (2019).\n\nArticle\u00a0\n CAS\u00a0\n PubMed\u00a0\n \n Google Scholar\u00a0\n \n\nChen, S. & Jung, Y. Deep retrosynthetic reaction prediction using local reactivity and global attention. JACS Au 1, 1612\u20131620 (2021).\n\nArticle\u00a0\n CAS\u00a0\n PubMed\u00a0\n PubMed Central\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nSeidl, P. et al. 
Improving few- and zero-shot reaction template prediction using modern Hopfield networks. J. Chem. Inf. Model 62, 2111\u20132120 (2022).\n\nArticle\u00a0\n CAS\u00a0\n PubMed\u00a0\n PubMed Central\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nSegler, M. H. S. & Waller, M. P. Modelling chemical reasoning to predict and invent reactions. Chemistry 23, 6118\u20136128 (2017).\n\nArticle\u00a0\n CAS\u00a0\n PubMed\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nWeininger, D. SMILES, a chemical language and information system. 1. Introduction to methodology and encoding rules. J. Chem. Inf. Comput. Sci. 28, 31\u201336 (1988).\n\nArticle\u00a0\n CAS\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nLiu, B. et al. Retrosynthetic reaction prediction using neural sequence-to-sequence models. ACS Cent. Sci. 3, 1103\u20131113 (2017).\n\nArticle\u00a0\n CAS\u00a0\n PubMed\u00a0\n PubMed Central\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nKalyan, K. S., Rajasekharan, A. & Sangeetha, S. AMMU: a survey of transformer-based biomedical pretrained language models. J. Biomed. Inform. 126, 103982 (2022).\n\nArticle\u00a0\n PubMed\u00a0\n \n Google Scholar\u00a0\n \n\nVaswani A. et al. Attention is all you need. Advances in neural information processing systems 30, (NeurIPS, 2017).\n\nKarpov P., Godin G., Tetko IV. A transformer model for retrosynthesis. In: International Conference on Artificial Neural Networks (Springer, 2019).\n\nLowe D. M. Extraction of chemical structures and reactions from the literature (University of Cambridge, 2012).\n\nZheng, S., Rao, J., Zhang, Z., Xu, J. & Yang, Y. Predicting retrosynthetic reactions using self-corrected transformer neural networks. J. Chem. Inf. Model 60, 47\u201355 (2020).\n\nArticle\u00a0\n CAS\u00a0\n PubMed\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nUcak, U. V., Ashyrmamatov, I., Ko, J. & Lee, J. Retrosynthetic reaction pathway prediction through neural machine translation of atomic environments. Nat. Commun. 
13, 1186 (2022).\n\nArticle\u00a0\n ADS\u00a0\n CAS\u00a0\n PubMed\u00a0\n PubMed Central\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nFang, L., Li, J., Zhao, M., Tan, L. & Lou, J. G. Single-step retrosynthesis prediction by leveraging commonly preserved substructures. Nat. Commun. 14, 2446 (2023).\n\nArticle\u00a0\n ADS\u00a0\n CAS\u00a0\n PubMed\u00a0\n PubMed Central\u00a0\n \n Google Scholar\u00a0\n \n\nWan Y., Hsieh C.-Y., Liao B., Zhang S. Retroformer: Pushing the limits of end-to-end retrosynthesis transformer. In: International Conference on Machine Learning (PMLR, 2022).\n\nMao, K. et al. Molecular graph enhanced transformer for retrosynthesis prediction. Neurocomputing 457, 193\u2013202 (2021).\n\nArticle\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nLin, Z., Yin, S., Shi, L., Zhou, W. & Zhang, Y. J. G2GT: retrosynthesis prediction with graph-to-graph attention neural network and self-training. J. Chem. Inf. Model 63, 1894\u20131905 (2023).\n\nArticle\u00a0\n CAS\u00a0\n PubMed\u00a0\n \n Google Scholar\u00a0\n \n\nTu, Z. & Coley, C. W. Permutation invariant graph-to-sequence model for template-free retrosynthesis and reaction prediction. J. Chem. Inf. Model 62, 3503\u20133513 (2022).\n\nArticle\u00a0\n CAS\u00a0\n PubMed\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nZhong, W., Yang, Z. & Chen, C. Y. Retrosynthesis prediction using an end-to-end graph generative architecture for molecular graph editing. Nat. Commun. 14, 3009 (2023).\n\nArticle\u00a0\n ADS\u00a0\n CAS\u00a0\n PubMed\u00a0\n PubMed Central\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nde Souza, R., Miranda, L. S. M. & Bornscheuer, U. T. A retrosynthesis approach for biocatalysis in organic synthesis. Chemistry 23, 12040\u201312063 (2017).\n\nArticle\u00a0\n PubMed\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nWang, X. et al. RetroPrime: a diverse, plausible and transformer-based method for single-step retrosynthesis predictions. Chem. Eng. J. 
420, 129845 (2021).\n\nArticle\u00a0\n CAS\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nShi, C., Xu, M., Guo, H., Zhang, M., Tang, J. A graph to graphs framework for retrosynthesis prediction. In: International conference on machine learning (PMLR, 2020).\n\nYan, C. et al. Retroxpert: decompose retrosynthesis prediction like a chemist. Adv. Neural Inf. Process. Syst. 33, 11248\u201311258 (2020).\n\n\n Google Scholar\u00a0\n \n\nSomnath, V. R., Bunne, C., Coley, C., Krause, A. & Barzilay, R. Learning graph models for retrosynthesis prediction. Adv. Neural Inf. Process. Syst. 34, 9405\u20139415 (2021).\n\n\n Google Scholar\u00a0\n \n\nChen, Z., Ayinde, O. R., Fuchs, J. R., Sun, H. & Ning, X. G(2)Retro as a two-step graph generative models for retrosynthesis prediction. Commun. Chem. 6, 102 (2023).\n\nArticle\u00a0\n PubMed\u00a0\n PubMed Central\u00a0\n \n Google Scholar\u00a0\n \n\nSacha, M. et al. Molecule Edit Graph Attention Network: Modeling Chemical Reactions as Sequences of Graph Edits. J. Chem. Inf. Model 61, 3273\u20133284 (2021).\n\nArticle\u00a0\n CAS\u00a0\n PubMed\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nYu K. et al. Double-ended synthesis planning with goal-constrained bidirectional search[J]. arXiv preprint arXiv:2407.06334, 2024.\n\nAyers, P. W., Anderson, J. S., Rodriguez, J. I. & Jawed, Z. Indices for predicting the quality of leaving groups. Phys. Chem. Chem. Phys. 7, 1918\u20131925 (2005).\n\nArticle\u00a0\n CAS\u00a0\n PubMed\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nYou, Y. et al. Graph contrastive learning with augmentations. Adv. neural Inf. Process. Syst. 33, 5812\u20135823 (2020).\n\nMATH\u00a0\n \n Google Scholar\u00a0\n \n\nTreptow, R. S. Bond energies and enthalpies: an often neglected difference. J. Chem. Educ. 72, 497 (1995).\n\nArticle\u00a0\n CAS\u00a0\n \n Google Scholar\u00a0\n \n\nCooper, M. M. & Klymkowsky, M. W. 
The trouble with chemical energy: why understanding bond energies requires an interdisciplinary systems approach. CBE Life Sci. Educ. 12, 306\u2013312 (2013).\n\nArticle\u00a0\n PubMed\u00a0\n PubMed Central\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nHaynes W. M. CRC handbook of chemistry and physics. CRC Press (2014).\n\nSchneider, N., Stiefl, N. & Landrum, G. A. What\u2019s What: the (nearly) definitive guide to reaction role assignment. J. Chem. Inf. Model 56, 2336\u20132346 (2016).\n\nArticle\u00a0\n CAS\u00a0\n PubMed\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nWarren S., Wyatt P. Organic synthesis: the disconnection approach. (John Wiley & Sons, 2008).\n\nMcOmie, John Frederick William, ed. \u201cProtective groups in organic chemistry.\u201d 98, (1973).\n\nBurns, N. Z., Baran, P. S. & Hoffmann, R. W. Redox economy in organic synthesis. Angew. Chem. Int. Ed. 48, 2854\u20132867 (2009).\n\nArticle\u00a0\n CAS\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nCorey E. J. The logic of chemical synthesis[M]. \u0420\u0438\u043f\u043e\u043b \u041a\u043b\u0430\u0441\u0441\u0438\u043a, (1991).\n\nCheng H.-T. et al. Wide & deep learning for recommender systems. In: Proceedings of the 1st workshop on deep learning for recommender systems (2016).\n\nZhong, Z. et al. Root-aligned SMILES: a tight representation for chemical reaction prediction. Chem. Sci. 13, 9023\u20139034 (2022).\n\nArticle\u00a0\n CAS\u00a0\n PubMed\u00a0\n PubMed Central\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nBurness, C. B. & Scott, L. J. Sonidegib: a review in locally advanced basal cell carcinoma. Target Oncol. 11, 239\u2013246 (2016).\n\nArticle\u00a0\n PubMed\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nNolan, M. L. & Scott, L. J. Acotiamide: first global approval. Drugs 73, 1377\u20131383 (2013).\n\nArticle\u00a0\n \n Google Scholar\u00a0\n \n\nWu, Z. et al. Mining toxicity information from large amounts of toxicity data. J. Med. Chem. 
64, 6924\u20136936 (2021).\n\nArticle\u00a0\n CAS\u00a0\n PubMed\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nGilmer, J., Schoenholz, S. S., Riley, P. F., Vinyals, O., Dahl, G. E. Neural message passing for quantum chemistry. In: International conference on machine learning (PMLR, 2017).\n\nKipf T. N., Welling M. Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:160902907, (2016).\n\nFang, U., Li, J., Lu, X., Mian, A. & Gu, Z. Robust image clustering via context-aware contrastive graph learning. Pattern Recognit. 138, 109340 (2023).\n\nArticle\u00a0\n \n Google Scholar\u00a0\n \n\nLi, Y., Qiao, G., Gao, X. & Wang, G. Supervised graph co-contrastive learning for drug\u2013target interaction prediction. Bioinformatics 38, 2847\u20132854 (2022).\n\nArticle\u00a0\n CAS\u00a0\n PubMed\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nGuo, R., Li, H., Zhang, C. & Qian, X. A tree-structure-guided graph convolutional network with contrastive learning for the assessment of Parkinsonian hand movements. Med. Image Anal. 81, 102560 (2022).\n\nArticle\u00a0\n PubMed\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nWang, Y., Wang, J., Cao, Z. & Barati Farimani, A. Molecular contrastive learning of representations via graph neural networks. Nat. Mach. Intell. 4, 279\u2013287 (2022).\n\nArticle\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nLiang, H. et al. Graph contrastive learning with implicit augmentations. Neural Netw. 163, 156\u2013164 (2023).\n\nArticle\u00a0\n PubMed\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nWang, Y., Magar, R., Liang, C. & Barati Farimani, A. Improving molecular contrastive learning via faulty negative mitigation and decomposed fragment contrast. J. Chem. Inf. Model 62, 2713\u20132725 (2022).\n\nArticle\u00a0\n CAS\u00a0\n PubMed\u00a0\n \n Google Scholar\u00a0\n \n\nYang, J.-D., Ji, P., Xue, X.-S. & Cheng, J.-P. Recent advances and advisable applications of bond energetics in organic chemistry. 
J. Am. Chem. Soc. 140, 8611\u20138623 (2018).\n\nArticle\u00a0\n CAS\u00a0\n PubMed\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nDu, B. X. et al. MLGL-MP: a multi-label graph learning framework enhanced by pathway interdependence for metabolic pathway prediction. Bioinformatics 38, i325\u2013i332 (2022).\n\nArticle\u00a0\n PubMed\u00a0\n PubMed Central\u00a0\n \n Google Scholar\u00a0\n \n\nRuder, S. An overview of multi-task learning in deep neural networks. arXiv preprint arXiv:170605098, (2017).\n\nZhang, Y. & Yang, Q. A survey on multi-task learning. IEEE Trans. Knowl. Data Eng. 34, 5586\u20135609 (2021).\n\nArticle\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nLiu, H., Huang, Y., Liu, X. & Deng, L. Attention-wise masked graph contrastive learning for predicting molecular property. Brief. Bioinforma. 23, bbac303 (2022).\n\nArticle\u00a0\n MATH\u00a0\n \n Google Scholar\u00a0\n \n\nZhao, P. et al. Single-step Retrosynthesis Prediction via Multitask Graph Representation Learning. 
Retro-MTGR, https://doi.org/10.5281/zenodo.14346324 (2024).\n\nDownload references", + "section_image": [] + }, + { + "section_name": "Acknowledgements", + "section_text": "This work was supported by the National Natural Science Foundation of China (62372375 J.Y.S.), the Shaanxi Province Key R&D Program (2023-YBSF-114 J.Y.S.), the CAAI-Huawei Mind Spore Open Fund (CAAIXSJLJJ-2022-035A J.Y.S.), and the Scientific and Technological Innovation Project of China Academy of Chinese Medical Sciences (CI2023C065YLL C.L.).", + "section_image": [] + }, + { + "section_name": "Author information", + "section_text": "School of Life Sciences, Northwestern Polytechnical University, Xi\u2019an, China\n\nPeng-Cheng Zhao,\u00a0Xue-Xin Wei,\u00a0Qiong Wang,\u00a0Jia-Ning Li,\u00a0Jie Shang\u00a0&\u00a0Jian-Yu Shi\n\nSchool of Chemistry and Chemical Engineering, Northwestern Polytechnical University, Xi\u2019an, China\n\nQi-Hao Wang\n\nInstitute of Basic Research in Clinical Medicine China Academy of Chinese Medical Sciences, Beijing, China\n\nCheng Lu\n\nSearch author on:PubMed\u00a0Google Scholar\n\nSearch author on:PubMed\u00a0Google Scholar\n\nSearch author on:PubMed\u00a0Google Scholar\n\nSearch author on:PubMed\u00a0Google Scholar\n\nSearch author on:PubMed\u00a0Google Scholar\n\nSearch author on:PubMed\u00a0Google Scholar\n\nSearch author on:PubMed\u00a0Google Scholar\n\nSearch author on:PubMed\u00a0Google Scholar\n\nP.Z., X.W., and J.Y.S. designed the research. P.Z., X.W., Q.W., and Q.H.W. handled the data processing. J.L. and J.S. evaluated the performance of single-step retrosynthesis. P.Z. and J.Y.S. conducted the experiments. P.Z., C.L., and J.Y.S. wrote the manuscript. P.Z., C.L., J.S., and J.Y.S. revised the manuscript. 
All authors reviewed and approved the final manuscript.\n\nCorrespondence to\n Jie Shang, Cheng Lu or Jian-Yu Shi.", + "section_image": [] + }, + { + "section_name": "Ethics declarations", + "section_text": "The authors declare no competing interests.", + "section_image": [] + }, + { + "section_name": "Peer review", + "section_text": "Nature Communications thanks the anonymous, reviewer(s) for their contribution to the peer review of this work. A peer review file is available.", + "section_image": [] + }, + { + "section_name": "Additional information", + "section_text": "Publisher\u2019s note Springer Nature remains neutral with regard to jurisdictional claims in published maps and institutional affiliations.", + "section_image": [] + }, + { + "section_name": "Source data", + "section_text": "", + "section_image": [] + }, + { + "section_name": "Rights and permissions", + "section_text": "Open Access This article is licensed under a Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License, which permits any non-commercial use, sharing, distribution and reproduction in any medium or format, as long as you give appropriate credit to the original author(s) and the source, provide a link to the Creative Commons licence, and indicate if you modified the licensed material. You do not have permission under this licence to share adapted material derived from this article or parts of it. The images or other third party material in this article are included in the article\u2019s Creative Commons licence, unless indicated otherwise in a credit line to the material. If material is not included in the article\u2019s Creative Commons licence and your intended use is not permitted by statutory regulation or exceeds the permitted use, you will need to obtain permission directly from the copyright holder. 
To view a copy of this licence, visit http://creativecommons.org/licenses/by-nc-nd/4.0/.\n\nReprints and permissions", + "section_image": [] + }, + { + "section_name": "About this article", + "section_text": "Zhao, PC., Wei, XX., Wang, Q. et al. Single-step retrosynthesis prediction via multitask graph representation learning.\n Nat Commun 16, 814 (2025). https://doi.org/10.1038/s41467-025-56062-y\n\nDownload citation\n\nReceived: 10 August 2023\n\nAccepted: 08 January 2025\n\nPublished: 18 January 2025\n\nVersion of record: 18 January 2025\n\nDOI: https://doi.org/10.1038/s41467-025-56062-y\n\nAnyone you share the following link with will be able to read this content:\n\nSorry, a shareable link is not currently available for this article.\n\n\n\n\n Provided by the Springer Nature SharedIt content-sharing initiative\n ", + "section_image": [ + "https://data:image/svg+xml;base64,<svg height="81" width="57" xmlns="http://www.w3.org/2000/svg"><g fill="none" fill-rule="evenodd"><path d="m17.35 35.45 21.3-14.2v-17.03h-21.3" fill="#989898"/><path d="m38.65 35.45-21.3-14.2v-17.03h21.3" fill="#747474"/><path d="m28 .5c-12.98 0-23.5 10.52-23.5 23.5s10.52 23.5 23.5 23.5 23.5-10.52 23.5-23.5c0-6.23-2.48-12.21-6.88-16.62-4.41-4.4-10.39-6.88-16.62-6.88zm0 41.25c-9.8 0-17.75-7.95-17.75-17.75s7.95-17.75 17.75-17.75 17.75 7.95 17.75 17.75c0 4.71-1.87 9.22-5.2 12.55s-7.84 5.2-12.55 5.2z" fill="#535353"/><path d="m41 36c-5.81 6.23-15.23 7.45-22.43 2.9-7.21-4.55-10.16-13.57-7.03-21.5l-4.92-3.11c-4.95 10.7-1.19 23.42 8.78 29.71 9.97 6.3 23.07 4.22 30.6-4.86z" fill="#9c9c9c"/><path d="m.2 58.45c0-.75.11-1.42.33-2.01s.52-1.09.91-1.5c.38-.41.83-.73 1.34-.94.51-.22 1.06-.32 1.65-.32.56 0 1.06.11 1.51.35.44.23.81.5 1.1.81l-.91 1.01c-.24-.24-.49-.42-.75-.56-.27-.13-.58-.2-.93-.2-.39 0-.73.08-1.05.23-.31.16-.58.37-.81.66-.23.28-.41.63-.53 1.04-.13.41-.19.88-.19 1.39 0 1.04.23 1.86.68 2.46.45.59 1.06.88 1.84.88.41 0 .77-.07 1.07-.23s.59-.39.85-.68l.91 1c-.38.43-.8.76-1.28.99-.47.22-1 
.34-1.58.34-.59 0-1.13-.1-1.64-.31-.5-.2-.94-.51-1.31-.91-.38-.4-.67-.9-.88-1.48-.22-.59-.33-1.26-.33-2.02zm8.4-5.33h1.61v2.54l-.05 1.33c.29-.27.61-.51.96-.72s.76-.31 1.24-.31c.73 0 1.27.23 1.61.71.33.47.5 1.14.5 2.02v4.31h-1.61v-4.1c0-.57-.08-.97-.25-1.21-.17-.23-.45-.35-.83-.35-.3 0-.56.08-.79.22-.23.15-.49.36-.78.64v4.8h-1.61zm7.37 6.45c0-.56.09-1.06.26-1.51.18-.45.42-.83.71-1.14.29-.3.63-.54 1.01-.71.39-.17.78-.25 1.18-.25.47 0 .88.08 1.23.24.36.16.65.38.89.67s.42.63.54 1.03c.12.41.18.84.18 1.32 0 .32-.02.57-.07.76h-4.36c.07.62.29 1.1.65 1.44.36.33.82.5 1.38.5.29 0 .57-.04.83-.13s.51-.21.76-.37l.55 1.01c-.33.21-.69.39-1.09.53-.41.14-.83.21-1.26.21-.48 0-.92-.08-1.34-.25-.41-.16-.76-.4-1.07-.7-.31-.31-.55-.69-.72-1.13-.18-.44-.26-.95-.26-1.52zm4.6-.62c0-.55-.11-.98-.34-1.28-.23-.31-.58-.47-1.06-.47-.41 0-.77.15-1.07.45-.31.29-.5.73-.58 1.3zm2.5.62c0-.57.09-1.08.28-1.53.18-.44.43-.82.75-1.13s.69-.54 1.1-.71c.42-.16.85-.24 1.31-.24.45 0 .84.08 1.17.23s.61.34.85.57l-.77 1.02c-.19-.16-.38-.28-.56-.37-.19-.09-.39-.14-.61-.14-.56 0-1.01.21-1.35.63-.35.41-.52.97-.52 1.67 0 .69.17 1.24.51 1.66.34.41.78.62 1.32.62.28 0 .54-.06.78-.17.24-.12.45-.26.64-.42l.67 1.03c-.33.29-.69.51-1.08.65-.39.15-.78.23-1.18.23-.46 0-.9-.08-1.31-.24-.4-.16-.75-.39-1.05-.7s-.53-.69-.7-1.13c-.17-.45-.25-.96-.25-1.53zm6.91-6.45h1.58v6.17h.05l2.54-3.16h1.77l-2.35 2.8 2.59 4.07h-1.75l-1.77-2.98-1.08 1.23v1.75h-1.58zm13.69 1.27c-.25-.11-.5-.17-.75-.17-.58 0-.87.39-.87 1.16v.75h1.34v1.27h-1.34v5.6h-1.61v-5.6h-.92v-1.2l.92-.07v-.72c0-.35.04-.68.13-.98.08-.31.21-.57.4-.79s.42-.39.71-.51c.28-.12.63-.18 1.04-.18.24 0 .48.02.69.07.22.05.41.1.57.17zm.48 5.18c0-.57.09-1.08.27-1.53.17-.44.41-.82.72-1.13.3-.31.65-.54 1.04-.71.39-.16.8-.24 1.23-.24s.84.08 1.24.24c.4.17.74.4 1.04.71s.54.69.72 1.13c.19.45.28.96.28 1.53s-.09 1.08-.28 1.53c-.18.44-.42.82-.72 1.13s-.64.54-1.04.7-.81.24-1.24.24-.84-.08-1.23-.24-.74-.39-1.04-.7c-.31-.31-.55-.69-.72-1.13-.18-.45-.27-.96-.27-1.53zm1.65 0c0 .69.14 1.24.43 
1.66.28.41.68.62 1.18.62.51 0 .9-.21 1.19-.62.29-.42.44-.97.44-1.66 0-.7-.15-1.26-.44-1.67-.29-.42-.68-.63-1.19-.63-.5 0-.9.21-1.18.63-.29.41-.43.97-.43 1.67zm6.48-3.44h1.33l.12 1.21h.05c.24-.44.54-.79.88-1.02.35-.24.7-.36 1.07-.36.32 0 .59.05.78.14l-.28 1.4-.33-.09c-.11-.01-.23-.02-.38-.02-.27 0-.56.1-.86.31s-.55.58-.77 1.1v4.2h-1.61zm-47.87 15h1.61v4.1c0 .57.08.97.25 1.2.17.24.44.35.81.35.3 0 .57-.07.8-.22.22-.15.47-.39.73-.73v-4.7h1.61v6.87h-1.32l-.12-1.01h-.04c-.3.36-.63.64-.98.86-.35.21-.76.32-1.24.32-.73 0-1.27-.24-1.61-.71-.33-.47-.5-1.14-.5-2.02zm9.46 7.43v2.16h-1.61v-9.59h1.33l.12.72h.05c.29-.24.61-.45.97-.63.35-.17.72-.26 1.1-.26.43 0 .81.08 1.15.24.33.17.61.4.84.71.24.31.41.68.53 1.11.13.42.19.91.19 1.44 0 .59-.09 1.11-.25 1.57-.16.47-.38.85-.65 1.16-.27.32-.58.56-.94.73-.35.16-.72.25-1.1.25-.3 0-.6-.07-.9-.2s-.59-.31-.87-.56zm0-2.3c.26.22.5.37.73.45.24.09.46.13.66.13.46 0 .84-.2 1.15-.6.31-.39.46-.98.46-1.77 0-.69-.12-1.22-.35-1.61-.23-.38-.61-.57-1.13-.57-.49 0-.99.26-1.52.77zm5.87-1.69c0-.56.08-1.06.25-1.51.16-.45.37-.83.65-1.14.27-.3.58-.54.93-.71s.71-.25 1.08-.25c.39 0 .73.07 1 .2.27.14.54.32.81.55l-.06-1.1v-2.49h1.61v9.88h-1.33l-.11-.74h-.06c-.25.25-.54.46-.88.64-.33.18-.69.27-1.06.27-.87 0-1.56-.32-2.07-.95s-.76-1.51-.76-2.65zm1.67-.01c0 .74.13 1.31.4 1.7.26.38.65.58 1.15.58.51 0 .99-.26 1.44-.77v-3.21c-.24-.21-.48-.36-.7-.45-.23-.08-.46-.12-.7-.12-.45 0-.82.19-1.13.59-.31.39-.46.95-.46 1.68zm6.35 1.59c0-.73.32-1.3.97-1.71.64-.4 1.67-.68 3.08-.84 0-.17-.02-.34-.07-.51-.05-.16-.12-.3-.22-.43s-.22-.22-.38-.3c-.15-.06-.34-.1-.58-.1-.34 0-.68.07-1 .2s-.63.29-.93.47l-.59-1.08c.39-.24.81-.45 1.28-.63.47-.17.99-.26 1.54-.26.86 0 1.51.25 1.93.76s.63 1.25.63 2.21v4.07h-1.32l-.12-.76h-.05c-.3.27-.63.48-.98.66s-.73.27-1.14.27c-.61 0-1.1-.19-1.48-.56-.38-.36-.57-.85-.57-1.46zm1.57-.12c0 .3.09.53.27.67.19.14.42.21.71.21.28 0 
.54-.07.77-.2s.48-.31.73-.56v-1.54c-.47.06-.86.13-1.18.23-.31.09-.57.19-.76.31s-.33.25-.41.4c-.09.15-.13.31-.13.48zm6.29-3.63h-.98v-1.2l1.06-.07.2-1.88h1.34v1.88h1.75v1.27h-1.75v3.28c0 .8.32 1.2.97 1.2.12 0 .24-.01.37-.04.12-.03.24-.07.34-.11l.28 1.19c-.19.06-.4.12-.64.17-.23.05-.49.08-.76.08-.4 0-.74-.06-1.02-.18-.27-.13-.49-.3-.67-.52-.17-.21-.3-.48-.37-.78-.08-.3-.12-.64-.12-1.01zm4.36 2.17c0-.56.09-1.06.27-1.51s.41-.83.71-1.14c.29-.3.63-.54 1.01-.71.39-.17.78-.25 1.18-.25.47 0 .88.08 1.23.24.36.16.65.38.89.67s.42.63.54 1.03c.12.41.18.84.18 1.32 0 .32-.02.57-.07.76h-4.37c.08.62.29 1.1.65 1.44.36.33.82.5 1.38.5.3 0 .58-.04.84-.13.25-.09.51-.21.76-.37l.54 1.01c-.32.21-.69.39-1.09.53s-.82.21-1.26.21c-.47 0-.92-.08-1.33-.25-.41-.16-.77-.4-1.08-.7-.3-.31-.54-.69-.72-1.13-.17-.44-.26-.95-.26-1.52zm4.61-.62c0-.55-.11-.98-.34-1.28-.23-.31-.58-.47-1.06-.47-.41 0-.77.15-1.08.45-.31.29-.5.73-.57 1.3zm3.01 2.23c.31.24.61.43.92.57.3.13.63.2.98.2.38 0 .65-.08.83-.23s.27-.35.27-.6c0-.14-.05-.26-.13-.37-.08-.1-.2-.2-.34-.28-.14-.09-.29-.16-.47-.23l-.53-.22c-.23-.09-.46-.18-.69-.3-.23-.11-.44-.24-.62-.4s-.33-.35-.45-.55c-.12-.21-.18-.46-.18-.75 0-.61.23-1.1.68-1.49.44-.38 1.06-.57 1.83-.57.48 0 .91.08 1.29.25s.71.36.99.57l-.74.98c-.24-.17-.49-.32-.73-.42-.25-.11-.51-.16-.78-.16-.35 0-.6.07-.76.21-.17.15-.25.33-.25.54 0 .14.04.26.12.36s.18.18.31.26c.14.07.29.14.46.21l.54.19c.23.09.47.18.7.29s.44.24.64.4c.19.16.34.35.46.58.11.23.17.5.17.82 0 .3-.06.58-.17.83-.12.26-.29.48-.51.68-.23.19-.51.34-.84.45-.34.11-.72.17-1.15.17-.48 0-.95-.09-1.41-.27-.46-.19-.86-.41-1.2-.68z" fill="#535353"/></g></svg>" + ] + }, + { + "section_name": "Associated content", + "section_text": "Collection", + "section_image": [] + } + ] +} \ No newline at end of file