{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,7,25]],"date-time":"2024-07-25T07:59:01Z","timestamp":1721894341923},"reference-count":65,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2023,10,7]],"date-time":"2023-10-07T00:00:00Z","timestamp":1696636800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,10,7]],"date-time":"2023-10-07T00:00:00Z","timestamp":1696636800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key R&D Program of China","doi-asserted-by":"crossref","award":["No. 2022YFB3904200"],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"crossref"}]},{"name":"Natural Science Foundation of Hubei Province of China","award":["No. 2022CFB640"]},{"name":"Opening Fund of Key Laboratory of Geological Survey and Evaluation of Ministry of Education","award":["No. GLAB 2023ZR01"]},{"name":"Fundamental Research Funds for the Central Universities , the China Postdoctoral Science Foundation","award":["No.2021M702991"]},{"name":"Open Fund of Key Laboratory of Urban Land Resources Monitoring and Simulation, Ministry of Natural Resources","award":["No. KF-2022-07-014"]},{"name":"Open Fund of Hubei Key Laboratory of Intelligent Vision Based Monitoring for Hydroelectric Engineering","award":["No. 
2022SDSJ04"]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Earth Sci Inform"],"published-print":{"date-parts":[[2023,12]]},"DOI":"10.1007\/s12145-023-01112-6","type":"journal-article","created":{"date-parts":[[2023,10,7]],"date-time":"2023-10-07T02:01:56Z","timestamp":1696644116000},"page":"3629-3646","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["CnGeoPLM: Contextual knowledge selection and embedding with pretrained language representation model for the geoscience domain"],"prefix":"10.1007","volume":"16","author":[{"given":"Kai","family":"Ma","sequence":"first","affiliation":[]},{"given":"Shuai","family":"Zheng","sequence":"additional","affiliation":[]},{"given":"Miao","family":"Tian","sequence":"additional","affiliation":[]},{"given":"Qinjun","family":"Qiu","sequence":"additional","affiliation":[]},{"given":"Yongjian","family":"Tan","sequence":"additional","affiliation":[]},{"given":"Xinxin","family":"Hu","sequence":"additional","affiliation":[]},{"given":"HaiYan","family":"Li","sequence":"additional","affiliation":[]},{"given":"Zhong","family":"Xie","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,10,7]]},"reference":[{"key":"1112_CR1","unstructured":"Araci D (2019) Finbert: Financial sentiment analysis with pre-trained language models. arXiv preprint arXiv:1908.10063"},{"key":"1112_CR2","doi-asserted-by":"crossref","unstructured":"Beltagy I, Lo K, Cohan A (2019) SciBERT: A pretrained language model for scientific text. arXiv preprint arXiv:1903.10676","DOI":"10.18653\/v1\/D19-1371"},{"key":"1112_CR3","first-page":"1877","volume":"33","author":"T Brown","year":"2020","unstructured":"Brown T, Mann B, Ryder N et al (2020) Language models are few-shot learners. 
Adv Neural Inf Process Syst 33:1877\u20131901","journal-title":"Adv Neural Inf Process Syst"},{"key":"1112_CR4","unstructured":"Chen Q, Zhuo Z, Wang W (2019) Bert for joint intent classification and slot filling. arXiv preprint arXiv:1902.10909"},{"key":"1112_CR5","unstructured":"Lample G, Conneau A (2019) Cross-lingual language model pretraining. arXiv preprint arXiv:1901.07291"},{"key":"1112_CR6","unstructured":"Dai AM, Le QV (2015) Semi-supervised sequence learning. In: Proceedings of the 28th International Conference on Neural Information Processing Systems, vol 2, pp 3079\u20133087"},{"key":"1112_CR7","doi-asserted-by":"crossref","unstructured":"Denli H, Chughtai H A, Hughes B et al (2021) Geoscience language processing for exploration. Abu Dhabi International Petroleum Exhibition and Conference:D031S102R003","DOI":"10.2118\/207766-MS"},{"key":"1112_CR8","unstructured":"Devlin J, Chang M W, Lee K et al (2018) Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805"},{"key":"1112_CR9","doi-asserted-by":"publisher","DOI":"10.1016\/j.oregeorev.2021.104200","volume":"135","author":"M Enkhsaikhan","year":"2021","unstructured":"Enkhsaikhan M, Holden EJ, Duuring P et al (2021) Understanding ore-forming conditions using machine reading of text. Ore Geol Rev 135:104200","journal-title":"Ore Geol Rev"},{"key":"1112_CR10","unstructured":"Fedus W, Zoph B, Shazeer N (2021) Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity. arXiv preprint arXiv:2101.03961"},{"issue":"24","key":"1112_CR11","doi-asserted-by":"publisher","first-page":"12942","DOI":"10.3390\/app122412942","volume":"12","author":"Y Gao","year":"2022","unstructured":"Gao Y, Xiong Y, Wang S et al (2022) GeoBERT: pre-training geospatial representation learning on point-of-Interest. 
Appl Sci 12(24):12942","journal-title":"Appl Sci"},{"key":"1112_CR12","doi-asserted-by":"crossref","unstructured":"Gururangan S, Marasovi\u0107 A, Swayamdipta S et al (2020) Don't stop pretraining: adapt language models to domains and tasks. arXiv preprint arXiv:2004.10964","DOI":"10.18653\/v1\/2020.acl-main.740"},{"key":"1112_CR13","doi-asserted-by":"publisher","DOI":"10.1016\/j.oregeorev.2019.05.005","volume":"111","author":"EJ Holden","year":"2019","unstructured":"Holden EJ, Liu W, Horrocks T et al (2019) GeoDocA\u2013Fast analysis of geological content in mineral exploration reports: a text mining approach. Ore Geol Rev 111:102919","journal-title":"Ore Geol Rev"},{"key":"1112_CR14","doi-asserted-by":"crossref","unstructured":"Howard J, Ruder S (2018) Universal language model fine-tuning for text classification. arXiv preprint arXiv:1801.06146","DOI":"10.18653\/v1\/P18-1031"},{"key":"1112_CR15","unstructured":"Huang K, Altosaar J, Ranganath R (2019) Clinicalbert: Modeling clinical notes and predicting hospital readmission. arXiv preprint arXiv:1904.05342"},{"key":"1112_CR16","doi-asserted-by":"crossref","unstructured":"Ke P, Ji H, Liu S et al (2020) SentiLARE: Sentiment-Aware Language Representation Learning with Linguistic Knowledge. Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP). 6975-6988","DOI":"10.18653\/v1\/2020.emnlp-main.567"},{"key":"1112_CR17","unstructured":"Lan Z, Chen M, Goodman S et al (2019) Albert: A lite bert for self-supervised learning of language representations. arXiv preprint arXiv:1909.11942"},{"key":"1112_CR18","doi-asserted-by":"crossref","unstructured":"Lawley CJM, Raimondo S, Chen T et al (2022) Geoscience language models and their intrinsic evaluation. 
Appl Comput Geosci 14:100084","DOI":"10.1016\/j.acags.2022.100084"},{"issue":"4","key":"1112_CR19","doi-asserted-by":"publisher","first-page":"1234","DOI":"10.1093\/bioinformatics\/btz682","volume":"36","author":"J Lee","year":"2020","unstructured":"Lee J, Yoon W, Kim S et al (2020) BioBERT: a pre-trained biomedical language representation model for biomedical text mining. Bioinformatics 36(4):1234\u20131240","journal-title":"Bioinformatics"},{"key":"1112_CR20","doi-asserted-by":"publisher","DOI":"10.1016\/j.jbi.2020.103422","volume":"107","author":"X Li","year":"2020","unstructured":"Li X, Zhang H, Zhou XH (2020) Chinese clinical named entity recognition with variant neural structures based on BERT methods. J Biomed Inform 107:103422","journal-title":"J Biomed Inform"},{"key":"1112_CR21","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2022.116682","volume":"196","author":"D Li","year":"2022","unstructured":"Li D, Yan L, Yang J et al (2022) Dependency syntax guided bert-bilstm-gam-crf for chinese ner. Expert Syst Appl 196:116682","journal-title":"Expert Syst Appl"},{"key":"1112_CR22","doi-asserted-by":"crossref","unstructured":"Lin YC, Su KY (2021) How Fast can BERT Learn Simple Natural Language Inference?. Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume. 626\u2013633","DOI":"10.18653\/v1\/2021.eacl-main.51"},{"key":"1112_CR23","unstructured":"Liu Y, Ott M, Goyal N et al (2019) Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692"},{"key":"1112_CR24","doi-asserted-by":"crossref","unstructured":"Liu X, Hu J, Shen Q et al (2021) Geo-BERT Pre-training Model for Query Rewriting in POI Search. Findings of the Association for Computational Linguistics: EMNLP 2021. 
2209\u20132214","DOI":"10.18653\/v1\/2021.findings-emnlp.190"},{"key":"1112_CR25","doi-asserted-by":"crossref","unstructured":"Liu Y, Lu W, Cheng S et al (2021) Pre-trained language model for web-scale retrieval in baidu search. Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery & Data Mining. 3365\u20133375","DOI":"10.1145\/3447548.3467149"},{"issue":"2","key":"1112_CR26","doi-asserted-by":"publisher","first-page":"979","DOI":"10.1007\/s12145-022-00775-x","volume":"15","author":"H Liu","year":"2022","unstructured":"Liu H, Qiu Q, Wu L et al (2022) Few-shot learning for name entity recognition in geological text based on GeoBERT. Earth Sci Inform 15(2):979\u2013991","journal-title":"Earth Sci Inform"},{"issue":"3","key":"1112_CR27","doi-asserted-by":"publisher","first-page":"e2021EA002166","DOI":"10.1029\/2021EA002166","volume":"9","author":"X Lv","year":"2022","unstructured":"Lv X, Xie Z, Xu D et al (2022) Chinese named entity recognition in the geoscience domain based on BERT. Earth Space Sci 9(3):e2021EA002166","journal-title":"Earth Space Sci"},{"key":"1112_CR28","doi-asserted-by":"crossref","unstructured":"Ma X (2022) Knowledge graph construction and application in geosciences: A review. Comput Geosci 161:105082","DOI":"10.1016\/j.cageo.2022.105082"},{"issue":"1","key":"1112_CR29","doi-asserted-by":"publisher","first-page":"21","DOI":"10.1007\/s12145-021-00695-2","volume":"15","author":"K Ma","year":"2022","unstructured":"Ma K, Tian M, Tan Y et al (2022a) What is this article about? Generative summarization with the BERT model in the geosciences domain. Earth Sci Inf 15(1):21\u201336","journal-title":"Earth Sci Inf"},{"issue":"2","key":"1112_CR30","doi-asserted-by":"publisher","first-page":"143","DOI":"10.1007\/s10109-022-00375-9","volume":"24","author":"K Ma","year":"2022","unstructured":"Ma K, Tan YJ, Xie Z et al (2022b) Chinese toponym recognition with variant neural structures from social media messages based on BERT methods. 
J Geogr Syst 24(2):143\u2013169","journal-title":"J Geogr Syst"},{"key":"1112_CR31","unstructured":"Mikolov T, Chen K, Corrado G et al (2013) Efficient estimation of word representations in vector space. arXiv preprint arXiv:1301.3781"},{"issue":"8","key":"1112_CR32","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0237861","volume":"15","author":"M Mozafari","year":"2020","unstructured":"Mozafari M, Farahbakhsh R, Crespi N (2020) Hate speech detection and racial bias mitigation in social media based on BERT model. PLoS ONE 15(8):e0237861","journal-title":"PLoS ONE"},{"key":"1112_CR33","doi-asserted-by":"crossref","unstructured":"Pennington J, Socher R, Manning CD (2014) Glove: Global vectors for word representation. Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP). 1532\u20131543","DOI":"10.3115\/v1\/D14-1162"},{"key":"1112_CR34","doi-asserted-by":"crossref","unstructured":"Peters ME, Neumann M, Iyyer M et al (2018) Deep contextualized word representations. In: Proceedings of the 2018 conference of the North American chapter of the association for computational linguistics: human language technologies, vol 1 (long papers), pp 2227\u20132237","DOI":"10.18653\/v1\/N18-1202"},{"issue":"1","key":"1112_CR35","doi-asserted-by":"publisher","first-page":"16","DOI":"10.1139\/geomat-2018-0007","volume":"72","author":"Q Qiu","year":"2018","unstructured":"Qiu Q, Xie Z, Wu L (2018a) A cyclic self-learning Chinese word segmentation for the geoscience domain. Geomatica 72(1):16\u201326","journal-title":"Geomatica"},{"key":"1112_CR36","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1016\/j.cageo.2018.08.006","volume":"121","author":"Q Qiu","year":"2018","unstructured":"Qiu Q, Xie Z, Wu L et al (2018b) DGeoSegmenter: a dictionary-based Chinese word segmenter for the geoscience domain. 
Comput Geosci 121:1\u201311","journal-title":"Comput Geosci"},{"issue":"JUL.","key":"1112_CR37","doi-asserted-by":"publisher","first-page":"157","DOI":"10.1016\/j.eswa.2019.02.001","volume":"125","author":"Q Qiu","year":"2019","unstructured":"Qiu Q, Xie Z, Wu L et al (2019) Geoscience Keyphrase Extraction Algorithm Using Enhanced Word Embedding. Expert Syst Appl 125(JUL.):157\u2013169","journal-title":"Expert Syst Appl"},{"issue":"4","key":"1112_CR38","doi-asserted-by":"publisher","first-page":"565","DOI":"10.1007\/s12145-019-00390-3","volume":"12","author":"Q Qiu","year":"2019","unstructured":"Qiu Q, Xie Z, Wu L et al (2019b) BiLSTM-CRF for geological named entity recognition from the geoscience literature. Earth Sci Inf 12(4):565\u2013579","journal-title":"Earth Sci Inf"},{"issue":"4","key":"1112_CR39","doi-asserted-by":"publisher","first-page":"1393","DOI":"10.1007\/s12145-020-00527-9","volume":"13","author":"Q Qiu","year":"2020","unstructured":"Qiu Q, Xie Z, Wu L et al (2020a) Automatic spatiotemporal and semantic information extraction from unstructured geoscience reports using text mining techniques. Earth Sci Inf 13(4):1393\u20131410","journal-title":"Earth Sci Inf"},{"issue":"10","key":"1112_CR40","doi-asserted-by":"publisher","first-page":"1872","DOI":"10.1007\/s11431-020-1647-3","volume":"63","author":"X Qiu","year":"2020","unstructured":"Qiu X, Sun T, Xu Y et al (2020b) Pre-trained models for natural language processing: a survey. Sci China Technol Sci 63(10):1872\u20131897","journal-title":"Sci China Technol Sci"},{"issue":"2","key":"1112_CR41","doi-asserted-by":"publisher","first-page":"839","DOI":"10.1111\/tgis.12887","volume":"26","author":"Q Qiu","year":"2022","unstructured":"Qiu Q, Xie Z, Ma K et al (2022) Spatially oriented convolutional neural network for spatial relation extraction from natural language texts. 
Trans GIS 26(2):839\u2013866","journal-title":"Trans GIS"},{"issue":"3","key":"1112_CR42","doi-asserted-by":"publisher","first-page":"423","DOI":"10.1007\/s11004-023-10050-4","volume":"55","author":"Q Qiu","year":"2023","unstructured":"Qiu Q, Ma K, Lv H et al (2023a) Construction and application of a knowledge graph for iron deposits using text mining analytics and a deep learning algorithm. Math Geosci 55(3):423\u2013456","journal-title":"Math Geosci"},{"issue":"5","key":"1112_CR43","doi-asserted-by":"publisher","first-page":"1526","DOI":"10.1111\/tgis.13086","volume":"27","author":"Q Qiu","year":"2023","unstructured":"Qiu Q, Xie Z, Ma K et al (2023b) NeuroSPE: a neuro-net spatial relation extractor for natural language text fusing gazetteers and pretrained models. Trans GIS 27(5):1526\u20131549","journal-title":"Trans GIS"},{"issue":"8","key":"1112_CR44","first-page":"9","volume":"1","author":"A Radford","year":"2019","unstructured":"Radford A, Wu J, Child R et al (2019) Language models are unsupervised multitask learners. OpenAI Blog 1(8):9","journal-title":"OpenAI Blog"},{"key":"1112_CR45","unstructured":"Radford A, Narasimhan K, Salimans T et al (2018a) Improving language understanding with unsupervised learning[J]. OpenAI"},{"key":"1112_CR46","unstructured":"Radford A, Narasimhan K, Salimans T et al (2018b) Improving language understanding by generative pre-training[J]. OpenAI"},{"key":"1112_CR47","unstructured":"Song Y, Wang J, Liang Z et al (2020) Utilizing BERT intermediate layers for aspect based sentiment analysis and natural language inference. arXiv preprint arXiv:2002.04815"},{"key":"1112_CR48","doi-asserted-by":"crossref","unstructured":"Sun T, Shao Y, Qiu X et al (2020a) CoLAKE: Contextualized Language and Knowledge Embedding. Proceedings of the 28th International Conference on Computational Linguistics. 
3660\u20133670","DOI":"10.18653\/v1\/2020.coling-main.327"},{"key":"1112_CR49","doi-asserted-by":"crossref","unstructured":"Sun Y, Wang S, Li Y, et al (2020b) Ernie 2.0: A continual pre-training framework for language understanding. Proceedings of the AAAI Conference on Artificial Intelligence. 34(05): 8968\u20138975","DOI":"10.1609\/aaai.v34i05.6428"},{"issue":"7763","key":"1112_CR50","doi-asserted-by":"publisher","first-page":"95","DOI":"10.1038\/s41586-019-1335-8","volume":"571","author":"V Tshitoyan","year":"2019","unstructured":"Tshitoyan V, Dagdelen J, Weston L et al (2019) Unsupervised word embeddings capture latent knowledge from materials science literature. Nature 571(7763):95\u201398","journal-title":"Nature"},{"key":"1112_CR51","unstructured":"Van der Maaten L, Hinton G (2008) Visualizing data using t-SNE. J Mach Learning Res 9:2579\u20132605"},{"key":"1112_CR52","unstructured":"Vaswani A, Shazeer N, Parmar N et al (2017) Attention is all you need. In: Proceedings of the 31st International Conference on Neural Information Processing Systems, pp 6000\u20136010"},{"key":"1112_CR53","doi-asserted-by":"publisher","DOI":"10.1016\/j.oregeorev.2021.104190","volume":"134","author":"B Wang","year":"2021","unstructured":"Wang B, Wu L, Li W et al (2021a) A semi-automatic approach for generating geological profiles by integrating multi-source data. Ore Geol Rev 134:104190","journal-title":"Ore Geol Rev"},{"key":"1112_CR54","doi-asserted-by":"publisher","first-page":"176","DOI":"10.1162\/tacl_a_00360","volume":"9","author":"X Wang","year":"2021","unstructured":"Wang X, Gao T, Zhu Z et al (2021b) KEPLER: a unified model for knowledge embedding and pre-trained language representation. 
Trans Assoc Comput Linguist 9:176\u2013194","journal-title":"Trans Assoc Comput Linguist"},{"key":"1112_CR55","doi-asserted-by":"publisher","DOI":"10.1016\/j.oregeorev.2022.104818","volume":"144","author":"B Wang","year":"2022","unstructured":"Wang B, Ma K, Wu L et al (2022a) Visual analytics and information extraction of geological content for text-based mineral exploration reports. Ore Geol Rev 144:104818","journal-title":"Ore Geol Rev"},{"key":"1112_CR56","doi-asserted-by":"publisher","DOI":"10.1016\/j.cageo.2022.105229","volume":"168","author":"B Wang","year":"2022","unstructured":"Wang B, Wu L, Xie Z et al (2022b) Understanding geological reports based on knowledge graphs using a deep learning approach. Comput Geosci 168:105229","journal-title":"Comput Geosci"},{"issue":"6","key":"1112_CR57","doi-asserted-by":"publisher","first-page":"166","DOI":"10.3390\/ijgi6060166","volume":"6","author":"L Wu","year":"2017","unstructured":"Wu L, Xue L, Li C et al (2017) A knowledge-driven geospatially enabled framework for geological big data. ISPRS Int J Geo Inf 6(6):166","journal-title":"ISPRS Int J Geo Inf"},{"key":"1112_CR58","unstructured":"Xu H, Liu B, Shu L et al (2019) BERT post-training for review reading comprehension and aspect-based sentiment analysis. arXiv preprint arXiv:1904.02232"},{"key":"1112_CR59","doi-asserted-by":"crossref","unstructured":"Xue K, Zhou Y, Ma Z et al (2019) Fine-tuning BERT for joint entity and relation extraction in Chinese medical text. 2019 IEEE International Conference on Bioinformatics and Biomedicine (BIBM). IEEE 892\u2013897","DOI":"10.1109\/BIBM47256.2019.8983370"},{"key":"1112_CR60","unstructured":"Yang Z, Dai Z, Yang Y et al (2019) Xlnet: Generalized autoregressive pretraining for language understanding. In: Proceedings of the 33rd International Conference on Neural Information Processing Systems, pp 5753\u20135763"},{"key":"1112_CR61","unstructured":"Yao L, Mao C, Luo Y (2019) KG-BERT: BERT for knowledge graph completion. 
arXiv preprint arXiv:1909.03193"},{"key":"1112_CR62","doi-asserted-by":"crossref","unstructured":"Yu D, Zhu C, Yang Y et al (2022a) Jaket: Joint pre-training of knowledge graph and language understanding. Proceedings of the AAAI Conference on Artificial Intelligence. 36(10): 11630\u201311638","DOI":"10.1609\/aaai.v36i10.21417"},{"key":"1112_CR63","doi-asserted-by":"crossref","unstructured":"Yu Y, Wang Y, Mu J et al (2022b) Chinese mineral named entity recognition based on BERT model. Expert Syst Appl 206:117727","DOI":"10.1016\/j.eswa.2022.117727"},{"key":"1112_CR64","doi-asserted-by":"crossref","unstructured":"Zhang Z, Han X, Liu Z et al (2019) ERNIE: Enhanced language representation with informative entities. arXiv preprint arXiv:1905.07129","DOI":"10.18653\/v1\/P19-1139"},{"key":"1112_CR65","doi-asserted-by":"crossref","unstructured":"Zhang T, Cai Z, Wang C et al (2021) SMedBERT: A Knowledge-Enhanced Pre-trained Language Model with Structured Semantics for Medical Text Mining. Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers). 
5882\u20135893","DOI":"10.18653\/v1\/2021.acl-long.457"}],"container-title":["Earth Science Informatics"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s12145-023-01112-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s12145-023-01112-6\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s12145-023-01112-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,12,8]],"date-time":"2023-12-08T06:36:07Z","timestamp":1702017367000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s12145-023-01112-6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10,7]]},"references-count":65,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2023,12]]}},"alternative-id":["1112"],"URL":"https:\/\/doi.org\/10.1007\/s12145-023-01112-6","relation":{},"ISSN":["1865-0473","1865-0481"],"issn-type":[{"value":"1865-0473","type":"print"},{"value":"1865-0481","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,10,7]]},"assertion":[{"value":"1 April 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"25 September 2023","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"7 October 2023","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interests"}}]}}