@prefix skos: <http://www.w3.org/2004/02/skos/core#> .
@prefix dc: <http://purl.org/dc/terms/> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix ltk: <http://data.loterre.fr/ark:/67375/LTK-> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .

<http://data.loterre.fr/ark:/67375/8LP-V1WS80W6-2>
  a skos:Concept ;
  skos:prefLabel "language model"@en, "modèle de langue"@fr ;
  skos:narrower <http://data.loterre.fr/ark:/67375/8LP-PJTL80HD-P> .

<http://data.loterre.fr/ark:/67375/8LP-PJTL80HD-P>
  a skos:Concept ;
  skos:inScheme <http://data.loterre.fr/ark:/67375/8LP> ;
  skos:prefLabel "BioBERT"@en, "BioBERT"@fr ;
  skos:hiddenLabel "BioBert"@en, "BioBert"@fr ;
  skos:broader <http://data.loterre.fr/ark:/67375/8LP-V1WS80W6-2> ;
  skos:exactMatch ltk:N7PKH3X6-Q ;
  skos:definition "A pre-trained language representation model for the biomedical domain. (Lee et al., 2020, p. 1235)"@en,
    "Modèle de représentation linguistique pré-entraîné pour le domaine biomédical. (Lee et al., 2020, p. 1235)"@fr ;
  skos:example "As a result, the pretraining data for BioBERT also covers the biomedical domain. (Salhofer, Liu & Kern, 2022)"@en,
    "From zero-shot to using all the training data, EBM-Net improves only by 26.6% relative F1 (from 47.52% to 60.15%), while BioBERT improves largely by 65.8% relative F1 (from 32.77% to 54.33%). (Jin, Tan, Chen, Liu & Huang, 2020)"@en,
    "We see that BioBERT does not take age into account when predicting mortality risk, except for patients over 90. (Van Aken, Herrmann & Löser, 2022)"@en,
    "On investigating the error categories of BioBERT (v1.1) models on the clinical language understanding task, we find that despite having a strong performance, the models still make several mistakes on examples that require medical domain knowledge. (Sushil, Suster & Daelemans, 2021)"@en ;
  dc:modified "2024-04-26T13:22:35"^^xsd:dateTime .

<http://data.loterre.fr/ark:/67375/8LP> a owl:Ontology, skos:ConceptScheme .
