@prefix skos: <http://www.w3.org/2004/02/skos/core#> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix ltk: <http://data.loterre.fr/ark:/67375/LTK> .
@prefix dc: <http://purl.org/dc/terms/> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix inist: <http://www.inist.fr/Ontology#> .

# "language model" concept — the broader parent of ALUM within this scheme.
<http://data.loterre.fr/ark:/67375/8LP-V1WS80W6-2>
  a skos:Concept ;
  skos:prefLabel "language model"@en, "modèle de langue"@fr ;
  skos:narrower <http://data.loterre.fr/ark:/67375/8LP-FZWTVBWP-M> .

# The vocabulary itself, typed both as an OWL ontology and a SKOS concept scheme.
<http://data.loterre.fr/ark:/67375/8LP>
  a owl:Ontology ;
  a skos:ConceptScheme .
# "ALUM" concept (Adversarial training for large neural LangUage Models).
<http://data.loterre.fr/ark:/67375/8LP-FZWTVBWP-M>
  a skos:Concept ;
  skos:inScheme <http://data.loterre.fr/ark:/67375/8LP> ;
  # Labels
  skos:prefLabel "ALUM"@en, "ALUM"@fr ;
  skos:altLabel "Adversarial training for large neural LangUage Models"@en ;
  skos:hiddenLabel "Alum"@fr, "Alum"@en ;
  # Documentation
  skos:definition "Language model which regularizes the training objective by applying perturbations in the embedding space that maximizes the adversarial loss (Liu et al., 2020)."@en, "Modèle de langue qui « régularise l'entraînement en appliquant des perturbations dans l'espace des embeddings qui maximise la perte antagoniste » (Liu et al., 2020)"@fr ;
  skos:example "Both ALUM and InfoBERT take RoBERTa-large as the backbone model. (Chen, Zhang & Zhao, 2022)"@en ;
  inist:definitionalContext "ALUM (Liu et al. 2020) is the state-of-theart adversarial training method for neural language models which regularizes fine-tuning via perturbations in the embedding space. (Chen, Shen, Chen & Yang, 2021)"@en ;
  # Hierarchy and cross-scheme mapping
  skos:broader <http://data.loterre.fr/ark:/67375/8LP-V1WS80W6-2> ;
  # Written as a full IRI: the prefixed form ltk:-KBQWDCLS-T is invalid Turtle,
  # since a local name (PN_LOCAL) may not begin with '-' — the ltk: prefix IRI
  # ends in "LTK" with no separator, forcing the local part to start with '-'.
  skos:exactMatch <http://data.loterre.fr/ark:/67375/LTK-KBQWDCLS-T> ;
  dc:modified "2024-04-26T13:14:13"^^xsd:dateTime .

