@prefix skos: <http://www.w3.org/2004/02/skos/core#> .
@prefix dct: <http://purl.org/dc/terms/> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
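
# Excerpt from the Loterre concept scheme <http://data.loterre.fr/ark:/67375/8LP>:
# two SKOS concepts, "model training" and its narrower concept "prompt tuning",
# followed by the declaration of the scheme itself.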

<http://data.loterre.fr/ark:/67375/8LP-H5XSG9ZG-G>
  a skos:Concept ;
  skos:prefLabel "model training"@en, "entraînement de modèle"@fr ;
  skos:narrower <http://data.loterre.fr/ark:/67375/8LP-CRW81XDS-M> .

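# "prompt tuning": preferred and hidden labels, a sourced definition,
# usage examples drawn from the literature, and a broader link to "model training".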
<http://data.loterre.fr/ark:/67375/8LP-CRW81XDS-M>
  a skos:Concept ;
  skos:prefLabel "prompt tuning"@en, "ajustement de l'invite"@fr ;
  skos:hiddenLabel "Prompt tuning"@en, "Ajustement de l'invite"@fr ;
  skos:definition "The process of adapting LLMs to new tasks by training a small set of parameters, or prompts. (Shah, Prompt Tuning: A Powerful Technique for Adapting LLMs to New Tasks, on medium.com, 2023)"@en ;
  skos:example "Prompt Tuning necessitates approximately 0.2 times the training cost of vanilla fine-tuning, utilizing only 0.0067% of the total parameters as trainable. (Somayajula, Liang, Zhang, Singh & Xie, 2024)"@en ,
    "Under the latest paradigm of prompt-tuning with generative PLMs, they have not considered the explicit interaction between prompts and documents. (Zhang, Yang, Zhu, Lin, Xu & Liu, 2024)"@en ,
    "Although prompt tuning has been proven to be an efficient tuning paradigm for commonsense reasoning tasks, it requires further exploration. (Huang, Li, Xu, Zhang, Gan, Zhang & Wang, 2023)"@en ;
  skos:broader <http://data.loterre.fr/ark:/67375/8LP-H5XSG9ZG-G> ;
  skos:inScheme <http://data.loterre.fr/ark:/67375/8LP> ;
  dct:modified "2024-06-13T14:55:06"^^xsd:dateTime .

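# The scheme, typed as both an OWL ontology and a SKOS concept scheme.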
<http://data.loterre.fr/ark:/67375/8LP> a owl:Ontology, skos:ConceptScheme .
