@prefix skos: <http://www.w3.org/2004/02/skos/core#> .
@prefix ltk: <http://data.loterre.fr/ark:/67375/LTK> .
@prefix dc: <http://purl.org/dc/terms/> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .

<http://data.loterre.fr/ark:/67375/8LP-V1WS80W6-2>
  skos:prefLabel "modèle de langue"@fr, "language model"@en ;
  a skos:Concept ;
  skos:narrower <http://data.loterre.fr/ark:/67375/8LP-WS2JC6KS-7> .

<http://data.loterre.fr/ark:/67375/8LP-WS2JC6KS-7>
  skos:exactMatch <http://data.loterre.fr/ark:/67375/LTK-MCP9KCHH-6>, <http://www.wikidata.org/entity/Q355198> ;
  skos:definition "« Modèle de langue pré-entraîné qui convertit les textes en images, ce qui permet de transférer les représentations des langues sur la base de la similarité orthographique ou de la co-activation des pixels. » (Rust et al., 2022)"@fr, "A pretrained language model that renders text as images, making it possible to transfer representations across languages based on orthographic similarity or the co-activation of pixels. (Rust et al., 2022)"@en ;
  skos:inScheme <http://data.loterre.fr/ark:/67375/8LP> ;
  a skos:Concept ;
  skos:altLabel "picture element"@en, "px"@en, "px"@fr, "pixel-based encoders of language"@en, "pixel-based encoders of language"@fr ;
  skos:example "PIXEL transforms its input text into RGB images and is a pre-trained vision transformer masked autoencoder with 112M parameters. (Fröbe, Stein, Gollub, Hagen & Potthast, 2023)"@en, "Note that PIXEL is a 112M encoder-decoder model but only the 86M encoder is used in this experiment and GPT-2 uses even more parameters (126M). (Tai, Liao, Suglia & Vergari, 2024)"@en ;
  skos:prefLabel "PIXEL"@en, "PIXEL"@fr ;
  skos:broader <http://data.loterre.fr/ark:/67375/8LP-V1WS80W6-2> ;
  dc:modified "2024-04-26T13:27:46"^^xsd:dateTime ;
  skos:hiddenLabel "Pixel"@fr, "Pixel"@en .

<http://data.loterre.fr/ark:/67375/8LP> a owl:Ontology, skos:ConceptScheme .
