@prefix this: <https://w3id.org/np/RAszCcnWVTYomkTM0J8K9p0PSp-gwfAIP8oDv_sfGKsdQ> .
@prefix sub: <https://w3id.org/np/RAszCcnWVTYomkTM0J8K9p0PSp-gwfAIP8oDv_sfGKsdQ#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix np: <http://www.nanopub.org/nschema#> .
@prefix npx: <http://purl.org/nanopub/x/> .
@prefix dcterms: <http://purl.org/dc/terms/> .
@prefix prov: <http://www.w3.org/ns/prov#> .
@prefix schema: <https://schema.org/> .
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix orcid: <https://orcid.org/> .
sub:head {
  this: np:hasAssertion sub:assertion ;
    np:hasProvenance sub:provenance ;
    np:hasPublicationInfo sub:pubinfo ;
    a np:Nanopublication .
}
sub:assertion {
  <https://github.com/zipnn/zipnn> <https://sense-nets.xyz/hasZoteroItemType> "computerProgram" .
  sub:assertion dcterms:creator <https://w3id.org/np/RAoSadUw99CeqDlR2400018nqTzR_38fT86OrTzk16Vts> ;
    <http://purl.org/spar/cito/agreesWith> <https://x.com/RickardGabriels/status/1810368375455207470> , <https://x.com/pratyusha_PS/status/1739025292805468212> ;
    <http://purl.org/spar/cito/discusses> <https://www.alphaxiv.org/abs/2408.13656> , <https://www.alphaxiv.org/pdf/2310.01886> , <https://www.alphaxiv.org/pdf/2405.07813> , <https://x.com/prateeky2806/status/1727589818618523783> ;
    <http://purl.org/spar/cito/includesQuotationFrom> <https://x.com/RickardGabriels/status/1810368375455207470> , <https://x.com/pratyusha_PS/status/1739025292805468212> ;
    rdfs:comment """ You only learn a few parameters, with your parameter \"efficient\" finetuning. The rest is💩

A whole line of works🧵 shows that by throwing redundancy
we can get better LoRas, keep less memory and of course model merge https://twitter.com/LChoshen/status/1833879920348422216/photo/1

ComPeft shows you can improve LoRAs by pruning aggressively and making the remaining weights binary (+/-)
It also means parameter efficiency still relies on overparametrization(but only during training)
https://x.com/prateeky2806/status/1727589818618523783
Laser shows it on full models
https://x.com/pratyusha_PS/status/1739025292805468212 https://twitter.com/LChoshen/status/1833879922500080084/photo/1

In merging, many find that with only those few weights one can make a \"multitask\" model, keeping the important ones for each model and switching.
those e.g. 1% of the weights also represent tasks well

Many..
https://www.alphaxiv.org/abs/2408.13656
https://www.alphaxiv.org/pdf/2405.07813
https://www.alphaxiv.org/pdf/2310.01886

Those works are focused on efficient multitask learning that compresses the models, can keep many models and switch between them as necessary.
Another option to compress is to SVD the LORA, separately or
to a shared space, saving the tiny differences
https://x.com/RickardGabriels/status/1810368375455207470

And just because we discussed compression, of course this is all just \"model compression\" if you want to compress to just save space, there are smarter ways:
https://github.com/zipnn/zipnn

""" ;
    schema:keywords "finetuning" , "low-rank-adapters" , "model-compression" , "multitask-learning" , "serving-systems" ;
    <https://sense-nets.xyz/endorses> <https://www.arxiv.org/abs/2407.00066> ;
    <https://sense-nets.xyz/recommends> <https://github.com/zipnn/zipnn> ;
    <https://sense-nets.xyz/summarizes> <https://www.arxiv.org/abs/2407.00066> .
  <https://www.alphaxiv.org/abs/2408.13656> <https://sense-nets.xyz/hasZoteroItemType> "webpage" .
  <https://www.alphaxiv.org/pdf/2310.01886> <https://sense-nets.xyz/hasZoteroItemType> "webpage" .
  <https://www.alphaxiv.org/pdf/2405.07813> <https://sense-nets.xyz/hasZoteroItemType> "webpage" .
  <https://www.arxiv.org/abs/2407.00066> <https://sense-nets.xyz/hasZoteroItemType> "webpage" .
  <https://x.com/RickardGabriels/status/1810368375455207470> <https://sense-nets.xyz/hasZoteroItemType> "forumPost" .
  <https://x.com/prateeky2806/status/1727589818618523783> <https://sense-nets.xyz/hasZoteroItemType> "forumPost" .
  <https://x.com/pratyusha_PS/status/1739025292805468212> <https://sense-nets.xyz/hasZoteroItemType> "forumPost" .
}
sub:provenance {
  <https://sense-nets.xyz/> a prov:SoftwareAgent ;
    prov:actedOnBehalfOf <https://w3id.org/np/RAoSadUw99CeqDlR2400018nqTzR_38fT86OrTzk16Vts> .
  <https://w3id.org/np/RAoSadUw99CeqDlR2400018nqTzR_38fT86OrTzk16Vts> foaf:account orcid:0000-0002-0085-6496 , <https://x.com/LChoshen> .
  sub:activity a <https://sense-nets.xyz/supervisedActivity> ;
    prov:wasAssociatedWith <https://sense-nets.xyz/> .
  sub:assertion prov:linksTo <https://x.com/LChoshen/status/1833879920348422216> ;
    prov:wasAssociatedWith <https://x.com/LChoshen> ;
    prov:wasAttributedTo orcid:0000-0002-0085-6496 , <https://w3id.org/np/RAoSadUw99CeqDlR2400018nqTzR_38fT86OrTzk16Vts> ;
    prov:wasGeneratedBy sub:activity .
}
sub:pubinfo {
  <https://w3id.org/np/RAoSadUw99CeqDlR2400018nqTzR_38fT86OrTzk16Vts> foaf:account orcid:0000-0002-0085-6496 ;
    foaf:name "Leshem Choshen 🤖🤗 @ICML wanna talk?" .
  sub:sig npx:hasAlgorithm "RSA" ;
    npx:hasPublicKey "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArHtI92jm8pAYVsvJabxLGfOT+7G0JyJGh2gwjB5x2pFPga6wWTd+rNBWWUZViIFnaJrBEsJpgdnoupLU9ppwn+khMiGRfxqGsDDzwHcj3Jc75CRys7d3etwXdBdoXfBgjsJiZBazwm13idr6tljRrC1TaEJBnRQAqzBw9cLDeGY77cSznzXT39feUGT168dpCSE9O6u/48DvvWVqciHGsH9cQ+LroJJVsMrorwtsdZnAK+q48wtIP6pIpw5shSJ5LnA0qeN/f4TvTFDV6ItYIXjiWWpTECc/Bxmfnyat3B5xWCu9nvz8fEs7Ns0TuzQwT3/K55iSKDEIi/E0nO97xwIDAQAB" ;
    npx:hasSignature "lUGuTihITpSBPC1WF/Na8tXgPw2UeCdCKt1Z7VLl0Aq1DKhqi52cROTJZ0PhS/unqNjVpM+W8BiMHbwSNH5zDsT04upJOTVdHC2ghxFXO2jZ2YeFoZe8ZrOUkVrsO4fDytVI8AptUiDDOn3tWN13J70E+2utIO7SBjG/pitQ62t7Nj1N1qZVHehz7McKlIfP3IB6arWD9WOHIU4lYlpyYFsYE7dlHBvjdW8gr4xwPSChdB356QdPXYphIwjVZBlMujhZhStKs5oVdlfKnfqvYxf8Dfsdh+paShLwmBR3hfJ4NN8ADDkW/VDbOvDzYgqLb2decJxM2PStBT84SH0yRg==" ;
    npx:hasSignatureTarget this: ;
    npx:signedBy <https://sense-nets.xyz/> ;
    prov:wasAssociatedWith <https://w3id.org/np/RAoSadUw99CeqDlR2400018nqTzR_38fT86OrTzk16Vts#signingDelegation> .
  this: dcterms:created "2024-09-11T18:07:24.837Z"^^xsd:dateTime ;
    dcterms:creator <https://w3id.org/np/RAoSadUw99CeqDlR2400018nqTzR_38fT86OrTzk16Vts> ;
    dcterms:license <https://creativecommons.org/licenses/by/4.0/> ;
    npx:hasNanopubType <https://sense-nets.xyz/SemanticPost> ;
    npx:wasCreatedAt <https://sense-nets.xyz/> ;
    rdfs:label "CoSMO Semantic Post" ;
    prov:wasAttributedTo orcid:0000-0002-0085-6496 ;
    <https://sense-nets.xyz/hasRootSigner> "0xf6ECcfD463afB464dcC85b051DF2E93E2646E6D2" .
}