import type { ModelEntryWithTokenizer } from "$lib/components/InferencePlayground/types";
import type { ModelEntry } from "@huggingface/hub";
import type { PageServerLoad } from "./$types";

import { env } from "$env/dynamic/private";
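
// Server-side load: fetch every "warm" conversational text-generation model
// from the Hugging Face Hub, then attach each model's tokenizer config
// (presumably consumed client-side to apply chat templates).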
export const load: PageServerLoad = async ({ fetch }) => {
  const apiUrl =
    "https://huggingface.co/api/models?pipeline_tag=text-generation&inference=warm&filter=conversational";
  const HF_TOKEN = env.HF_TOKEN;

  const res = await fetch(apiUrl, {
    headers: {
      Authorization: `Bearer ${HF_TOKEN}`,
    },
  });
  if (!res.ok) {
    console.error("Error fetching warm models", res.status, res.statusText);
    return { models: [] };
  }
  const compatibleModels: ModelEntry[] = await res.json();
  compatibleModels.sort((a, b) => a.id.toLowerCase().localeCompare(b.id.toLowerCase()));
  // Fetch each model's tokenizer_config.json in parallel; models whose
  // config cannot be retrieved are dropped below.
  const promises = compatibleModels.map(async model => {
    const configUrl = `https://huggingface.co/${model.id}/raw/main/tokenizer_config.json`;
    const res = await fetch(configUrl, {
      headers: {
        Authorization: `Bearer ${HF_TOKEN}`,
      },
    });
    if (!res.ok) {
      console.error(`Error fetching tokenizer file for ${model.id}`, res.status, res.statusText);
      return null; // Ignore failed requests by returning null
    }
    const tokenizerConfig = await res.json();
    return { ...model, tokenizerConfig } satisfies ModelEntryWithTokenizer;
  });
  // Drop the nulls from failed requests. The type predicate is needed so
  // TypeScript narrows (ModelEntryWithTokenizer | null)[] to the declared
  // ModelEntryWithTokenizer[] (a plain `model !== null` callback only
  // narrows on TS 5.5+).
  const models: ModelEntryWithTokenizer[] = (await Promise.all(promises)).filter(
    (model): model is ModelEntryWithTokenizer => model !== null
  );

  return { models };
};
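
// A minimal sketch (hypothetical, not part of this module) of how a
// +page.svelte for this route might consume the returned data, assuming
// SvelteKit's generated PageData type:
//
//   <script lang="ts">
//     import type { PageData } from "./$types";
//     export let data: PageData; // { models: ModelEntryWithTokenizer[] }
//   </script>
//
//   <select>
//     {#each data.models as model}
//       <option value={model.id}>{model.id}</option>
//     {/each}
//   </select>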