Spaces:
Running
Running
Commit
•
d4febae
1
Parent(s):
9c9e5d3
work in progress to allow people passing their own token
Browse files- src/createSpace.mts +21 -8
- src/generateFiles.mts +21 -40
- src/getPythonApp.mts +41 -0
- src/getWebApp.mts +46 -0
- src/index.mts +10 -7
- src/isPythonAppPrompt.mts +6 -0
src/createSpace.mts
CHANGED
@@ -1,21 +1,34 @@
|
|
1 |
import { v4 as uuidv4 } from "uuid"
|
2 |
import { createRepo, uploadFiles, whoAmI } from "@huggingface/hub"
|
3 |
import type { RepoDesignation, Credentials } from "@huggingface/hub"
|
|
|
4 |
|
5 |
import { RepoFile } from "./types.mts"
|
6 |
|
7 |
-
export const createSpace = async (files: RepoFile[]) => {
|
8 |
-
|
9 |
-
const repoId = `space-factory-${uuidv4().slice(0, 4)}`
|
10 |
-
const repoName = `jbilcke-hf/${repoId}`
|
11 |
|
12 |
-
const
|
13 |
-
const credentials: Credentials = { accessToken: process.env.HF_API_TOKEN }
|
14 |
|
15 |
const { name: username } = await whoAmI({ credentials })
|
16 |
-
console.log("me: ", username)
|
17 |
|
18 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
19 |
|
20 |
await createRepo({
|
21 |
repo,
|
|
|
1 |
import { v4 as uuidv4 } from "uuid"
|
2 |
import { createRepo, uploadFiles, whoAmI } from "@huggingface/hub"
|
3 |
import type { RepoDesignation, Credentials } from "@huggingface/hub"
|
4 |
+
import slugify from "slugify"
|
5 |
|
6 |
import { RepoFile } from "./types.mts"
|
7 |
|
8 |
+
export const createSpace = async (files: RepoFile[], token: string) => {
|
|
|
|
|
|
|
9 |
|
10 |
+
const credentials: Credentials = { accessToken: token }
|
|
|
11 |
|
12 |
const { name: username } = await whoAmI({ credentials })
|
|
|
13 |
|
14 |
+
let slug = ``
|
15 |
+
let title = ``
|
16 |
+
const readme = files.find(p => p.path === "README.md")
|
17 |
+
try {
|
18 |
+
const matches = readme.content.match(/title: ([^\n]+)\n/)
|
19 |
+
title = matches?.[1] || ""
|
20 |
+
slug = (slugify as any)(title) as string
|
21 |
+
if (!slug.length) {
|
22 |
+
throw new Error("sluggification failed")
|
23 |
+
}
|
24 |
+
} catch (err) {
|
25 |
+
slug = `sf-${uuidv4().slice(0, 3)}`
|
26 |
+
}
|
27 |
+
|
28 |
+
const repoName = `${username}/${slug}`
|
29 |
+
|
30 |
+
const repo: RepoDesignation = { type: "space", name: repoName }
|
31 |
+
console.log(`Creating space at ${repoName}${title ? ` (${title})` : ''}`)
|
32 |
|
33 |
await createRepo({
|
34 |
repo,
|
src/generateFiles.mts
CHANGED
@@ -1,56 +1,30 @@
|
|
1 |
import { HfInference } from '@huggingface/inference'
|
2 |
import { RepoFile } from './types.mts'
|
3 |
import { createLlamaPrompt } from './createLlamaPrompt.mts'
|
4 |
-
import { streamlitDoc } from './streamlitDoc.mts'
|
5 |
import { parseTutorial } from './parseTutorial.mts'
|
|
|
|
|
|
|
6 |
|
7 |
-
const
|
8 |
-
|
9 |
-
export const generateFiles = async (prompt: string) => {
|
10 |
if (`${prompt}`.length < 2) {
|
11 |
throw new Error(`prompt too short, please enter at least ${prompt} characters`)
|
12 |
}
|
13 |
|
14 |
-
const prefix
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
role: "system",
|
19 |
-
content: [
|
20 |
-
`You are a Python developer, expert at crafting Streamlit applications to deploy to Hugging Face.`,
|
21 |
-
`Here is an extract from the Streamlit documentation:`,
|
22 |
-
streamlitDoc
|
23 |
-
].filter(item => item).join("\n")
|
24 |
-
},
|
25 |
-
{
|
26 |
-
role: "user",
|
27 |
-
content: `Please write, file by file, the source code for a Streamlit app.
|
28 |
-
|
29 |
-
Please limit yourself to the following Python modules:
|
30 |
-
- numpy
|
31 |
-
- streamlit
|
32 |
-
- matplotlib
|
33 |
|
34 |
-
|
35 |
-
\`\`\`
|
36 |
-
---
|
37 |
-
license: apache-2.0
|
38 |
-
title: <app name>
|
39 |
-
sdk: streamlit
|
40 |
-
emoji: π
|
41 |
-
colorFrom: green
|
42 |
-
colorTo: blue
|
43 |
-
---
|
44 |
-
\`\`\`
|
45 |
-
|
46 |
-
The Streamlit app is about: ${prompt}`,
|
47 |
-
}
|
48 |
-
]) + "\nSure! Here are the source files:\n" + prefix
|
49 |
|
50 |
let tutorial = prefix
|
51 |
|
52 |
try {
|
|
|
|
|
53 |
for await (const output of hf.textGenerationStream({
|
|
|
54 |
model: "codellama/CodeLlama-34b-Instruct-hf",
|
55 |
inputs,
|
56 |
parameters: {
|
@@ -59,7 +33,11 @@ let tutorial = prefix
|
|
59 |
// for "codellama/CodeLlama-34b-Instruct-hf":
|
60 |
// `inputs` tokens + `max_new_tokens` must be <= 8192
|
61 |
// error: `inputs` must have less than 4096 tokens.
|
62 |
-
|
|
|
|
|
|
|
|
|
63 |
return_full_text: false,
|
64 |
}
|
65 |
})) {
|
@@ -67,7 +45,10 @@ let tutorial = prefix
|
|
67 |
tutorial += output.token.text
|
68 |
process.stdout.write(output.token.text)
|
69 |
// res.write(output.token.text)
|
70 |
-
if (tutorial.includes('<|end|>')
|
|
|
|
|
|
|
71 |
break
|
72 |
}
|
73 |
}
|
|
|
1 |
import { HfInference } from '@huggingface/inference'
|
2 |
import { RepoFile } from './types.mts'
|
3 |
import { createLlamaPrompt } from './createLlamaPrompt.mts'
|
|
|
4 |
import { parseTutorial } from './parseTutorial.mts'
|
5 |
+
import { getPythonApp} from './getPythonApp.mts'
|
6 |
+
import { getWebApp } from './getWebApp.mts'
|
7 |
+
import { isPythonAppPrompt } from './isPythonAppPrompt.mts'
|
8 |
|
9 |
+
export const generateFiles = async (prompt: string, token: string) => {
|
|
|
|
|
10 |
if (`${prompt}`.length < 2) {
|
11 |
throw new Error(`prompt too short, please enter at least ${prompt} characters`)
|
12 |
}
|
13 |
|
14 |
+
const { prefix, instructions } =
|
15 |
+
isPythonAppPrompt(prompt)
|
16 |
+
? getPythonApp(prompt)
|
17 |
+
: getWebApp(prompt)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
18 |
|
19 |
+
const inputs = createLlamaPrompt(instructions) + "\nSure! Here are the source files:\n" + prefix
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
20 |
|
21 |
let tutorial = prefix
|
22 |
|
23 |
try {
|
24 |
+
const hf = new HfInference(token)
|
25 |
+
|
26 |
for await (const output of hf.textGenerationStream({
|
27 |
+
// model: "tiiuae/falcon-180B-chat",
|
28 |
model: "codellama/CodeLlama-34b-Instruct-hf",
|
29 |
inputs,
|
30 |
parameters: {
|
|
|
33 |
// for "codellama/CodeLlama-34b-Instruct-hf":
|
34 |
// `inputs` tokens + `max_new_tokens` must be <= 8192
|
35 |
// error: `inputs` must have less than 4096 tokens.
|
36 |
+
|
37 |
+
// for "tiiuae/falcon-180B-chat":
|
38 |
+
// `inputs` tokens + `max_new_tokens` must be <= 8192
|
39 |
+
// error: `inputs` must have less than 4096 tokens.
|
40 |
+
max_new_tokens: 4096,
|
41 |
return_full_text: false,
|
42 |
}
|
43 |
})) {
|
|
|
45 |
tutorial += output.token.text
|
46 |
process.stdout.write(output.token.text)
|
47 |
// res.write(output.token.text)
|
48 |
+
if (tutorial.includes('<|end|>')
|
49 |
+
|| tutorial.includes('[ENDINSTRUCTION]')
|
50 |
+
|| tutorial.includes('[/TASK]')
|
51 |
+
|| tutorial.includes('<|assistant|>')) {
|
52 |
break
|
53 |
}
|
54 |
}
|
src/getPythonApp.mts
ADDED
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import { streamlitDoc } from "./streamlitDoc.mts";
|
2 |
+
|
3 |
+
export function getPythonApp(prompt: string) {
|
4 |
+
const prefix = "# In app.py:\n```"
|
5 |
+
|
6 |
+
const instructions = [
|
7 |
+
{
|
8 |
+
role: "system",
|
9 |
+
content: [
|
10 |
+
`You are a Python developer, expert at crafting Streamlit applications to deploy to Hugging Face.`,
|
11 |
+
`Here is an extract from the Streamlit documentation:`,
|
12 |
+
streamlitDoc
|
13 |
+
].filter(item => item).join("\n")
|
14 |
+
},
|
15 |
+
{
|
16 |
+
role: "user",
|
17 |
+
content: `Please write, file by file, the source code for a Streamlit app.
|
18 |
+
|
19 |
+
Please limit yourself to the following Python modules:
|
20 |
+
- numpy
|
21 |
+
- streamlit
|
22 |
+
- matplotlib
|
23 |
+
|
24 |
+
Don't forget to write a README.md with the following header:
|
25 |
+
\`\`\`
|
26 |
+
---
|
27 |
+
license: apache-2.0
|
28 |
+
title: <app name>
|
29 |
+
sdk: streamlit
|
30 |
+
emoji: π
|
31 |
+
colorFrom: green
|
32 |
+
colorTo: blue
|
33 |
+
---
|
34 |
+
\`\`\`
|
35 |
+
|
36 |
+
The app is about: ${prompt}`,
|
37 |
+
}
|
38 |
+
]
|
39 |
+
|
40 |
+
return { prefix, instructions }
|
41 |
+
}
|
src/getWebApp.mts
ADDED
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import { alpine } from "./alpine.mts"
|
2 |
+
|
3 |
+
export function getWebApp(prompt: string) {
|
4 |
+
const prefix = "# In index.html:\n```"
|
5 |
+
|
6 |
+
const instructions = [
|
7 |
+
{
|
8 |
+
role: "system",
|
9 |
+
content: [
|
10 |
+
`You are a JavaScript developer, expert at crafting applications using AlpineJS, DaisyUI and Tailwind.`,
|
11 |
+
`Here is an extract from the alpine documentation:`,
|
12 |
+
alpine
|
13 |
+
].filter(item => item).join("\n")
|
14 |
+
},
|
15 |
+
{
|
16 |
+
role: "user",
|
17 |
+
content: `Please write, file by file, the source code for a HTML JS app.
|
18 |
+
|
19 |
+
Remember, these library importats are mandatory:
|
20 |
+
- AlpineJS (use "https://cdn.jsdelivr.net/npm/[email protected]/dist/cdn.min.js")
|
21 |
+
- DaisyUI (use "https://cdn.jsdelivr.net/npm/[email protected]/dist/full.css")
|
22 |
+
- Tailwind (use "https://cdn.tailwindcss.com?plugins=forms,typography,aspect-ratio")
|
23 |
+
|
24 |
+
But you can optionally also load those:
|
25 |
+
- Three.js (use "https://cdnjs.cloudflare.com/ajax/libs/three.js/0.156.1/three.min.js")
|
26 |
+
|
27 |
+
The rest should be written using vanilla javascript
|
28 |
+
|
29 |
+
Don't forget to write a README.md with the following header:
|
30 |
+
\`\`\`
|
31 |
+
---
|
32 |
+
license: apache-2.0
|
33 |
+
title: <app name>
|
34 |
+
sdk: static
|
35 |
+
emoji: π¨βπ»
|
36 |
+
colorFrom: yellow
|
37 |
+
colorTo: green
|
38 |
+
---
|
39 |
+
\`\`\`
|
40 |
+
|
41 |
+
The app is about: ${prompt}`,
|
42 |
+
}
|
43 |
+
]
|
44 |
+
|
45 |
+
return { prefix, instructions }
|
46 |
+
}
|
src/index.mts
CHANGED
@@ -1,12 +1,7 @@
|
|
1 |
import express from 'express'
|
2 |
-
import { HfInference } from '@huggingface/inference'
|
3 |
import { createSpace } from './createSpace.mts'
|
4 |
-
import { RepoFile } from './types.mts'
|
5 |
import { generateFiles } from './generateFiles.mts'
|
6 |
|
7 |
-
const hfi = new HfInference(process.env.HF_API_TOKEN)
|
8 |
-
const hf = hfi.endpoint(process.env.HF_ENDPOINT_URL)
|
9 |
-
|
10 |
const app = express()
|
11 |
const port = 7860
|
12 |
|
@@ -50,6 +45,14 @@ app.get('/app', async (req, res) => {
|
|
50 |
return
|
51 |
}
|
52 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
53 |
const id = `${pending.total++}`
|
54 |
console.log(`new request ${id}`)
|
55 |
|
@@ -69,7 +72,7 @@ app.get('/app', async (req, res) => {
|
|
69 |
let files = []
|
70 |
|
71 |
while (nbAttempts-- > 0) {
|
72 |
-
files = await generateFiles(`${req.query.prompt ||Β ""}
|
73 |
if (files.length) {
|
74 |
console.log(`seems like we have ${files.length} files`)
|
75 |
break
|
@@ -78,7 +81,7 @@ app.get('/app', async (req, res) => {
|
|
78 |
|
79 |
console.log("files:", JSON.stringify(files, null, 2))
|
80 |
|
81 |
-
await createSpace(files)
|
82 |
|
83 |
res.write(JSON.stringify(files, null, 2))
|
84 |
res.end()
|
|
|
1 |
import express from 'express'
|
|
|
2 |
import { createSpace } from './createSpace.mts'
|
|
|
3 |
import { generateFiles } from './generateFiles.mts'
|
4 |
|
|
|
|
|
|
|
5 |
const app = express()
|
6 |
const port = 7860
|
7 |
|
|
|
45 |
return
|
46 |
}
|
47 |
|
48 |
+
const token = `${req.query.token}`
|
49 |
+
|
50 |
+
if (!token.startsWith("hf_")) {
|
51 |
+
res.write(`the provided token seems to be invalid`)
|
52 |
+
res.end()
|
53 |
+
return
|
54 |
+
}
|
55 |
+
|
56 |
const id = `${pending.total++}`
|
57 |
console.log(`new request ${id}`)
|
58 |
|
|
|
72 |
let files = []
|
73 |
|
74 |
while (nbAttempts-- > 0) {
|
75 |
+
files = await generateFiles(`${req.query.prompt ||Β ""}`, token)
|
76 |
if (files.length) {
|
77 |
console.log(`seems like we have ${files.length} files`)
|
78 |
break
|
|
|
81 |
|
82 |
console.log("files:", JSON.stringify(files, null, 2))
|
83 |
|
84 |
+
await createSpace(files, token)
|
85 |
|
86 |
res.write(JSON.stringify(files, null, 2))
|
87 |
res.end()
|
src/isPythonAppPrompt.mts
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
export function isPythonAppPrompt(prompt: string) {
|
2 |
+
const lowerCasePrompt = prompt.toLocaleLowerCase()
|
3 |
+
return lowerCasePrompt.includes("python")
|
4 |
+
|| lowerCasePrompt.includes("streamlit")
|
5 |
+
|| lowerCasePrompt.includes("gradio")
|
6 |
+
}
|