ManojINaik committed
Commit 4776181
1 Parent(s): 473963a

Update main.py

Files changed (1):
  1. main.py +32 -29
main.py CHANGED
@@ -3,44 +3,43 @@ from pydantic import BaseModel
 from huggingface_hub import InferenceClient
 import uvicorn
 
-
 app = FastAPI()
 
-client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
+# Initialize the InferenceClient with the specified model
+client = InferenceClient("nvidia/Llama-3.1-Nemotron-70B-Instruct-HF")
 
-class Item(BaseModel):
-    prompt: str
-    history: list
-    system_prompt: str
+# Define the structure of the request body
+class CourseRequest(BaseModel):
+    course_name: str
+    history: list = []  # Keeping history optional
     temperature: float = 0.0
     max_new_tokens: int = 1048
     top_p: float = 0.15
     repetition_penalty: float = 1.0
 
-def format_prompt(message, history):
+# Format the prompt for the model
+def format_prompt(course_name, history):
     prompt = "<s>"
     for user_prompt, bot_response in history:
-        prompt += f"[INST] {user_prompt} [/INST]"
-        prompt += f" {bot_response}</s> "
-    prompt += f"[INST] {message} [/INST]"
+        prompt += f"[INST] {user_prompt} [/INST] {bot_response} </s> "
+    prompt += f"[INST] Generate a roadmap for the course: {course_name} [/INST]"
     return prompt
 
-def generate(item: Item):
-    temperature = float(item.temperature)
-    if temperature < 1e-2:
-        temperature = 1e-2
-    top_p = float(item.top_p)
-
-    generate_kwargs = dict(
-        temperature=temperature,
-        max_new_tokens=item.max_new_tokens,
-        top_p=top_p,
-        repetition_penalty=item.repetition_penalty,
-        do_sample=True,
-        seed=42,
-    )
-
-    formatted_prompt = format_prompt(f"{item.system_prompt}, {item.prompt}", item.history)
+# Generate text using the specified parameters
+def generate(course_request: CourseRequest):
+    temperature = max(float(course_request.temperature), 1e-2)
+    top_p = float(course_request.top_p)
+
+    generate_kwargs = {
+        'temperature': temperature,
+        'max_new_tokens': course_request.max_new_tokens,
+        'top_p': top_p,
+        'repetition_penalty': course_request.repetition_penalty,
+        'do_sample': True,
+        'seed': 42,
+    }
+
+    formatted_prompt = format_prompt(course_request.course_name, course_request.history)
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""
 
@@ -48,7 +47,11 @@ def generate(item: Item):
     output += response.token.text
     return output
 
-@app.post("/generate/")
-async def generate_text(item: Item):
-    return {"response": generate(item)}
+# Define the API endpoint for generating course roadmaps
+@app.post("/generate-roadmap/")
+async def generate_roadmap(course_request: CourseRequest):
+    return {"roadmap": generate(course_request)}
 
+# Run the application (uncomment the next two lines if running this as a standalone script)
+# if __name__ == "__main__":
+#     uvicorn.run(app, host="0.0.0.0", port=8000)
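
For reference, a minimal sketch of how a client might call the updated endpoint once the app is running; the host, port, and example course name below are assumptions for illustration, not part of this commit:

import requests

# Assumed local deployment; adjust the host/port to match your setup.
API_URL = "http://localhost:8000/generate-roadmap/"

payload = {
    "course_name": "Introduction to Machine Learning",  # hypothetical example value
    "temperature": 0.7,       # overrides the default of 0.0
    "max_new_tokens": 512,    # overrides the default of 1048
}

response = requests.post(API_URL, json=payload)
response.raise_for_status()
print(response.json()["roadmap"])  # server joins the streamed tokens into one string

The request body mirrors the CourseRequest model, so omitted fields (history, top_p, repetition_penalty) fall back to their declared defaults.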