KarthickAdopleAI committed
Commit f5244b8
Parent: b6c4bbc

Update app.py

Files changed (1)
  1. app.py +57 -32
app.py CHANGED
@@ -1,5 +1,5 @@
 import os
-import openai
+from openai import AzureOpenAI
 import PyPDF2
 import gradio as gr
 import docx
@@ -8,7 +8,10 @@ import re
 
 class Resume_Overall:
     def __init__(self):
-        pass
+        self.client = AzureOpenAI(api_key=os.getenv("AZURE_OPENAI_KEY"),
+                                  api_version="2023-07-01-preview",
+                                  azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
+                                  )
 
     def extract_text_from_file(self,file_path):
         # Get the file extension
@@ -48,17 +51,22 @@ class Resume_Overall:
         resume = self.extract_text_from_file(resume_path)
 
 
+
         # Define the prompt or input for the model
-        prompt = f"""Analyze the resume to generate online courses with website links to improve skills following resume delimitted by triple backticks. Generate atmost five courses.
+        conversation = [
+            {"role": "system", "content": "You are a Resume Assistant."},
+            {"role": "user", "content": f"""Analyze the resume to generate online courses with website links to improve skills following resume delimitted by triple backticks. Generate atmost five courses.
         result format should be:
         course:[course].
         website link:[website link]
         ```{resume}```
-        """
+        """}
+        ]
+
 
         # Generate a response from the GPT-3 model
-        response = openai.Completion.create(
-            engine='text-davinci-003',
+        chat_completion = self.client.chat.completions.create(
+            model = "ChatGPT",
             prompt=prompt,
             max_tokens=200,
             temperature=0,
@@ -67,7 +75,7 @@ class Resume_Overall:
         )
 
         # Extract the generated text from the API response
-        generated_text = response.choices[0].text.strip()
+        generated_text = chat_completion.choices[0].message.content
 
         return generated_text
     def summary_response(self,resume_path):
@@ -76,13 +84,16 @@ class Resume_Overall:
 
 
         # Define the prompt or input for the model
-        prompt = f"""Analyze the resume to write the summary for following resume delimitted by triple backticks.
+        conversation = [
+            {"role": "system", "content": "You are a Resume Summarizer."},
+            {"role": "user", "content": f"""Analyze the resume to write the summary for following resume delimitted by triple backticks.
         ```{resume}```
-        """
+        """}
+        ]
 
         # Generate a response from the GPT-3 model
-        response = openai.Completion.create(
-            engine='text-davinci-003',
+        chat_completion = self.client.chat.completions.create(
+            model = "ChatGPT",
             prompt=prompt,
             max_tokens=200,
             temperature=0,
@@ -91,7 +102,7 @@ class Resume_Overall:
         )
 
         # Extract the generated text from the API response
-        generated_text = response.choices[0].text.strip()
+        generated_text = chat_completion.choices[0].message.content
 
         return generated_text
 
@@ -102,13 +113,16 @@ class Resume_Overall:
 
 
         # Define the prompt or input for the model
-        prompt = f"""Find Education Gaps in given resume. Find Skills in resume.
+        conversation = [
+            {"role": "system", "content": "You are a Resume Assistant."},
+            {"role": "user", "content": f"""Find Education Gaps in given resume. Find Skills in resume.
         ```{resume}```
-        """
+        """}
+        ]
 
         # Generate a response from the GPT-3 model
-        response = openai.Completion.create(
-            engine='text-davinci-003', # Choose the GPT-3 engine you want to use
+        chat_completion = self.client.chat.completions.create(
+            model = "ChatGPT", # Choose the GPT-3 engine you want to use
             prompt=prompt,
             max_tokens=100, # Set the maximum number of tokens in the generated response
             temperature=0, # Controls the randomness of the output. Higher values = more random, lower values = more focused
@@ -117,21 +131,25 @@ class Resume_Overall:
         )
 
         # Extract the generated text from the API response
-        generated_text = response.choices[0].text.strip()
+        generated_text = chat_completion.choices[0].message.content
 
         return generated_text
 
     def _generate_job_list(self, resume: str) -> str:
-        prompt = f"List out perfect job roles for based on resume informations:{resume}"
-        response = openai.Completion.create(
-            engine='text-davinci-003',
+        conversation = [
+            {"role": "system", "content": "You are a Resume Assistant."},
+            {"role": "user", "content": f"List out perfect job roles for based on resume informations:{resume}"}
+        ]
+
+        chat_completion = self.client.chat.completions.create(
+            model = "ChatGPT",
             prompt=prompt,
             max_tokens=100,
             temperature=0,
             n=1,
             stop=None,
         )
-        generated_text = response.choices[0].text.strip()
+        generated_text = chat_completion.choices[0].message.content
         return generated_text
 
 
@@ -143,8 +161,9 @@ class Resume_Overall:
 
     def generate_job_description(self, role, experience):
         # Generate a response from the GPT-3 model
-
-        prompt = f"""Your task is generate Job description for this {role} with {experience} years of experience.
+        conversation = [
+            {"role": "system", "content": "You are a Resume Assistant."},
+            {"role": "user", "content": f"""Your task is generate Job description for this {role} with {experience} years of experience.
         Job Description Must have
         1. Job Title
         2. Job Summary : [200 words]
@@ -152,16 +171,18 @@ class Resume_Overall:
         4. Required Skills : Six Skills
         5. Qualifications
         These topics must have in that Generated Job Description.
-        """
-        response = openai.Completion.create(
-            engine='text-davinci-003', # Choose the GPT-3 engine you want to use
+        """}
+        ]
+
+        chat_completion = self.client.chat.completions.create(
+            model = "ChatGPT", # Choose the GPT-3 engine you want to use
             prompt=prompt,
             max_tokens=500, # Set the maximum number of tokens in the generated response
             temperature=0.5, # Controls the randomness of the output. Higher values = more random, lower values = more focused
         )
 
         # Extract the generated text from the API response
-        generated_text = response.choices[0].text.strip()
+        generated_text = chat_completion.choices[0].message.content
 
         return generated_text
 
@@ -171,13 +192,17 @@ class Resume_Overall:
 
 
         # Define the prompt or input for the model
-        prompt = f"""Generate interview questions for screening following job_description delimitted by triple backticks. Generate atmost ten questions.
+        conversation = [
+            {"role": "system", "content": "You are a Resume Assistant."},
+            {"role": "user", "content": f"""Generate interview questions for screening following job_description delimitted by triple backticks. Generate atmost ten questions.
         ```{job_description}```
-        """
+        """}
+        ]
+
 
         # Generate a response from the GPT-3 model
-        response = openai.Completion.create(
-            engine='text-davinci-003', # Choose the GPT-3 engine you want to use
+        chat_completion = self.client.chat.completions.create(
+            model = "ChatGPT", # Choose the GPT-3 engine you want to use
             prompt=prompt,
             max_tokens=200, # Set the maximum number of tokens in the generated response
             temperature=0, # Controls the randomness of the output. Higher values = more random, lower values = more focused
@@ -186,7 +211,7 @@ class Resume_Overall:
         )
 
         # Extract the generated text from the API response
-        generated_text = response.choices[0].text.strip()
+        generated_text = chat_completion.choices[0].message.content
 
         return generated_text
 
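Note on the migrated calls: every chat.completions.create(...) hunk above still passes prompt=prompt, even though the old prompt = f"""...""" assignments were removed, and the openai>=1.x chat completions API takes a messages list rather than a prompt string. Below is a minimal sketch of how one of these calls would typically be written against the new client. The deployment name "ChatGPT" and the environment variable names are taken from this commit; the messages=conversation argument and the example resume text are illustrative and not part of the diff.

import os
from openai import AzureOpenAI

client = AzureOpenAI(
    api_key=os.getenv("AZURE_OPENAI_KEY"),
    api_version="2023-07-01-preview",
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
)

# Illustrative input; in app.py this comes from extract_text_from_file(resume_path).
resume = "...extracted resume text..."

conversation = [
    {"role": "system", "content": "You are a Resume Assistant."},
    {"role": "user", "content": f"Suggest online courses to improve the skills in this resume:\n```{resume}```"},
]

chat_completion = client.chat.completions.create(
    model="ChatGPT",          # Azure deployment name used in this commit
    messages=conversation,    # chat completions take messages, not prompt
    max_tokens=200,
    temperature=0,
)

generated_text = chat_completion.choices[0].message.content
print(generated_text)

The same substitution of messages=conversation for prompt=prompt would apply to each of the methods touched in this commit.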