EmicoBinsfinder committed on
Commit
6ce3789
1 Parent(s): e785b84

Update app.py

Browse files

Added Extra Features and Cleaned Output

Files changed (1) hide show
  1. app.py +142 -78
app.py CHANGED
@@ -198,7 +198,7 @@ def generateresponse(history, temp, top_p, tokens):
198
  "text-generation",
199
  model=model,
200
  tokenizer=tokenizer,
201
- max_length=512,
202
  temperature=temp,
203
  top_p=top_p,
204
  repetition_penalty=1.15
@@ -208,25 +208,24 @@ def generateresponse(history, temp, top_p, tokens):
208
  outputs = outputs[0]['generated_text']
209
  outputs = str(outputs).split('### Response')[1]
210
 
211
- response = f"Response: {outputs}"
212
  return response
213
 
214
-
215
  def run_model(userin, dropd):
216
 
217
  global model
218
  global tokenizer
219
 
220
- if dropd in ["Apparatus Claim", "Method of Use Claim", "Method Claim"]:
221
  PROMPT = claim_selector(userin, dropd)
222
- elif dropd in ["Generate a Description Paragraph", "Generate a Abstract", "What are the Benefits/Technical Effects"]:
223
  PROMPT = desc_selector(userin, dropd)
224
 
225
  pipe = pipeline(
226
  "text-generation",
227
  model=model,
228
  tokenizer=tokenizer,
229
- max_length=512,
230
  temperature=0.7,
231
  top_p=0.95,
232
  repetition_penalty=1.15
@@ -236,11 +235,12 @@ def run_model(userin, dropd):
236
 
237
  outputs = outputs[0]['generated_text']
238
  outputs = str(outputs).split('### Response')[1]
 
239
 
240
- response = f"Response: {outputs}"
241
  return response
242
 
243
- def prosecute(application, priorart, dropd):
244
 
245
  global model
246
  global tokenizer
@@ -249,44 +249,100 @@ def prosecute(application, priorart, dropd):
249
  "text-generation",
250
  model=model,
251
  tokenizer=tokenizer,
252
- max_length=512,
253
  temperature=0.7,
254
  top_p=0.95,
255
  repetition_penalty=1.15
256
  )
257
 
258
  PROMPT = f"""
259
- Below is an instruction that describes a task. Write a response that appropriately completes the request.
260
- ### Instruction:
261
- Draft a patent novelty/inventive step argument using the {dropd} approach:
262
 
263
  Application: {application}
264
 
265
  Prior Art: {priorart}
266
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
267
  ### Response:"""
268
 
269
  outputs = pipe(PROMPT)
270
 
271
  outputs = outputs[0]['generated_text']
272
  outputs = str(outputs).split('### Response')[1]
 
273
 
274
- response = f"Response: {outputs}"
275
  return response
276
 
277
  def claim_selector(userin, dropd):
278
 
279
- PROMPT = f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
280
- ### Instruction:
281
- Draft a patent claim 1 for {dropd} comprising {userin}
282
- ### Response:"""
283
 
284
  return PROMPT
285
 
286
  def desc_selector(userin, dropd):
287
 
288
- PROMPT = f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
289
- ### Instruction:
290
  {dropd} for a patent application for the following invention: {userin}
291
  ### Response:"""
292
 
@@ -301,27 +357,38 @@ theme = gr.themes.Base(
301
  )
302
 
303
  with gr.Blocks(title='Claimed', theme=theme) as demo:
304
-
305
  gr.Markdown("""
306
- # CLAIMED - A GENERATIVE TOOLKIT FOR PATENT ATTORNEYS
307
- The patenting process can be incredibly time-consuming and expensive. We're on a mission to change that.
308
 
309
  Welcome to our demo! We've trained Meta's Llama on over 200k entries, with a focus on tasks related to the intellectual property domain.
310
  Please note that this is for research purposes and shouldn't be used commercially.
311
  None of the outputs of this model, taken in part or in its entirety, constitutes legal advice. If you are seeking protection for you intellectual property, consult a registered patent/trademark attorney.
312
-
313
  """)
314
-
 
 
 
 
 
 
 
 
 
 
 
 
 
315
  with gr.Tab("Claim Drafter"):
316
  gr.Markdown("""
317
  Use this tool to expand your idea into the technical language of a patent claim. You can specify the type of claim you want using the dropdown menu.
318
  """)
319
- Claimchoices = gr.Dropdown(["Apparatus Claim", "Method of Use Claim", "Method Claim", ], label='Choose Claim Type Here')
320
 
321
  with gr.Row(scale=1, min_width=600):
322
  text1 = gr.Textbox(label="Input",
323
- placeholder='Type in your idea here!')
324
- text2 = gr.Textbox(label="Output")
325
  with gr.Row():
326
  btn = gr.Button("Submit")
327
  btn.click(fn=claim_selector, inputs=[text1, Claimchoices]).then(run_model, inputs=[text1, Claimchoices], outputs=text2)
@@ -330,45 +397,30 @@ with gr.Blocks(title='Claimed', theme=theme) as demo:
330
  gr.Markdown("""
331
  Use this tool to expand your patent claim into a description. You can also use this tool to generate abstracts and give you ideas about the benefit of an invention by changing the settings in the dropdown menu.
332
  """)
333
- Descriptionchoices = gr.Dropdown(["Generate a Description Paragraph", "Generate a Abstract", "What are the Benefits/Technical Effects"], label='Choose Generation Type Here')
334
  with gr.Row(scale=1, min_width=600):
335
 
336
  text1 = gr.Textbox(label="Input",
337
- placeholder='Type in your idea here!')
338
- text2 = gr.Textbox(label="Output")
339
  with gr.Row():
340
  btn = gr.Button("Submit")
341
  btn.click(fn=desc_selector, inputs=[text1, Descriptionchoices]).then(run_model, inputs=[text1, Descriptionchoices], outputs=text2)
342
 
343
  with gr.Tab("Prosecution Beta"):
344
  gr.Markdown("""
345
- Use this tool to generate ideas for how to overcome objections to novelty and inventive step. Outputs are in the EPO's problem-solution format. For now, this tool only works on relatively short inputs, so maybe try with some simple claims or short paragraphs.
346
  """)
347
- dropd = gr.Dropdown(["Problem Solution", "Windsurfing/Pozzoli"], label='Choose Generation Type Here')
348
  with gr.Row(scale=1, min_width=600):
349
  with gr.Column():
350
- application = gr.Text(label="Present Invention")
351
- priorart = gr.Text(label="Prior Art Document")
352
- text2 = gr.Textbox(label="Output")
 
353
  with gr.Row():
354
  btn = gr.Button("Submit")
355
- btn.click(fn=prosecute, inputs=[application, priorart, dropd], outputs=text2)
356
-
357
-
358
- # with gr.Tab("Claimed Infill"):
359
- # gr.Markdown("""
360
- # Below is our
361
-
362
- # Example input: A device to help the visually impaired using proprioception.
363
-
364
- # Output:
365
- # """)
366
- # with gr.Row(scale=1, min_width=600):
367
- # text1 = gr.Textbox(label="Input",
368
- # placeholder='Type in your idea here!')
369
- # text2 = gr.Textbox(label="Output")
370
-
371
-
372
  with gr.Tab("CPC Search Tool"):
373
  gr.Markdown("""
374
  Use this tool to classify your invention according to the Cooperative Patent Classification system.
@@ -377,36 +429,48 @@ with gr.Blocks(title='Claimed', theme=theme) as demo:
377
 
378
  ClassifyChoices = gr.Dropdown(["Google Patent Search", "Espacenet Patent Search"], label='Choose Search Type Here')
379
  with gr.Row(scale=1, min_width=600):
380
- userin = gr.Textbox(label="Input",
381
- placeholder='Type in your Claim/Description/Abstract Here')
382
- output = gr.Textbox(label="Output")
383
  with gr.Row():
384
  classify_btn = gr.Button("Classify")
385
  classify_btn.click(fn=classifier, inputs=[userin, ClassifyChoices] , outputs=output)
386
 
387
- gr.Markdown("""
388
- # THE CHATBOT
389
- Do you want a bit more freedom over the outputs you generate? No worries, you can use a chatbot version of our model below. You can ask it anything.
390
- If you're concerned about a particular output, hit the flag button and we will use that information to improve the model.
391
- """)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
392
 
393
- chatbot = gr.Chatbot([], elem_id="Claimed Assistant").style(height=500)
394
- with gr.Row():
395
- with gr.Column(scale=1):
396
- txt = gr.Textbox(
397
- show_label=False,
398
- placeholder="Enter text and submit",
399
- ).style(container=False)
400
-
401
- with gr.Row():
402
- with gr.Accordion("Parameters"):
403
- temp = gr.Slider(minimum=0, maximum=1, value=0.6, label="Temperature", step=0.1)
404
- top_p = gr.Slider(minimum=0.5, maximum=1, value=0.95, label="Top P", step=0.1)
405
- tokens = gr.Slider(minimum=5, maximum=512, value=256, label="Max Tokens", step=1)
406
-
407
- txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(
408
- generateresponse, [chatbot, temp, top_p, tokens], chatbot)
409
-
410
 
411
  gr.Markdown("""
412
  # HAVE AN IDEA? GET IT CLAIMED
@@ -415,6 +479,6 @@ with gr.Blocks(title='Claimed', theme=theme) as demo:
415
  As far as data is concerned, you have nothing to worry about! We don't store any of your inputs to use for further training, we're not OpenAI.
416
 
417
  """)
418
- demo.queue(concurrency_count=9)
419
  demo.launch(share=True)
420
 
 
198
  "text-generation",
199
  model=model,
200
  tokenizer=tokenizer,
201
+ max_length=2048,
202
  temperature=temp,
203
  top_p=top_p,
204
  repetition_penalty=1.15
 
208
  outputs = outputs[0]['generated_text']
209
  outputs = str(outputs).split('### Response')[1]
210
 
211
+ response = f"Response{outputs}"
212
  return response
213
 
 
214
  def run_model(userin, dropd):
215
 
216
  global model
217
  global tokenizer
218
 
219
+ if dropd in ["An apparatus", "A method of use", "A method", "A method of making", "A system"]:
220
  PROMPT = claim_selector(userin, dropd)
221
+ elif dropd in ["Generate a Detailed Description Paragraph", "Generate a Abstract", "What are the Benefits/Technical Effects"]:
222
  PROMPT = desc_selector(userin, dropd)
223
 
224
  pipe = pipeline(
225
  "text-generation",
226
  model=model,
227
  tokenizer=tokenizer,
228
+ max_length=2048,
229
  temperature=0.7,
230
  top_p=0.95,
231
  repetition_penalty=1.15
 
235
 
236
  outputs = outputs[0]['generated_text']
237
  outputs = str(outputs).split('### Response')[1]
238
+ outputs = outputs.split('\n \n \n \n*')[0]
239
 
240
+ response = f"Response{outputs}"
241
  return response
242
 
243
+ def prosecute(application, priorart):
244
 
245
  global model
246
  global tokenizer
 
249
  "text-generation",
250
  model=model,
251
  tokenizer=tokenizer,
252
+ max_length=2048,
253
  temperature=0.7,
254
  top_p=0.95,
255
  repetition_penalty=1.15
256
  )
257
 
258
  PROMPT = f"""
259
+ Draft an argument for the patentability in favour of the application using the European Patent Office Problem Solution. Start by summarising the difference between the application and the prior art:
 
 
260
 
261
  Application: {application}
262
 
263
  Prior Art: {priorart}
264
 
265
+ ### Response: The objective technical problem solved by the present invention"""
266
+
267
+ outputs = pipe(PROMPT)
268
+
269
+ outputs = outputs[0]['generated_text']
270
+ outputs = str(outputs).split('### Response')[1]
271
+ outputs = outputs.split('\n \n \n \n*')[0]
272
+
273
+ response = f"Response{outputs}"
274
+ return response
275
+
276
+ def ideator(userin):
277
+
278
+ global model
279
+ global tokenizer
280
+
281
+ pipe = pipeline(
282
+ "text-generation",
283
+ model=model,
284
+ tokenizer=tokenizer,
285
+ max_length=2048,
286
+ temperature=0.7,
287
+ top_p=0.95,
288
+ repetition_penalty=1.15
289
+ )
290
+
291
+ PROMPT = f"""
292
+ How can I make {userin}
293
+
294
+ ### Response: You could implement the invention as follows:"""
295
+
296
+ outputs = pipe(PROMPT)
297
+
298
+ outputs = outputs[0]['generated_text']
299
+ outputs = str(outputs).split('### Response')[1]
300
+ outputs = outputs.split('\n \n \n \n*')[0]
301
+
302
+
303
+ response = f"Response{outputs}"
304
+ return response
305
+
306
+ def Chat(userin):
307
+
308
+ global model
309
+ global tokenizer
310
+
311
+ pipe = pipeline(
312
+ "text-generation",
313
+ model=model,
314
+ tokenizer=tokenizer,
315
+ max_length=2048,
316
+ temperature=0.7,
317
+ top_p=0.95,
318
+ repetition_penalty=1.15
319
+ )
320
+
321
+ PROMPT = f"""Below is a query from a user. Respond appropriately to the query.
322
+ ### Query:
323
+ {userin}
324
  ### Response:"""
325
 
326
  outputs = pipe(PROMPT)
327
 
328
  outputs = outputs[0]['generated_text']
329
  outputs = str(outputs).split('### Response')[1]
330
+ outputs = outputs.split('\n \n \n \n*')[0]
331
 
332
+ response = f"Response{outputs}"
333
  return response
334
 
335
  def claim_selector(userin, dropd):
336
 
337
+ PROMPT = f"""
338
+ Draft a patent claim 1 for {dropd} for the following invention: {userin}
339
+ ### Response:{dropd} comprising:"""
 
340
 
341
  return PROMPT
342
 
343
  def desc_selector(userin, dropd):
344
 
345
+ PROMPT = f"""
 
346
  {dropd} for a patent application for the following invention: {userin}
347
  ### Response:"""
348
 
 
357
  )
358
 
359
  with gr.Blocks(title='Claimed', theme=theme) as demo:
 
360
  gr.Markdown("""
361
+ # CLAIMED - A GENERATIVE TOOLKIT FOR PATENT ATTORNEYS AND INVENTORS
362
+ The patenting process can be complex, time-consuming and expensive. We believe that AI will one day solve these problems.
363
 
364
  Welcome to our demo! We've trained Meta's Llama on over 200k entries, with a focus on tasks related to the intellectual property domain.
365
  Please note that this is for research purposes and shouldn't be used commercially.
366
  None of the outputs of this model, taken in part or in its entirety, constitutes legal advice. If you are seeking protection for you intellectual property, consult a registered patent/trademark attorney.
 
367
  """)
368
+
369
+ with gr.Tab("Ideator"):
370
+ gr.Markdown("""
371
+ Use this tool to generate ideas!
372
+ """)
373
+ with gr.Row(scale=1, min_width=600):
374
+ with gr.Column():
375
+ userin = gr.Text(label="Input", lines=5)
376
+ with gr.Column():
377
+ text2 = gr.Textbox(label="Output", lines=5)
378
+ with gr.Row():
379
+ btn = gr.Button("Submit")
380
+ btn.click(fn=ideator, inputs=[userin], outputs=text2)
381
+
382
  with gr.Tab("Claim Drafter"):
383
  gr.Markdown("""
384
  Use this tool to expand your idea into the technical language of a patent claim. You can specify the type of claim you want using the dropdown menu.
385
  """)
386
+ Claimchoices = gr.Dropdown(["An apparatus", "A method of use", "A method", "A method of making", "A system"], label='Choose Claim Type Here')
387
 
388
  with gr.Row(scale=1, min_width=600):
389
  text1 = gr.Textbox(label="Input",
390
+ placeholder='Type in your idea here!', lines=5)
391
+ text2 = gr.Textbox(label="Output", lines=5)
392
  with gr.Row():
393
  btn = gr.Button("Submit")
394
  btn.click(fn=claim_selector, inputs=[text1, Claimchoices]).then(run_model, inputs=[text1, Claimchoices], outputs=text2)
 
397
  gr.Markdown("""
398
  Use this tool to expand your patent claim into a description. You can also use this tool to generate abstracts and give you ideas about the benefit of an invention by changing the settings in the dropdown menu.
399
  """)
400
+ Descriptionchoices = gr.Dropdown(["Generate a Detailed Description Paragraph", "Generate a Abstract", "What are the Benefits/Technical Effects"], label='Choose Generation Type Here')
401
  with gr.Row(scale=1, min_width=600):
402
 
403
  text1 = gr.Textbox(label="Input",
404
+ placeholder='Type in your idea here!', lines=5)
405
+ text2 = gr.Textbox(label="Output", lines=5)
406
  with gr.Row():
407
  btn = gr.Button("Submit")
408
  btn.click(fn=desc_selector, inputs=[text1, Descriptionchoices]).then(run_model, inputs=[text1, Descriptionchoices], outputs=text2)
409
 
410
  with gr.Tab("Prosecution Beta"):
411
  gr.Markdown("""
412
+ Use this tool to generate ideas for how to overcome objections to novelty and inventive step. For now, this tool only works on relatively short inputs, so maybe try with some simple claims or short paragraphs.
413
  """)
 
414
  with gr.Row(scale=1, min_width=600):
415
  with gr.Column():
416
+ application = gr.Text(label="Present Invention", lines=5)
417
+ priorart = gr.Text(label="Prior Art Document", lines=5)
418
+ with gr.Column():
419
+ text2 = gr.Textbox(label="Output", lines=5)
420
  with gr.Row():
421
  btn = gr.Button("Submit")
422
+ btn.click(fn=prosecute, inputs=[application, priorart], outputs=text2)
423
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
424
  with gr.Tab("CPC Search Tool"):
425
  gr.Markdown("""
426
  Use this tool to classify your invention according to the Cooperative Patent Classification system.
 
429
 
430
  ClassifyChoices = gr.Dropdown(["Google Patent Search", "Espacenet Patent Search"], label='Choose Search Type Here')
431
  with gr.Row(scale=1, min_width=600):
432
+ userin = gr.Textbox(label="Input", placeholder='Type in your Claim/Description/Abstract Here',lines=5)
433
+ output = gr.Textbox(label="Output", lines=5)
 
434
  with gr.Row():
435
  classify_btn = gr.Button("Classify")
436
  classify_btn.click(fn=classifier, inputs=[userin, ClassifyChoices] , outputs=output)
437
 
438
+ with gr.Tab("Chat"):
439
+ gr.Markdown("""
440
+ Do you want a bit more freedom over the outputs you generate? No problem! You can use a chatbot version of our model below. You can ask it anything.
441
+ If you're concerned about a particular output, hit the flag button and we will use that information to improve the model.
442
+ """)
443
+ with gr.Row(scale=1, min_width=600):
444
+ with gr.Column():
445
+ userin = gr.Text(label="Question", lines=5)
446
+ with gr.Column():
447
+ text2 = gr.Textbox(label="Answer", lines=5)
448
+ with gr.Row():
449
+ btn = gr.Button("Submit")
450
+ btn.click(fn=Chat, inputs=[userin], outputs=text2)
451
+
452
+ # gr.Markdown("""
453
+ # # THE CHATBOT
454
+ # Do you want a bit more freedom over the outputs you generate? No problem! You can use a chatbot version of our model below. You can ask it anything.
455
+ # If you're concerned about a particular output, hit the flag button and we will use that information to improve the model.
456
+ # """)
457
 
458
+ # chatbot = gr.Chatbot([], elem_id="Claimed Assistant").style(height=500)
459
+ # with gr.Row():
460
+ # with gr.Column(scale=1):
461
+ # txt = gr.Textbox(
462
+ # show_label=False,
463
+ # placeholder="Enter text and submit",
464
+ # ).style(container=False)
465
+ #
466
+ # with gr.Row():
467
+ # with gr.Accordion("Parameters"):
468
+ # temp = gr.Slider(minimum=0, maximum=1, value=0.6, label="Temperature", step=0.1)
469
+ # top_p = gr.Slider(minimum=0.5, maximum=1, value=0.95, label="Top P", step=0.1)
470
+ # tokens = gr.Slider(minimum=5, maximum=1024, value=256, label="Max Tokens", step=1)
471
+ #
472
+ # txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(
473
+ # generateresponse, [chatbot, temp, top_p, tokens], chatbot)
 
474
 
475
  gr.Markdown("""
476
  # HAVE AN IDEA? GET IT CLAIMED
 
479
  As far as data is concerned, you have nothing to worry about! We don't store any of your inputs to use for further training, we're not OpenAI.
480
 
481
  """)
482
+ demo.queue(max_size=15)
483
  demo.launch(share=True)
484