PatentToolkit / app.py
thepolymerguy's picture
Update app.py
09d37d3
raw
history blame
6.88 kB
import gradio as gr
def add_text(history, text):
    """Append the user's message to the chat history and clear the textbox.

    Args:
        history: list of [user_text, bot_reply] chat pairs.
        text: the message the user just submitted.

    Returns:
        (updated history, "") — the empty string clears the input textbox.
    """
    # Use a mutable list, not a tuple: bot()/generateresponse() fill in the
    # reply slot of the last entry afterwards, which fails on an immutable pair.
    history = history + [[text, None]]
    return history, ""
def add_file(history, file):
    """Append an uploaded file to the chat history as a pending turn.

    Args:
        history: list of [user_content, bot_reply] chat pairs.
        file: an uploaded-file object exposing a ``.name`` path attribute.

    Returns:
        The updated history; the file path tuple is the "user" side of the pair.
    """
    # Mutable list entry for consistency with add_text(): the reply slot may
    # be assigned later, which an immutable tuple would not allow.
    history = history + [[(file.name,), None]]
    return history
def bot(history):
    """Fill the last turn's pending reply with a canned response.

    Args:
        history: list of (user_text, reply) chat pairs; the final entry's
            reply slot is expected to be unfilled.

    Returns:
        The same history object with the final reply populated.
    """
    response = "**That's cool!**"
    # Rebuild the last pair instead of assigning history[-1][1]: entries
    # created as tuples are immutable and in-place assignment would raise
    # TypeError.
    history[-1] = (history[-1][0], response)
    return history
"""
Placeholder: Alpaca fine-tuned model example.
Required:
!pip install -q datasets loralib sentencepiece
!pip install -q git+https://github.com/zphang/transformers@c3dc391
!pip install -q git+https://github.com/huggingface/peft.git
!pip install bitsandbytes
"""
'''
from peft import PeftModel
from transformers import LLaMATokenizer, LLaMAForCausalLM, GenerationConfig
tokenizer = LLaMATokenizer.from_pretrained("chavinlo/alpaca-native")
model = LLaMAForCausalLM.from_pretrained(
"chavinlo/alpaca-native",
load_in_8bit=True,
device_map="auto",
)
'''
def generateresponse(history):
    """Produce the bot's reply to the most recent user message.

    Currently an echo placeholder; the commented-out section below shows the
    intended Alpaca/LLaMA generation path (requires the model/tokenizer
    globals set up by the disabled loading code near the top of the file).

    Args:
        history: list of (user_text, reply) chat pairs; the last entry holds
            the message to answer.

    Returns:
        The same history object with the final reply filled in.
    """
    # --- Intended model-backed implementation (disabled) -----------------
    # global model, tokenizer
    # user = history[-1][0]
    # PROMPT = f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
    # ### Instruction:
    # {user}
    # ### Response:"""
    # inputs = tokenizer(PROMPT, return_tensors="pt")
    # input_ids = inputs["input_ids"].cuda()
    # generation_config = GenerationConfig(
    #     temperature=0.6,
    #     top_p=0.95,
    #     repetition_penalty=1.15,
    # )
    # generation_output = model.generate(
    #     input_ids=input_ids,
    #     generation_config=generation_config,
    #     return_dict_in_generate=True,
    #     output_scores=True,
    #     max_new_tokens=256,
    # )
    # outputs = [tokenizer.decode(s) for s in generation_output.sequences]
    # response = outputs[0].split('### Response:')[1]
    # ---------------------------------------------------------------------

    user = history[-1][0]
    response = f"You asked: {user}"
    # Rebuild the pair rather than assigning history[-1][1]: add_text stores
    # tuples, and tuple item assignment raises TypeError.
    history[-1] = (history[-1][0], response)
    return history
# Custom Gradio theme: indigo primary colour with small prose text.
# NOTE(review): `.set(prose_text_size=...)` is a gradio 3.x theming API —
# confirm the pinned gradio version before upgrading.
theme = gr.themes.Base(
    primary_hue="indigo",
).set(
    prose_text_size='*text_sm'
)
# Top-level Gradio UI: intro header, one tab per drafting tool, a chatbot
# wired to generateresponse(), and a footer, then app launch.
with gr.Blocks(title='Claimed', theme=theme) as demo:
    gr.Markdown("""
# CLAIMED - A GENERATIVE TOOLKIT FOR PATENT ATTORNEYS 🫑
Hey there, genius!
Welcome to our demo! We've trained Meta's Llama on almost 200k data entries in the question/answer format.
In the future, we are looking to expand our model's capabilities further to assist in a range of IP related tasks.
If you are interested in using a more powerful model that we have trained, or you have any suggestions of features you would like to see us add, please get in touch!
As far as data is concerned, you have nothing to worry about! We don't store any of your inputs to use for further training, we're not OpenAI πŸ‘€. We'd just like to know if this is something people would be interested in using!
Please note that this is for research purposes and shouldn't be used commercially.
None of the outputs should be taken as solid legal advice. If you are an inventor looking to patent an invention, always seek the help of a registered patent attorney.
If you
""")
    # NOTE(review): the header markdown above ends mid-sentence ("If you") —
    # looks like truncated copy; confirm the intended wording. The "👀" emoji
    # also appears mojibake-encoded in two markdown strings; verify encoding.
    with gr.Tab("Text Drafter"):
        gr.Markdown("""
You can use this tool to expand your idea using Claim Language.
Example input: A device to help the visually impaired using proprioception.
Output:
""")
        text_input = gr.Textbox()
        text_output = gr.Textbox()
        # NOTE(review): empty button label and no .click() handler wired —
        # this tab is currently non-functional.
        text_button = gr.Button("")
    with gr.Tab("Description Generator"):
        gr.Markdown("""
Patent descriptions are loooonggg and boring! You can use this tool to
Example input: A device to help the visually impaired using proprioception.
Output:
""")
        # NOTE(review): text1/text2 are re-bound in every tab below, so only
        # the last tab's boxes remain referenced, and none are wired to any
        # handler — presumably placeholders for future wiring.
        with gr.Row(scale=1, min_width=600):
            text1 = gr.Textbox(label="Input",
                placeholder='Type in your idea here!')
            text2 = gr.Textbox(label="Output")
    with gr.Tab("Knowledge Graph"):
        gr.Markdown("""
Are you more of a visual type? Use this tool to generate graphical representations of your ideas and how their features interlink.
Example input: A device to help the visually impaired using proprioception.
Output:
""")
        with gr.Row(scale=1, min_width=600):
            text1 = gr.Textbox(label="Input",
                placeholder='Type in your idea here!')
            text2 = gr.Textbox(label="Output")
    with gr.Tab("Prosecution Ideator"):
        gr.Markdown("""
Below is our
Example input: A device to help the visually impaired using proprioception.
Output:
""")
        with gr.Row(scale=1, min_width=600):
            text1 = gr.Textbox(label="Input",
                placeholder='Type in your idea here!')
            text2 = gr.Textbox(label="Output")
    with gr.Tab("Claimed Infill"):
        gr.Markdown("""
Below is our
Example input: A device to help the visually impaired using proprioception.
Output:
""")
        with gr.Row(scale=1, min_width=600):
            text1 = gr.Textbox(label="Input",
                placeholder='Type in your idea here!')
            text2 = gr.Textbox(label="Output")
    gr.Markdown("""
# THE CHATBOT
Do you want a bit more freedom over the outputs you generate? No worries, you can use a chatbot version of our model below. You can ask it anything by the way, just try to keep it PG.
If you're concerned about an output from the model, hit the flag button and we will use that information to improve the model.
""")
    # NOTE(review): .style() is a gradio 3.x API removed in gradio 4 —
    # confirm the pinned gradio version.
    chatbot = gr.Chatbot([], elem_id="Claimed Assistant").style(height=500)
    with gr.Row():
        with gr.Column(scale=0.85):
            txt = gr.Textbox(
                show_label=False,
                placeholder="Enter text and submit",
            ).style(container=False)
        with gr.Column(scale=0.15, min_width=0):
            # NOTE(review): this Submit button has no click handler; only
            # pressing Enter in the textbox triggers the bot.
            btn = gr.Button("Submit")
    # Enter in the textbox: append the user turn and clear the box, then
    # generate the reply into the chatbot.
    txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(
        generateresponse, chatbot, chatbot
    )
    gr.Markdown("""
# HAVE AN IDEA? GET IT CLAIMED
In the future, we are looking to expand our model's capabilities further to assist in a range of IP related tasks.
If you are interested in using a more powerful model that we have trained, or you have any suggestions of features you would like to see us add, please get in touch!
As far as data is concerned, you have nothing to worry about! We don't store any of your inputs to use for further training, we're not OpenAI πŸ‘€. We'd just like to know if this is something people would be interested in using!
""")
demo.launch()