from dora import DoraStatus import pylcs import textwrap import os import pyarrow as pa import numpy as np from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline import json import re def extract_python_code_blocks(text): """ Extracts Python code blocks from the given text that are enclosed in triple backticks with a python language identifier. Parameters: - text: A string that may contain one or more Python code blocks. Returns: - A list of strings, where each string is a block of Python code extracted from the text. """ pattern = r"```python\n(.*?)\n```" matches = re.findall(pattern, text, re.DOTALL) if len(matches) == 0: pattern = r"```python\n(.*?)(?:\n```|$)" matches = re.findall(pattern, text, re.DOTALL) if len(matches) == 0: return [text] return matches def extract_json_code_blocks(text): """ Extracts json code blocks from the given text that are enclosed in triple backticks with a json language identifier. Parameters: - text: A string that may contain one or more json code blocks. Returns: - A list of strings, where each string is a block of json code extracted from the text. """ pattern = r"```json\n(.*?)\n```" matches = re.findall(pattern, text, re.DOTALL) if len(matches) == 0: pattern = r"```json\n(.*?)(?:\n```|$)" matches = re.findall(pattern, text, re.DOTALL) if len(matches) == 0: return [text] return matches def remove_last_line(python_code): """ Removes the last line from a given string of Python code. Parameters: - python_code: A string representing Python source code. Returns: - A string with the last line removed. """ lines = python_code.split("\n") # Split the string into lines if lines: # Check if there are any lines to remove lines.pop() # Remove the last line return "\n".join(lines) # Join the remaining lines back into a string def calculate_similarity(source, target): """ Calculate a similarity score between the source and target strings. This uses the edit distance relative to the length of the strings. """ edit_distance = pylcs.edit_distance(source, target) max_length = max(len(source), len(target)) # Normalize the score by the maximum possible edit distance (the length of the longer string) similarity = 1 - (edit_distance / max_length) return similarity def find_best_match_location(source_code, target_block): """ Find the best match for the target_block within the source_code by searching line by line, considering blocks of varying lengths. """ source_lines = source_code.split("\n") target_lines = target_block.split("\n") best_similarity = 0 best_start_index = -1 best_end_index = -1 # Iterate over the source lines to find the best matching range for all lines in target_block for start_index in range(len(source_lines) - len(target_lines) + 1): for end_index in range(start_index + len(target_lines), len(source_lines) + 1): current_window = "\n".join(source_lines[start_index:end_index]) current_similarity = calculate_similarity(current_window, target_block) if current_similarity > best_similarity: best_similarity = current_similarity best_start_index = start_index best_end_index = end_index # Convert line indices back to character indices for replacement char_start_index = len("\n".join(source_lines[:best_start_index])) + ( 1 if best_start_index > 0 else 0 ) char_end_index = len("\n".join(source_lines[:best_end_index])) return char_start_index, char_end_index def replace_code_in_source(source_code, replacement_block: str): """ Replace the best matching block in the source_code with the replacement_block, considering variable block lengths. """ replacement_block = extract_python_code_blocks(replacement_block)[0] print("replacement_block: ", replacement_block) replacement_block = remove_last_line(replacement_block) start_index, end_index = find_best_match_location(source_code, replacement_block) if start_index != -1 and end_index != -1: # Replace the best matching part with the replacement block new_source = ( source_code[:start_index] + replacement_block + source_code[end_index:] ) return new_source else: return source_code def save_as(content, path): # use at the end of replace_2 as save_as(end_result, "file_path") with open(path, "w") as file: file.write(content) class Operator: def __init__(self): # Load tokenizer model_name_or_path = "/home/peiji/deepseek-coder-6.7B-instruct-GPTQ/" # To use a different branch, change revision # For example: revision="gptq-4bit-32g-actorder_True" self.model = AutoModelForCausalLM.from_pretrained( model_name_or_path, device_map="auto", trust_remote_code=False, revision="main", ) self.tokenizer = AutoTokenizer.from_pretrained( model_name_or_path, use_fast=True ) def on_event( self, dora_event, send_output, ) -> DoraStatus: if dora_event["type"] == "INPUT": input = dora_event["value"][0].as_py() if False: with open(input["path"], "r", encoding="utf8") as f: raw = f.read() prompt = f"{raw} \n {input['query']}. " print("prompt: ", prompt, flush=True) output = self.ask_mistral( "You're a python code expert. Respond with the small modified code only. No explaination", prompt, ) print("output: {}".format(output)) source_code = replace_code_in_source(raw, output) send_output( "output_file", pa.array( [ { "raw": source_code, # "path": input["path"], # "response": output, # "prompt": prompt, } ] ), dora_event["metadata"], ) else: output = self.ask_mistral( """You're a json expert. Format your response as a json with a topic and a data field in a ```json block. No explaination needed. No code needed. The schema for those json are: - led: Int[3] (min: 0, max: 255) # RGB values - blaster: Int (min: 0, max: 128) - control: Int[3] (min: -1, max: 1) - rotation: Int[2] (min: -55, max: 55) - message: String The response should look like this: ```json { "topic": "led", "data": [255, 0, 0] } ``` """, input["query"], ) output = extract_json_code_blocks(output)[0] print("output: {}".format(output), flush=True) try: output = json.loads(output) if not isinstance(output["data"], list): output["data"] = [output["data"]] if output["topic"] in [ "led", "blaster", "control", "rotation", "text", ]: print("output", output) send_output( output["topic"], pa.array(output["data"]), dora_event["metadata"], ) except: print("Could not parse json") # if data is not iterable, put data in a list return DoraStatus.CONTINUE def ask_mistral(self, system_message, prompt): prompt_template = f""" ### Instruction {system_message} {prompt} ### Response: """ # Generate output input = self.tokenizer(prompt_template, return_tensors="pt") input_ids = input.input_ids.cuda() # add attention mask here attention_mask = input["attention_mask"] output = self.model.generate( inputs=input_ids, temperature=0.7, do_sample=True, top_p=0.95, top_k=40, max_new_tokens=512, attention_mask=attention_mask, eos_token_id=self.tokenizer.eos_token_id, ) # Get the tokens from the output, decode them, print them # Get text between im_start and im_end return self.tokenizer.decode(output[0], skip_special_tokens=True)[ len(prompt_template) : ] if __name__ == "__main__": op = Operator() # Path to the current file current_file_path = __file__ # Directory of the current file current_directory = os.path.dirname(current_file_path) path = current_directory + "plot.py" with open(path, "r", encoding="utf8") as f: raw = f.read() op.on_event( { "type": "INPUT", "id": "tick", "value": pa.array( [ { "raw": raw, "path": path, "query": "Send message my name is Carlito", } ] ), "metadata": [], }, print, )