Anupam251272 committed
Commit 36bb1ae
1 Parent(s): 89de218

Create app.py

Files changed (1)
  1. app.py +131 -0
app.py ADDED
@@ -0,0 +1,131 @@
+ import torch
+ import gradio as gr
+ import pandas as pd
+ import numpy as np
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
+ from sklearn.model_selection import train_test_split
+ from sklearn.preprocessing import LabelEncoder
+ import json
+ from datetime import datetime
+
+ class LegalAISystem:
+     def __init__(self):
+         self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+         self.tokenizer = AutoTokenizer.from_pretrained('nlpaueb/legal-bert-base-uncased')
+         self.model = AutoModelForSequenceClassification.from_pretrained('nlpaueb/legal-bert-base-uncased')
+         self.model.to(self.device)
+         self.label_encoder = LabelEncoder()
+         self.case_history = []
+
+     def preprocess_data(self, text):
+         # Clean and normalize text
+         text = str(text).lower().strip()
+         # Add more preprocessing steps as needed
+         return text
+
+     def extract_features(self, text):
+         # Tokenize and prepare features
+         inputs = self.tokenizer(
+             text,
+             padding=True,
+             truncation=True,
+             max_length=512,
+             return_tensors="pt"
+         ).to(self.device)
+         return inputs
+
+     def predict_outcome(self, case_text):
+         # Preprocess input
+         processed_text = self.preprocess_data(case_text)
+
+         # Extract features
+         features = self.extract_features(processed_text)
+
+         # Make prediction
+         with torch.no_grad():
+             outputs = self.model(**features)
+             predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
+
+         # Get prediction probabilities
+         probs = predictions.cpu().numpy()[0]
+
+         # Store in case history
+         self.case_history.append({
+             'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
+             'case_text': case_text,
+             'prediction_probs': probs.tolist()
+         })
+
+         return {
+             'Favorable': float(probs[1]),
+             'Unfavorable': float(probs[0])
+         }
+
+     def analyze_precedents(self, case_text):
+         # Implement similarity search for relevant precedents
+         # This is a simplified version
+         return ["Precedent 1: Smith v. Jones (2019)",
+                 "Precedent 2: Brown v. State (2020)"]
+
+     def generate_report(self, case_text, prediction, precedents):
+         report = f"""
+         Legal Case Analysis Report
+         ========================
+         Date: {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
+
+         Case Summary:
+         {case_text[:500]}...
+
+         Prediction:
+         - Favorable Outcome: {prediction['Favorable']:.2%}
+         - Unfavorable Outcome: {prediction['Unfavorable']:.2%}
+
+         Relevant Precedents:
+         {chr(10).join(precedents)}
+
+         Note: This is an AI-generated analysis and should be reviewed by legal professionals.
+         """
+         return report
+
+ def create_gradio_interface():
+     legal_ai = LegalAISystem()
+
+     def process_case(case_text):
+         # Analyze case
+         prediction = legal_ai.predict_outcome(case_text)
+         precedents = legal_ai.analyze_precedents(case_text)
+         report = legal_ai.generate_report(case_text, prediction, precedents)
+
+         # Create visualization data
+         prob_chart = {
+             "Favorable": prediction['Favorable'],
+             "Unfavorable": prediction['Unfavorable']
+         }
+
+         return (
+             report,
+             prob_chart,
+             f"Confidence: {max(prediction.values()):.2%}"
+         )
+
+     # Create Gradio interface
+     iface = gr.Interface(
+         fn=process_case,
+         inputs=[
+             gr.Textbox(label="Enter Case Details", lines=10)
+         ],
+         outputs=[
+             gr.Textbox(label="Analysis Report", lines=10),
+             gr.Label(label="Outcome Probabilities"),
+             gr.Textbox(label="Model Confidence")
+         ],
+         title="AI Legal Case Analysis System",
+         description="Enter case details to get real-time analysis and predictions."
+     )
+
+     return iface
+
+ # Launch the interface
+ if __name__ == "__main__":
+     interface = create_gradio_interface()
+     interface.launch(share=True, debug=True)