monra committed on
Commit
1fc65b1
1 Parent(s): 0efc2d5

API: Removed unused providers

Browse files
g4f/Provider/Providers/Aichat.py DELETED
@@ -1,37 +0,0 @@
1
- import os, requests
2
- from ...typing import sha256, Dict, get_type_hints
3
-
4
- url = 'https://chat-gpt.org/chat'
5
- model = ['gpt-3.5-turbo']
6
- supports_stream = False
7
-
8
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
9
- headers = {
10
- 'authority': 'chat-gpt.org',
11
- 'accept': '*/*',
12
- 'cache-control': 'no-cache',
13
- 'content-type': 'application/json',
14
- 'origin': 'https://chat-gpt.org',
15
- 'pragma': 'no-cache',
16
- 'referer': 'https://chat-gpt.org/chat',
17
- 'sec-ch-ua-mobile': '?0',
18
- 'sec-ch-ua-platform': '"macOS"',
19
- 'sec-fetch-dest': 'empty',
20
- 'sec-fetch-mode': 'cors',
21
- 'sec-fetch-site': 'same-origin',
22
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
23
- }
24
-
25
- json_data = {
26
- 'message': messages[-1]['content'],
27
- 'temperature': 1,
28
- 'presence_penalty': 0,
29
- 'top_p': 1,
30
- 'frequency_penalty': 0
31
- }
32
-
33
- response = requests.post('https://chat-gpt.org/api/text', headers=headers, json=json_data)
34
- yield response.json()['message']
35
-
36
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
37
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
g4f/Provider/Providers/Aws.py DELETED
@@ -1,26 +0,0 @@
1
- import os
2
- import requests
3
-
4
- from ...typing import sha256, Dict, get_type_hints
5
-
6
- url = 'https://4aiu6ctrknfxkoaigkigzh5lwm0cciuc.lambda-url.ap-east-1.on.aws/chat/completions'
7
- model = ['gpt-3.5-turbo', 'gpt-4']
8
- supports_stream = False
9
-
10
- class Auth(requests.auth.AuthBase):
11
- def __init__(self):
12
- self.token = 'sk-1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL'
13
-
14
- def __call__(self, r):
15
- r.headers["authorization"] = f"Bearer {self.token}"
16
- return r
17
-
18
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
19
-
20
- response = requests.post(url,
21
- auth=Auth(), json={"model": model,"messages": messages})
22
-
23
- yield (response.json()['choices'][0]['message']['content'])
24
-
25
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
26
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
g4f/Provider/Providers/Bard.py DELETED
@@ -1,80 +0,0 @@
1
- # implement proxy argument
2
-
3
- import os, requests, json, browser_cookie3, re, random
4
- from ...typing import sha256, Dict, get_type_hints
5
-
6
- url = 'https://bard.google.com'
7
- model = ['Palm2']
8
- supports_stream = False
9
-
10
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
11
- psid = {cookie.name: cookie.value for cookie in browser_cookie3.chrome(
12
- domain_name='.google.com')}['__Secure-1PSID']
13
-
14
- formatted = '\n'.join(
15
- [f"{message['role']}: {message['content']}" for message in messages]
16
- )
17
- prompt = f'{formatted}\nAssistant:'
18
-
19
- proxy = None
20
-
21
- if proxy is None:
22
- raise Exception('Proxy is required for Bard (set in g4f/Provider/Providers/Bard.py line 18)')
23
-
24
- snlm0e = False
25
- conversation_id = None
26
- response_id = None
27
- choice_id = None
28
-
29
- client = requests.Session()
30
- client.proxies = {
31
- 'http': f'https://{proxy}',
32
- 'https': f'https://{proxy}'} if proxy else None
33
-
34
- client.headers = {
35
- 'authority': 'bard.google.com',
36
- 'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
37
- 'origin': 'https://bard.google.com',
38
- 'referer': 'https://bard.google.com/',
39
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
40
- 'x-same-domain': '1',
41
- 'cookie': f'__Secure-1PSID={psid}'
42
- }
43
-
44
- snlm0e = (
45
- re.search(
46
- r'SNlM0e\":\"(.*?)\"', client.get('https://bard.google.com/').text
47
- )[1]
48
- if not snlm0e
49
- else snlm0e
50
- )
51
-
52
- params = {
53
- 'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
54
- '_reqid': random.randint(1111, 9999),
55
- 'rt': 'c'
56
- }
57
-
58
- data = {
59
- 'at': snlm0e,
60
- 'f.req': json.dumps([None, json.dumps([[prompt], None, [conversation_id, response_id, choice_id]])])}
61
-
62
- intents = '.'.join([
63
- 'assistant',
64
- 'lamda',
65
- 'BardFrontendService'
66
- ])
67
-
68
- response = client.post(f'https://bard.google.com/_/BardChatUi/data/{intents}/StreamGenerate',
69
- data=data, params=params)
70
-
71
- if chat_data := json.loads(response.content.splitlines()[3])[0][2]:
72
- json_chat_data = json.loads(chat_data)
73
-
74
- yield json_chat_data[0][0]
75
-
76
- else:
77
- yield 'error'
78
-
79
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
80
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
g4f/Provider/Providers/Bing.py DELETED
@@ -1,31 +0,0 @@
1
- import os
2
- import json
3
- import time
4
- import subprocess
5
-
6
- from ...typing import sha256, Dict, get_type_hints
7
-
8
- url = 'https://bing.com/chat'
9
- model = ['gpt-3.5-turbo', 'gpt-4']
10
- supports_stream = True
11
-
12
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
13
- path = os.path.dirname(os.path.realpath(__file__))
14
- config = json.dumps({
15
- 'messages': messages,
16
- 'model': model}, separators=(',', ':'))
17
-
18
- try:
19
- subprocess.run(["python3", "--version"], capture_output=True, check=True)
20
- cmd = ["python3", f"{path}/helpers/bing.py", config]
21
- except subprocess.CalledProcessError:
22
- cmd = ["python", f"{path}/helpers/bing.py", config]
23
-
24
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
25
-
26
- for line in iter(p.stdout.readline, b''):
27
- yield line.decode('utf-8', errors='ignore') #[:-1]
28
-
29
-
30
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
31
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
g4f/Provider/Providers/Forefront.py CHANGED
@@ -4,7 +4,7 @@ import requests
4
  from ...typing import sha256, Dict, get_type_hints
5
 
6
  url = 'forefront.com'
7
- model = ['gpt-3.5-turbo']
8
  supports_stream = True
9
 
10
 
@@ -16,11 +16,11 @@ def _create_completion(model: str, messages: list, stream: bool, **kwargs):
16
  'parentId': '',
17
  'workspaceId': '',
18
  'messagePersona': '607e41fe-95be-497e-8e97-010a59b2e2c0',
19
- 'model': 'gpt-4',
20
  'messages': messages[:-1] if len(messages) > 1 else [],
21
  'internetMode': 'auto'
22
  }
23
-
24
  response = requests.post('https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat',
25
  json=json_data, stream=True)
26
 
 
4
  from ...typing import sha256, Dict, get_type_hints
5
 
6
  url = 'forefront.com'
7
+ model = ['gpt-3.5-turbo', 'gpt-4']
8
  supports_stream = True
9
 
10
 
 
16
  'parentId': '',
17
  'workspaceId': '',
18
  'messagePersona': '607e41fe-95be-497e-8e97-010a59b2e2c0',
19
+ 'model': model,
20
  'messages': messages[:-1] if len(messages) > 1 else [],
21
  'internetMode': 'auto'
22
  }
23
+
24
  response = requests.post('https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat',
25
  json=json_data, stream=True)
26
 
g4f/Provider/Providers/Openai.py DELETED
@@ -1,33 +0,0 @@
1
- import os
2
- import json
3
- import time
4
- import subprocess
5
-
6
- from ...typing import sha256, Dict, get_type_hints
7
-
8
- url = 'https://chat.openai.com/chat'
9
- model = ['gpt-3.5-turbo']
10
- supports_stream = True
11
-
12
-
13
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
14
-
15
- path = os.path.dirname(os.path.realpath(__file__))
16
- config = json.dumps({
17
- 'model': model,
18
- 'messages': messages[:-1] if len(messages) > 1 else [],
19
- })
20
-
21
- try:
22
- subprocess.run(["python3", "--version"], capture_output=True, check=True)
23
- cmd = ["python3", f"{path}/helpers/openai.py", config]
24
- except subprocess.CalledProcessError:
25
- cmd = ["python", f"{path}/helpers/openai.py", config]
26
-
27
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
28
-
29
- for line in iter(p.stdout.readline, b''):
30
- yield line.decode('utf-8', errors='ignore') #[:-1]
31
-
32
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
33
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
g4f/Provider/Providers/Ora.py DELETED
@@ -1,42 +0,0 @@
1
- import os, requests, uuid
2
- from ...typing import sha256, Dict, get_type_hints
3
-
4
- url = 'https://ora.ai'
5
- model = ['gpt-3.5-turbo', 'gpt-4']
6
- supports_stream = False
7
-
8
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
9
- headers = {
10
- 'authority': 'ora.ai',
11
- 'accept': '*/*',
12
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
13
- 'cache-control': 'no-cache',
14
- 'content-type': 'application/json',
15
- 'origin': 'https://ora.ai',
16
- 'pragma': 'no-cache',
17
- 'referer': 'https://ora.ai/chat/',
18
- 'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
19
- 'sec-ch-ua-mobile': '?0',
20
- 'sec-ch-ua-platform': '"macOS"',
21
- 'sec-fetch-dest': 'empty',
22
- 'sec-fetch-mode': 'cors',
23
- 'sec-fetch-site': 'same-origin',
24
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
25
- }
26
-
27
- json_data = {
28
- 'chatbotId': 'adb2b793-e667-46b9-8d80-114eaa9a4c40',
29
- 'input': messages[-1]['content'],
30
- 'userId': f'auto:{uuid.uuid4()}',
31
- 'provider': 'OPEN_AI',
32
- 'config': False,
33
- 'includeHistory': False
34
- }
35
-
36
- response = requests.post('https://ora.ai/api/conversation',
37
- headers=headers, json=json_data)
38
-
39
- yield response.json()['response']
40
-
41
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
42
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
g4f/Provider/Providers/Phind.py DELETED
@@ -1,42 +0,0 @@
1
- import os
2
- import json
3
- import time
4
- import subprocess
5
-
6
- from ...typing import sha256, Dict, get_type_hints
7
-
8
- url = 'https://phind.com'
9
- model = ['gpt-3.5-turbo', 'gpt-4']
10
- supports_stream = True
11
-
12
-
13
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
14
-
15
- path = os.path.dirname(os.path.realpath(__file__))
16
- config = json.dumps({
17
- 'model': model,
18
- 'messages': messages[:-1] if len(messages) > 1 else [],
19
- })
20
-
21
- try:
22
- subprocess.run(["python3", "--version"],
23
- capture_output=True, check=True)
24
- cmd = ["python3", f"{path}/helpers/phind.py", config]
25
- except subprocess.CalledProcessError:
26
- cmd = ["python", f"{path}/helpers/phind.py", config]
27
-
28
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
29
-
30
- for line in iter(p.stdout.readline, b''):
31
- if b'<title>Just a moment...</title>' in line:
32
- os.system('clear' if os.name == 'posix' else 'cls')
33
- yield 'Clouflare error, please try again...'
34
- os._exit(0)
35
-
36
- elif b'ping - 2023-' not in line:
37
- yield line.decode('utf-8', errors='ignore') # [:-1]
38
-
39
-
40
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
41
- '(%s)' % ', '.join(
42
- [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
g4f/Provider/Providers/Pierangelo.py DELETED
@@ -1,56 +0,0 @@
1
- import os
2
- import requests
3
- from ...typing import sha256, Dict, get_type_hints
4
-
5
- url = 'https://chat.pierangelo.info'
6
- model = ['gpt-4', 'gpt-3.5-turbo']
7
- supports_stream = True
8
-
9
- models = {
10
- 'gpt-4': {
11
- 'id':'gpt-4',
12
- 'name':'GPT-4'
13
- },
14
- 'gpt-3.5-turbo': {
15
- 'id':'gpt-3.5-turbo',
16
- 'name':'GPT-3.5'
17
- }
18
- }
19
-
20
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
21
-
22
- headers = {
23
- 'authority': 'chat.pierangelo.info',
24
- 'accept': '*/*',
25
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
26
- 'cache-control': 'no-cache',
27
- 'content-type': 'application/json',
28
- 'origin': 'https://chat.pierangelo.info',
29
- 'pragma': 'no-cache',
30
- 'referer': 'https://chat.pierangelo.info/',
31
- 'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
32
- 'sec-ch-ua-mobile': '?0',
33
- 'sec-ch-ua-platform': '"macOS"',
34
- 'sec-fetch-dest': 'empty',
35
- 'sec-fetch-mode': 'cors',
36
- 'sec-fetch-site': 'same-origin',
37
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
38
- }
39
-
40
- json_data = {
41
- 'model': models[model],
42
- 'messages': messages,
43
- 'key': '',
44
- 'prompt': "You are ChatGPT, a large language model trained by OpenAI. Answer consisely",
45
- 'temperature': 0.7
46
- }
47
-
48
- yield from requests.post(
49
- 'https://chat.pierangelo.info/api/chat',
50
- headers=headers,
51
- json=json_data,
52
- stream=True,
53
- )
54
-
55
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
56
- '(%s)' % ', '.join([f'{name}: {get_type_hints(_create_completion)[name].__name__}' for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
g4f/Provider/Providers/Theb.py DELETED
@@ -1,33 +0,0 @@
1
- import os
2
- import json
3
- import time
4
- import subprocess
5
-
6
- from ...typing import sha256, Dict, get_type_hints
7
-
8
- url = 'https://theb.ai'
9
- model = ['gpt-3.5-turbo']
10
- supports_stream = True
11
-
12
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
13
-
14
- path = os.path.dirname(os.path.realpath(__file__))
15
- config = json.dumps({
16
- 'model': model,
17
- 'messages': messages[:-1] if len(messages) > 1 else [],
18
- })
19
-
20
- try:
21
- subprocess.run(["python3", "--version"], capture_output=True, check=True)
22
- cmd = ["python3", f"{path}/helpers/theb.py", config]
23
- except subprocess.CalledProcessError:
24
- cmd = ["python", f"{path}/helpers/theb.py", config]
25
-
26
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
27
-
28
- for line in iter(p.stdout.readline, b''):
29
- yield line.decode('utf-8', errors='ignore') #[:-1]
30
-
31
-
32
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
33
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
g4f/Provider/Providers/You.py DELETED
@@ -1,30 +0,0 @@
1
- import os
2
- import json
3
- import time
4
- import subprocess
5
-
6
- from ...typing import sha256, Dict, get_type_hints
7
-
8
- url = 'https://you.com'
9
- model = 'gpt-3.5-turbo'
10
- supports_stream = True
11
-
12
-
13
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
14
-
15
- path = os.path.dirname(os.path.realpath(__file__))
16
- config = json.dumps({
17
- 'messages': messages[:-1] if len(messages) > 1 else [],
18
- })
19
-
20
- try:
21
- subprocess.run(["python3", "--version"],
22
- capture_output=True, check=True)
23
- cmd = ["python3", f"{path}/helpers/you.py", config]
24
- except subprocess.CalledProcessError:
25
- cmd = ["python", f"{path}/helpers/you.py", config]
26
-
27
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
28
-
29
- for line in iter(p.stdout.readline, b''):
30
- yield line.decode('utf-8', errors='ignore') # [:-1]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
g4f/Provider/Providers/Yqcloud.py DELETED
@@ -1,32 +0,0 @@
1
- import os
2
- import time
3
- import requests
4
-
5
- from ...typing import sha256, Dict, get_type_hints
6
- supports_stream = True
7
-
8
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
9
-
10
- headers = {
11
- 'authority': 'api.aichatos.cloud',
12
- 'origin': 'https://chat9.yqcloud.top',
13
- 'referer': 'https://chat9.yqcloud.top/',
14
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
15
- }
16
-
17
- json_data = {
18
- 'prompt': f"{messages[-1]['content']}",
19
- 'userId': f'#/chat/{int(time.time() * 1000)}',
20
- 'network': True,
21
- 'apikey': '',
22
- 'system': '',
23
- 'withoutContext': False,
24
- }
25
-
26
- response = requests.post('https://api.aichatos.cloud/api/generateStream', headers=headers, json=json_data, stream=True)
27
- for token in response.iter_content(chunk_size=2046):
28
- if b'always respond in english' not in token:
29
- yield (token.decode('utf-8'))
30
-
31
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
32
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
g4f/Provider/Providers/helpers/bing.py DELETED
@@ -1,205 +0,0 @@
1
- import sys
2
- import ssl
3
- import uuid
4
- import json
5
- import time
6
- import random
7
- import asyncio
8
- import certifi
9
- import requests
10
- import websockets
11
- import browser_cookie3
12
-
13
- config = json.loads(sys.argv[1])
14
-
15
- ssl_context = ssl.create_default_context()
16
- ssl_context.load_verify_locations(certifi.where())
17
-
18
-
19
-
20
- conversationstyles = {
21
- 'gpt-4': [ #'precise'
22
- "nlu_direct_response_filter",
23
- "deepleo",
24
- "disable_emoji_spoken_text",
25
- "responsible_ai_policy_235",
26
- "enablemm",
27
- "h3imaginative",
28
- "travelansgnd",
29
- "dv3sugg",
30
- "clgalileo",
31
- "gencontentv3",
32
- "dv3sugg",
33
- "responseos",
34
- "e2ecachewrite",
35
- "cachewriteext",
36
- "nodlcpcwrite",
37
- "travelansgnd",
38
- "nojbfedge",
39
- ],
40
- 'balanced': [
41
- "nlu_direct_response_filter",
42
- "deepleo",
43
- "disable_emoji_spoken_text",
44
- "responsible_ai_policy_235",
45
- "enablemm",
46
- "galileo",
47
- "dv3sugg",
48
- "responseos",
49
- "e2ecachewrite",
50
- "cachewriteext",
51
- "nodlcpcwrite",
52
- "travelansgnd",
53
- "nojbfedge",
54
- ],
55
- 'gpt-3.5-turbo': [ #'precise'
56
- "nlu_direct_response_filter",
57
- "deepleo",
58
- "disable_emoji_spoken_text",
59
- "responsible_ai_policy_235",
60
- "enablemm",
61
- "galileo",
62
- "dv3sugg",
63
- "responseos",
64
- "e2ecachewrite",
65
- "cachewriteext",
66
- "nodlcpcwrite",
67
- "travelansgnd",
68
- "h3precise",
69
- "clgalileo",
70
- "nojbfedge",
71
- ]
72
- }
73
-
74
- def format(msg: dict) -> str:
75
- return json.dumps(msg) + '\x1e'
76
-
77
- def get_token():
78
- return
79
-
80
- class AsyncCompletion:
81
- async def create(self, optionSets : list = None, token : str = None): # No auth required anymore
82
-
83
- create = None
84
- for _ in range(5):
85
- try:
86
- create = requests.get('https://edgeservices.bing.com/edgesvc/turing/conversation/create',
87
- headers = {
88
- 'host': 'edgeservices.bing.com',
89
- 'accept-encoding': 'gzip, deflate, br',
90
- 'connection': 'keep-alive',
91
- 'authority': 'edgeservices.bing.com',
92
- 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
93
- 'accept-language': 'en-US,en;q=0.9',
94
- 'cache-control': 'max-age=0',
95
- 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
96
- 'sec-ch-ua-arch': '"x86"',
97
- 'sec-ch-ua-bitness': '"64"',
98
- 'sec-ch-ua-full-version': '"110.0.1587.69"',
99
- 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
100
- 'sec-ch-ua-mobile': '?0',
101
- 'sec-ch-ua-model': '""',
102
- 'sec-ch-ua-platform': '"Windows"',
103
- 'sec-ch-ua-platform-version': '"15.0.0"',
104
- 'sec-fetch-dest': 'document',
105
- 'sec-fetch-mode': 'navigate',
106
- 'sec-fetch-site': 'none',
107
- 'sec-fetch-user': '?1',
108
- 'upgrade-insecure-requests': '1',
109
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
110
- 'x-edge-shopping-flag': '1',
111
- 'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
112
- }
113
- )
114
-
115
- conversationId = create.json()['conversationId']
116
- clientId = create.json()['clientId']
117
- conversationSignature = create.json()['conversationSignature']
118
-
119
- except Exception as e:
120
- time.sleep(0.5)
121
- if create is None: raise Exception('Failed to create conversation.')
122
-
123
- wss: websockets.WebSocketClientProtocol or None = None
124
-
125
- wss = await websockets.connect('wss://sydney.bing.com/sydney/ChatHub', max_size = None, ssl = ssl_context,
126
- extra_headers = {
127
- 'accept': 'application/json',
128
- 'accept-language': 'en-US,en;q=0.9',
129
- 'content-type': 'application/json',
130
- 'sec-ch-ua': '"Not_A Brand";v="99", Microsoft Edge";v="110", "Chromium";v="110"',
131
- 'sec-ch-ua-arch': '"x86"',
132
- 'sec-ch-ua-bitness': '"64"',
133
- 'sec-ch-ua-full-version': '"109.0.1518.78"',
134
- 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
135
- 'sec-ch-ua-mobile': '?0',
136
- 'sec-ch-ua-model': "",
137
- 'sec-ch-ua-platform': '"Windows"',
138
- 'sec-ch-ua-platform-version': '"15.0.0"',
139
- 'sec-fetch-dest': 'empty',
140
- 'sec-fetch-mode': 'cors',
141
- 'sec-fetch-site': 'same-origin',
142
- 'x-ms-client-request-id': str(uuid.uuid4()),
143
- 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
144
- 'Referer': 'https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx',
145
- 'Referrer-Policy': 'origin-when-cross-origin',
146
- 'x-forwarded-for': f'13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}'
147
- }
148
- )
149
-
150
- await wss.send(format({'protocol': 'json', 'version': 1}))
151
- await wss.recv()
152
-
153
- struct = {
154
- 'arguments': [
155
- {
156
- 'source': 'cib',
157
- 'optionsSets': optionSets,
158
- 'isStartOfSession': True,
159
- 'message': {
160
- 'author': 'user',
161
- 'inputMethod': 'Keyboard',
162
- 'text': self,
163
- 'messageType': 'Chat',
164
- },
165
- 'conversationSignature': conversationSignature,
166
- 'participant': {'id': clientId},
167
- 'conversationId': conversationId,
168
- }
169
- ],
170
- 'invocationId': '0',
171
- 'target': 'chat',
172
- 'type': 4,
173
- }
174
-
175
- await wss.send(format(struct))
176
-
177
- base_string = ''
178
-
179
- final = False
180
- while not final:
181
- objects = str(await wss.recv()).split('\x1e')
182
- for obj in objects:
183
- if obj is None or obj == '':
184
- continue
185
-
186
- response = json.loads(obj)
187
- if response.get('type') == 1 and response['arguments'][0].get('messages',):
188
- response_text = response['arguments'][0]['messages'][0]['adaptiveCards'][0]['body'][0].get('text')
189
-
190
- yield (response_text.replace(base_string, ''))
191
- base_string = response_text
192
-
193
- elif response.get('type') == 2:
194
- final = True
195
-
196
- await wss.close()
197
-
198
- async def run(optionSets, messages):
199
- async for value in AsyncCompletion.create(prompt=messages[-1]['content'],
200
- optionSets=optionSets):
201
-
202
- print(value, flush=True, end = '')
203
-
204
- optionSet = conversationstyles[config['model']]
205
- asyncio.run(run(optionSet, config['messages']))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
g4f/Provider/Providers/helpers/openai.py DELETED
@@ -1,106 +0,0 @@
1
- import sys
2
- import uuid
3
- import json
4
- import browser_cookie3
5
-
6
- from curl_cffi import requests
7
-
8
- config = json.loads(sys.argv[1])
9
-
10
- def session_auth(cookies):
11
- headers = {
12
- 'authority': 'chat.openai.com',
13
- 'accept': '*/*',
14
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
15
- 'cache-control': 'no-cache',
16
- 'pragma': 'no-cache',
17
- 'referer': 'https://chat.openai.com/chat',
18
- 'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
19
- 'sec-ch-ua-mobile': '?0',
20
- 'sec-ch-ua-platform': '"macOS"',
21
- 'sec-fetch-dest': 'empty',
22
- 'sec-fetch-mode': 'cors',
23
- 'sec-fetch-site': 'same-origin',
24
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
25
- }
26
-
27
- return requests.get('https://chat.openai.com/api/auth/session',
28
- cookies=cookies, headers=headers, impersonate='chrome110').json()
29
-
30
- all_cookies = {cookie.name: cookie.value for cookie in browser_cookie3.chrome(
31
- domain_name='chat.openai.com')}
32
-
33
- try:
34
- cookies = {
35
- '__Secure-next-auth.session-token': all_cookies['__Secure-next-auth.session-token'],
36
- }
37
- except Exception:
38
- print('Failed to get "__Secure-next-auth.session-token" in chrome, please make sure you are authenticated on openai.com')
39
- exit(0)
40
-
41
- headers = {
42
- 'authority': 'chat.openai.com',
43
- 'accept': 'text/event-stream',
44
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
45
- 'authorization': 'Bearer ' + session_auth(cookies)['accessToken'],
46
- 'cache-control': 'no-cache',
47
- 'content-type': 'application/json',
48
- 'origin': 'https://chat.openai.com',
49
- 'pragma': 'no-cache',
50
- 'referer': 'https://chat.openai.com/chat',
51
- 'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
52
- 'sec-ch-ua-mobile': '?0',
53
- 'sec-ch-ua-platform': '"macOS"',
54
- 'sec-fetch-dest': 'empty',
55
- 'sec-fetch-mode': 'cors',
56
- 'sec-fetch-site': 'same-origin',
57
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
58
- }
59
-
60
- payload = {
61
- 'action': 'next',
62
- 'history_and_training_disabled': False,
63
- 'messages': [
64
- {
65
- 'id': str(uuid.uuid4()),
66
- 'author': {
67
- 'role': 'user',
68
- },
69
- 'content': {
70
- 'content_type': 'text',
71
- 'parts': [
72
- config['messages'][-1]['content']
73
- ]
74
- }
75
- }
76
- ],
77
- 'model': 'text-davinci-002-render-sha',
78
- 'parent_message_id': str(uuid.uuid4()),
79
- 'supports_modapi': True,
80
- 'timezone_offset_min': -60
81
- }
82
-
83
- completion = ''
84
-
85
- def format(chunk):
86
- try:
87
- global completion
88
-
89
- if b'parts' in chunk:
90
- json_data = json.loads(chunk.decode('utf-8').split('data: ')[1])
91
- token = json_data['message']['content']['parts'][0]
92
- token = token.replace(completion, '')
93
- completion += token
94
-
95
- print(token, flush=True, end = '')
96
-
97
- except Exception as e:
98
- pass
99
-
100
- for _ in range(3):
101
- try:
102
- response = requests.post('https://chat.openai.com/backend-api/conversation',
103
- json=payload, headers=headers, content_callback=format, impersonate='chrome110')
104
- break
105
- except:
106
- continue
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
g4f/Provider/Providers/helpers/phind.py DELETED
@@ -1,69 +0,0 @@
1
- import sys
2
- import json
3
- import datetime
4
- import urllib.parse
5
-
6
- from curl_cffi import requests
7
-
8
- config = json.loads(sys.argv[1])
9
- prompt = config['messages'][-1]['content']
10
-
11
- skill = 'expert' if config['model'] == 'gpt-4' else 'intermediate'
12
-
13
- json_data = json.dumps({
14
- 'question': prompt,
15
- 'options': {
16
- 'skill': skill,
17
- 'date': datetime.datetime.now().strftime('%d/%m/%Y'),
18
- 'language': 'en',
19
- 'detailed': True,
20
- 'creative': True,
21
- 'customLinks': []}}, separators=(',', ':'))
22
-
23
- headers = {
24
- 'Content-Type': 'application/json',
25
- 'Pragma': 'no-cache',
26
- 'Accept': '*/*',
27
- 'Sec-Fetch-Site': 'same-origin',
28
- 'Accept-Language': 'en-GB,en;q=0.9',
29
- 'Cache-Control': 'no-cache',
30
- 'Sec-Fetch-Mode': 'cors',
31
- 'Content-Length': str(len(json_data)),
32
- 'Origin': 'https://www.phind.com',
33
- 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15',
34
- 'Referer': f'https://www.phind.com/search?q={urllib.parse.quote(prompt)}&source=searchbox',
35
- 'Connection': 'keep-alive',
36
- 'Host': 'www.phind.com',
37
- 'Sec-Fetch-Dest': 'empty'
38
- }
39
-
40
-
41
- def output(chunk):
42
- try:
43
- if b'PHIND_METADATA' in chunk:
44
- return
45
-
46
- if chunk == b'data: \r\ndata: \r\ndata: \r\n\r\n':
47
- chunk = b'data: \n\r\n\r\n'
48
-
49
- chunk = chunk.decode()
50
-
51
- chunk = chunk.replace('data: \r\n\r\ndata: ', 'data: \n')
52
- chunk = chunk.replace('\r\ndata: \r\ndata: \r\n\r\n', '\n\r\n\r\n')
53
- chunk = chunk.replace('data: ', '').replace('\r\n\r\n', '')
54
-
55
- print(chunk, flush=True, end = '')
56
-
57
- except json.decoder.JSONDecodeError:
58
- pass
59
-
60
- while True:
61
- try:
62
- response = requests.post('https://www.phind.com/api/infer/answer',
63
- headers=headers, data=json_data, content_callback=output, timeout=999999, impersonate='safari15_5')
64
-
65
- exit(0)
66
-
67
- except Exception as e:
68
- print('an error occured, retrying... |', e, flush=True)
69
- continue
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
g4f/Provider/Providers/helpers/theb.py DELETED
@@ -1,49 +0,0 @@
1
- import json
2
- import sys
3
- from curl_cffi import requests
4
-
5
- config = json.loads(sys.argv[1])
6
- prompt = config['messages'][-1]['content']
7
-
8
- headers = {
9
- 'authority': 'chatbot.theb.ai',
10
- 'accept': 'application/json, text/plain, */*',
11
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
12
- 'content-type': 'application/json',
13
- 'origin': 'https://chatbot.theb.ai',
14
- 'referer': 'https://chatbot.theb.ai/',
15
- 'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
16
- 'sec-ch-ua-mobile': '?0',
17
- 'sec-ch-ua-platform': '"macOS"',
18
- 'sec-fetch-dest': 'empty',
19
- 'sec-fetch-mode': 'cors',
20
- 'sec-fetch-site': 'same-origin',
21
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
22
- }
23
-
24
- json_data = {
25
- 'prompt': prompt,
26
- 'options': {}
27
- }
28
-
29
- def format(chunk):
30
- try:
31
- chunk_json = json.loads(chunk.decode('utf-8'))
32
- completion_chunk = chunk_json['detail']['choices'][0]['delta']['content']
33
-
34
- print(completion_chunk, flush=True, end = '')
35
-
36
- except Exception as e:
37
- print('[ERROR] an error occured, retrying... |', e, flush=True)
38
- return
39
-
40
- while True:
41
- try:
42
- response = requests.post('https://chatbot.theb.ai/api/chat-process',
43
- headers=headers, json=json_data, content_callback=format, impersonate='chrome110')
44
-
45
- exit(0)
46
-
47
- except Exception as e:
48
- print('[ERROR] an error occured, retrying... |', e, flush=True)
49
- continue
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
g4f/Provider/Providers/helpers/you.py DELETED
@@ -1,79 +0,0 @@
1
- import sys
2
- import json
3
- import urllib.parse
4
-
5
- from curl_cffi import requests
6
-
7
- config = json.loads(sys.argv[1])
8
- messages = config['messages']
9
- prompt = ''
10
-
11
-
12
- def transform(messages: list) -> list:
13
- result = []
14
- i = 0
15
-
16
- while i < len(messages):
17
- if messages[i]['role'] == 'user':
18
- question = messages[i]['content']
19
- i += 1
20
-
21
- if i < len(messages) and messages[i]['role'] == 'assistant':
22
- answer = messages[i]['content']
23
- i += 1
24
- else:
25
- answer = ''
26
-
27
- result.append({'question': question, 'answer': answer})
28
-
29
- elif messages[i]['role'] == 'assistant':
30
- result.append({'question': '', 'answer': messages[i]['content']})
31
- i += 1
32
-
33
- elif messages[i]['role'] == 'system':
34
- result.append({'question': messages[i]['content'], 'answer': ''})
35
- i += 1
36
-
37
- return result
38
-
39
- headers = {
40
- 'Content-Type': 'application/x-www-form-urlencoded',
41
- 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
42
- 'Sec-Fetch-Site': 'same-origin',
43
- 'Accept-Language': 'en-GB,en;q=0.9',
44
- 'Sec-Fetch-Mode': 'navigate',
45
- 'Host': 'you.com',
46
- 'Origin': 'https://you.com',
47
- 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15',
48
- 'Referer': 'https://you.com/api/streamingSearch?q=nice&safeSearch=Moderate&onShoppingPage=false&mkt=&responseFilter=WebPages,Translations,TimeZone,Computation,RelatedSearches&domain=youchat&queryTraceId=7a6671f8-5881-404d-8ea3-c3f8301f85ba&chat=%5B%7B%22question%22%3A%22hi%22%2C%22answer%22%3A%22Hello!%20How%20can%20I%20assist%20you%20today%3F%22%7D%5D&chatId=7a6671f8-5881-404d-8ea3-c3f8301f85ba&__cf_chl_tk=ex2bw6vn5vbLsUm8J5rDYUC0Bjzc1XZqka6vUl6765A-1684108495-0-gaNycGzNDtA',
49
- 'Connection': 'keep-alive',
50
- 'Sec-Fetch-Dest': 'document',
51
- 'Priority': 'u=0, i',
52
- }
53
-
54
- if messages[-1]['role'] == 'user':
55
- prompt = messages[-1]['content']
56
- messages = messages[:-1]
57
-
58
- params = urllib.parse.urlencode({
59
- 'q': prompt,
60
- 'domain': 'youchat',
61
- 'chat': transform(messages)
62
- })
63
-
64
- def output(chunk):
65
- if b'"youChatToken"' in chunk:
66
- chunk_json = json.loads(chunk.decode().split('data: ')[1])
67
-
68
- print(chunk_json['youChatToken'], flush=True, end = '')
69
-
70
- while True:
71
- try:
72
- response = requests.get(f'https://you.com/api/streamingSearch?{params}',
73
- headers=headers, content_callback=output, impersonate='safari15_5')
74
-
75
- exit(0)
76
-
77
- except Exception as e:
78
- print('an error occured, retrying... |', e, flush=True)
79
- continue
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
g4f/Provider/__init__.py CHANGED
@@ -1,17 +1,4 @@
1
  from . import Provider
2
  from .Providers import (
3
- Phind,
4
- You,
5
- Bing,
6
- Openai,
7
- Yqcloud,
8
- Theb,
9
- Aichat,
10
- Ora,
11
- Aws,
12
- Bard,
13
- Pierangelo,
14
  Forefront
15
  )
16
-
17
- Palm = Bard
 
1
  from . import Provider
2
  from .Providers import (
 
 
 
 
 
 
 
 
 
 
 
3
  Forefront
4
  )
 
 
g4f/__init__.py CHANGED
@@ -3,13 +3,14 @@ import sys
3
  from .typing import MetaModels, Union
4
  from . import Provider
5
 
 
6
  class Model(metaclass=MetaModels):
7
-
8
  class model:
9
  name: str
10
  base_provider: str
11
  best_site: str
12
-
13
  class gpt_35_turbo:
14
  name: str = 'gpt-3.5-turbo'
15
  base_provider: str = 'openai'
@@ -18,19 +19,16 @@ class Model(metaclass=MetaModels):
18
  class gpt_4:
19
  name: str = 'gpt-4'
20
  base_provider: str = 'openai'
21
- best_site: Provider.Provider = Provider.Bing
22
-
23
- class davinvi_003:
24
- name: str = 'davinvi-003'
25
- base_provider: str = 'openai'
26
  best_site: Provider.Provider = Provider.Forefront
27
-
 
28
  class Utils:
29
  convert: dict = {
30
  'gpt-3.5-turbo': Model.gpt_35_turbo,
31
  'gpt-4': Model.gpt_4
32
  }
33
 
 
34
  class ChatCompletion:
35
  @staticmethod
36
  def create(model: Model.model or str, messages: list, provider: Provider.Provider = None, stream: bool = False, **kwargs):
 
3
  from .typing import MetaModels, Union
4
  from . import Provider
5
 
6
+
7
  class Model(metaclass=MetaModels):
8
+
9
  class model:
10
  name: str
11
  base_provider: str
12
  best_site: str
13
+
14
  class gpt_35_turbo:
15
  name: str = 'gpt-3.5-turbo'
16
  base_provider: str = 'openai'
 
19
  class gpt_4:
20
  name: str = 'gpt-4'
21
  base_provider: str = 'openai'
 
 
 
 
 
22
  best_site: Provider.Provider = Provider.Forefront
23
+
24
+
25
  class Utils:
26
  convert: dict = {
27
  'gpt-3.5-turbo': Model.gpt_35_turbo,
28
  'gpt-4': Model.gpt_4
29
  }
30
 
31
+
32
  class ChatCompletion:
33
  @staticmethod
34
  def create(model: Model.model or str, messages: list, provider: Provider.Provider = None, stream: bool = False, **kwargs):
server/backend.py CHANGED
@@ -44,7 +44,7 @@ class Backend_Api:
44
 
45
  # Generate response
46
  response = ChatCompletion.create(model=model, stream=True,
47
- messages=messages, provider=g4f.Provider.Forefront)
48
 
49
  return self.app.response_class(generate_stream(response, jailbreak), mimetype='text/event-stream')
50
 
 
44
 
45
  # Generate response
46
  response = ChatCompletion.create(model=model, stream=True,
47
+ messages=messages)
48
 
49
  return self.app.response_class(generate_stream(response, jailbreak), mimetype='text/event-stream')
50