Switch API Provider
- client/html/index.html +2 -2
- g4f/Provider/Providers/Lockchat.py +32 -0
- g4f/Provider/__init__.py +2 -1
- g4f/__init__.py +1 -1
client/html/index.html
CHANGED
@@ -71,8 +71,8 @@
       <div class="buttons">
        <div class="field">
         <select class="dropdown" name="model" id="model">
-         <option value="gpt-3.5-turbo">GPT-3.5</option>
-         <option value="gpt-4"
+         <option value="gpt-3.5-turbo" selected>GPT-3.5</option>
+         <option value="gpt-4" disabled>GPT-4 (maintenance)</option>
        </select>
       </div>
       <div class="field">
g4f/Provider/Providers/Lockchat.py
ADDED
@@ -0,0 +1,32 @@
+import requests
+import os
+import json
+from ...typing import sha256, Dict, get_type_hints
+url = 'http://super.lockchat.app'
+model = ['gpt-4', 'gpt-3.5-turbo']
+supports_stream = True
+needs_auth = False
+
+def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
+
+    payload = {
+        "temperature": 0.7,
+        "messages": messages,
+        "model": model,
+        "stream": True,
+    }
+    headers = {
+        "user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0",
+    }
+    response = requests.post("http://super.lockchat.app/v1/chat/completions?auth=FnMNPlwZEnGFqvEc9470Vw==",
+                             json=payload, headers=headers, stream=True)
+    for token in response.iter_lines():
+        if b'The model: `gpt-4` does not exist' in token:
+            print('error, retrying...')
+            _create_completion(model=model, messages=messages, stream=stream, temperature=temperature, **kwargs)
+        if b"content" in token:
+            token = json.loads(token.decode('utf-8').split('data: ')[1])['choices'][0]['delta'].get('content')
+            if token: yield (token)
+
+params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
+    '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
g4f/Provider/__init__.py
CHANGED
@@ -1,4 +1,5 @@
 from . import Provider
 from .Providers import (
-    Forefront
+    Forefront,
+    Lockchat
 )
g4f/__init__.py
CHANGED
@@ -14,7 +14,7 @@ class Model(metaclass=MetaModels):
     class gpt_35_turbo:
         name: str = 'gpt-3.5-turbo'
         base_provider: str = 'openai'
-        best_site: Provider.Provider = Provider.
+        best_site: Provider.Provider = Provider.Lockchat

     class gpt_4:
         name: str = 'gpt-4'
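With best_site now pointing at Provider.Lockchat, default gpt-3.5-turbo requests should be routed through the new provider. A minimal sketch, assuming the package exposes a ChatCompletion.create entry point that falls back to best_site when no provider is passed; that dispatch detail is an assumption and is not shown in this diff:

    import g4f

    # Assumed entry point: with no explicit provider, dispatch is expected to use
    # Model.gpt_35_turbo.best_site, which this commit sets to Provider.Lockchat.
    response = g4f.ChatCompletion.create(
        model='gpt-3.5-turbo',
        messages=[{"role": "user", "content": "ping"}],
        stream=False,
    )
    print(response)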