AchyuthGamer
committed on
Commit b895885
1 parent: 3b4d3e7
Upload 100 files
This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.
- g4f/g4f/Provider/AItianhu.py +50 -0
- g4f/g4f/Provider/AItianhuSpace.py +73 -0
- g4f/g4f/Provider/Acytoo.py +51 -0
- g4f/g4f/Provider/AiService.py +36 -0
- g4f/g4f/Provider/Aibn.py +51 -0
- g4f/g4f/Provider/Aichat.py +54 -0
- g4f/g4f/Provider/Ails.py +106 -0
- g4f/g4f/Provider/Aivvm.py +78 -0
- g4f/g4f/Provider/Bard.py +92 -0
- g4f/g4f/Provider/Bing.py +283 -0
- g4f/g4f/Provider/ChatBase.py +62 -0
- g4f/g4f/Provider/ChatgptAi.py +75 -0
- g4f/g4f/Provider/ChatgptDuo.py +51 -0
- g4f/g4f/Provider/ChatgptLogin.py +74 -0
- g4f/g4f/Provider/CodeLinkAva.py +64 -0
- g4f/g4f/Provider/DeepAi.py +63 -0
- g4f/g4f/Provider/DfeHub.py +77 -0
- g4f/g4f/Provider/EasyChat.py +111 -0
- g4f/g4f/Provider/Equing.py +81 -0
- g4f/g4f/Provider/FastGpt.py +86 -0
- g4f/g4f/Provider/Forefront.py +40 -0
- g4f/g4f/Provider/GetGpt.py +88 -0
- g4f/g4f/Provider/GptGo.py +78 -0
- g4f/g4f/Provider/H2o.py +109 -0
- g4f/g4f/Provider/HuggingChat.py +104 -0
- g4f/g4f/Provider/Liaobots.py +89 -0
- g4f/g4f/Provider/Lockchat.py +64 -0
- g4f/g4f/Provider/Myshell.py +172 -0
- g4f/g4f/Provider/Opchatgpts.py +8 -0
- g4f/g4f/Provider/OpenAssistant.py +100 -0
- g4f/g4f/Provider/OpenaiChat.py +88 -0
- g4f/g4f/Provider/PerplexityAi.py +87 -0
- g4f/g4f/Provider/Raycast.py +72 -0
- g4f/g4f/Provider/Theb.py +97 -0
- g4f/g4f/Provider/V50.py +67 -0
- g4f/g4f/Provider/Vercel.py +377 -0
- g4f/g4f/Provider/Vitalentum.py +68 -0
- g4f/g4f/Provider/Wewordle.py +65 -0
- g4f/g4f/Provider/Wuguokai.py +63 -0
- g4f/g4f/Provider/Ylokh.py +79 -0
- g4f/g4f/Provider/You.py +40 -0
- g4f/g4f/Provider/Yqcloud.py +48 -0
- g4f/g4f/Provider/__init__.py +95 -0
- g4f/g4f/Provider/__pycache__/AItianhu.cpython-310.pyc +0 -0
- g4f/g4f/Provider/__pycache__/AItianhuSpace.cpython-310.pyc +0 -0
- g4f/g4f/Provider/__pycache__/Acytoo.cpython-310.pyc +0 -0
- g4f/g4f/Provider/__pycache__/AiService.cpython-310.pyc +0 -0
- g4f/g4f/Provider/__pycache__/Aibn.cpython-310.pyc +0 -0
- g4f/g4f/Provider/__pycache__/Aichat.cpython-310.pyc +0 -0
- g4f/g4f/Provider/__pycache__/Ails.cpython-310.pyc +0 -0
g4f/g4f/Provider/AItianhu.py
ADDED
@@ -0,0 +1,50 @@
from __future__ import annotations

import json
from curl_cffi.requests import AsyncSession

from .base_provider import AsyncProvider, format_prompt


class AItianhu(AsyncProvider):
    url = "https://www.aitianhu.com"
    working = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> str:
        data = {
            "prompt": format_prompt(messages),
            "options": {},
            "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
            "temperature": 0.8,
            "top_p": 1,
            **kwargs
        }
        async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107", verify=False) as session:
            response = await session.post(cls.url + "/api/chat-process", json=data)
            response.raise_for_status()
            line = response.text.splitlines()[-1]
            line = json.loads(line)
            return line["text"]


    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
            ("temperature", "float"),
            ("top_p", "int"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
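All providers in this upload expose the same entry points, so a brief usage sketch may help orient readers; it is illustrative only and assumes the package is importable as g4f with this module on the path.

    import asyncio

    from g4f.Provider.AItianhu import AItianhu

    async def main():
        # AsyncProvider subclasses return the whole completion as a single string
        reply = await AItianhu.create_async(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Say hello"}],
        )
        print(reply)

    asyncio.run(main())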
g4f/g4f/Provider/AItianhuSpace.py
ADDED
@@ -0,0 +1,73 @@
from __future__ import annotations

import random, json

from ..typing import AsyncGenerator
from g4f.requests import AsyncSession, StreamRequest
from .base_provider import AsyncGeneratorProvider, format_prompt

domains = {
    "gpt-3.5-turbo": ".aitianhu.space",
    "gpt-4": ".aitianhu.website",
}

class AItianhuSpace(AsyncGeneratorProvider):
    url = "https://chat3.aiyunos.top/"
    working = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool = True,
        **kwargs
    ) -> AsyncGenerator:
        if not model:
            model = "gpt-3.5-turbo"
        elif model not in domains:
            raise ValueError(f"Model is not supported: {model}")

        # Each request targets a random six-character subdomain of the model's domain.
        chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
        rand = ''.join(random.choice(chars) for _ in range(6))
        domain = domains[model]
        url = f'https://{rand}{domain}/api/chat-process'

        headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
        }
        async with AsyncSession(headers=headers, impersonate="chrome107", verify=False) as session:
            data = {
                "prompt": format_prompt(messages),
                "options": {},
                "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
                "temperature": 0.8,
                "top_p": 1,
                **kwargs
            }
            async with StreamRequest(session, "POST", url, json=data) as response:
                response.raise_for_status()
                async for line in response.content:
                    line = json.loads(line.rstrip())
                    if "detail" in line:
                        content = line["detail"]["choices"][0]["delta"].get("content")
                        if content:
                            yield content
                    # Server message translates to "the GPT-4 API is very expensive".
                    elif "message" in line and "AI-4接口非常昂贵" in line["message"]:
                        raise RuntimeError("Rate limit for GPT 4 reached")
                    else:
                        raise RuntimeError(f"Response: {line}")


    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
            ("top_p", "int"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/g4f/Provider/Acytoo.py
ADDED
@@ -0,0 +1,51 @@
from __future__ import annotations

from aiohttp import ClientSession

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider


class Acytoo(AsyncGeneratorProvider):
    url = 'https://chat.acytoo.com'
    working = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator:

        async with ClientSession(
            headers=_create_header()
        ) as session:
            async with session.post(
                cls.url + '/api/completions',
                proxy=proxy,
                json=_create_payload(messages, **kwargs)
            ) as response:
                response.raise_for_status()
                async for stream in response.content.iter_any():
                    if stream:
                        yield stream.decode()


def _create_header():
    return {
        'accept': '*/*',
        'content-type': 'application/json',
    }


def _create_payload(messages: list[dict[str, str]], temperature: float = 0.5, **kwargs):
    return {
        'key'         : '',
        'model'       : 'gpt-3.5-turbo',
        'messages'    : messages,
        'temperature' : temperature,
        'password'    : ''
    }
g4f/g4f/Provider/AiService.py
ADDED
@@ -0,0 +1,36 @@
from __future__ import annotations

import requests

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class AiService(BaseProvider):
    url = "https://aiservice.vercel.app/"
    working = False
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs: Any,
    ) -> CreateResult:
        base = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
        base += "\nassistant: "

        headers = {
            "accept": "*/*",
            "content-type": "text/plain;charset=UTF-8",
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "Referer": "https://aiservice.vercel.app/chat",
        }
        data = {"input": base}
        url = "https://aiservice.vercel.app/api/chat/answer"
        response = requests.post(url, headers=headers, json=data)
        response.raise_for_status()
        yield response.json()["data"]
g4f/g4f/Provider/Aibn.py
ADDED
@@ -0,0 +1,51 @@
from __future__ import annotations

import time
import hashlib

from ..typing import AsyncGenerator
from g4f.requests import AsyncSession
from .base_provider import AsyncGeneratorProvider


class Aibn(AsyncGeneratorProvider):
    url = "https://aibn.cc"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        async with AsyncSession(impersonate="chrome107") as session:
            timestamp = int(time.time())
            data = {
                "messages": messages,
                "pass": None,
                "sign": generate_signature(timestamp, messages[-1]["content"]),
                "time": timestamp
            }
            async with session.post(f"{cls.url}/api/generate", json=data) as response:
                response.raise_for_status()
                async for chunk in response.content.iter_any():
                    yield chunk.decode()

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"


def generate_signature(timestamp: int, message: str, secret: str = "undefined"):
    data = f"{timestamp}:{message}:{secret}"
    return hashlib.sha256(data.encode()).hexdigest()
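The "sign" field above is a plain SHA-256 digest over "timestamp:message:secret", with the literal string "undefined" as the secret. A quick standalone check (sample values invented for illustration, using generate_signature as defined above):

    import hashlib
    import time

    timestamp = int(time.time())
    message = "hello"
    # Recompute the digest by hand and compare with the helper's output.
    expected = hashlib.sha256(f"{timestamp}:{message}:undefined".encode()).hexdigest()
    assert generate_signature(timestamp, message) == expected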
g4f/g4f/Provider/Aichat.py
ADDED
@@ -0,0 +1,54 @@
from __future__ import annotations

from aiohttp import ClientSession

from .base_provider import AsyncProvider, format_prompt


class Aichat(AsyncProvider):
    url = "https://chat-gpt.org/chat"
    working = True
    supports_gpt_35_turbo = True

    @staticmethod
    async def create_async(
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> str:
        headers = {
            "authority": "chat-gpt.org",
            "accept": "*/*",
            "cache-control": "no-cache",
            "content-type": "application/json",
            "origin": "https://chat-gpt.org",
            "pragma": "no-cache",
            "referer": "https://chat-gpt.org/chat",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"macOS"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
        }
        async with ClientSession(
            headers=headers
        ) as session:
            json_data = {
                "message": format_prompt(messages),
                "temperature": kwargs.get('temperature', 0.5),
                "presence_penalty": 0,
                "top_p": kwargs.get('top_p', 1),
                "frequency_penalty": 0,
            }
            async with session.post(
                "https://chat-gpt.org/api/text",
                proxy=proxy,
                json=json_data
            ) as response:
                response.raise_for_status()
                result = await response.json()
                if not result['response']:
                    raise Exception(f"Error Response: {result}")
                return result["message"]
g4f/g4f/Provider/Ails.py
ADDED
@@ -0,0 +1,106 @@
from __future__ import annotations

import hashlib
import time
import uuid
import json
from datetime import datetime
from aiohttp import ClientSession

from ..typing import SHA256, AsyncGenerator
from .base_provider import AsyncGeneratorProvider


class Ails(AsyncGeneratorProvider):
    url: str = "https://ai.ls"
    working = True
    supports_gpt_35_turbo = True

    @staticmethod
    async def create_async_generator(
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator:
        headers = {
            "authority": "api.caipacity.com",
            "accept": "*/*",
            "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "authorization": "Bearer free",
            "client-id": str(uuid.uuid4()),
            "client-v": "0.1.278",
            "content-type": "application/json",
            "origin": "https://ai.ls",
            "referer": "https://ai.ls/",
            "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "cross-site",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
            "from-url": "https://ai.ls/?chat=1"
        }
        async with ClientSession(
            headers=headers
        ) as session:
            timestamp = _format_timestamp(int(time.time() * 1000))
            json_data = {
                "model": "gpt-3.5-turbo",
                "temperature": kwargs.get("temperature", 0.6),
                "stream": True,
                "messages": messages,
                "d": datetime.now().strftime("%Y-%m-%d"),
                "t": timestamp,
                "s": _hash({"t": timestamp, "m": messages[-1]["content"]}),
            }
            async with session.post(
                "https://api.caipacity.com/v1/chat/completions",
                proxy=proxy,
                json=json_data
            ) as response:
                response.raise_for_status()
                start = "data: "
                async for line in response.content:
                    line = line.decode('utf-8')
                    if line.startswith(start) and line != "data: [DONE]":
                        line = line[len(start):-1]
                        line = json.loads(line)
                        token = line["choices"][0]["delta"].get("content")
                        if token:
                            if "ai.ls" in token or "ai.ci" in token:
                                raise Exception("Response Error: " + token)
                            yield token


    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"


def _hash(json_data: dict[str, str]) -> SHA256:
    # Request signature: SHA-256 over the timestamp, the last message,
    # a fixed site key, and the message length, in that order.
    base_string: str = "%s:%s:%s:%s" % (
        json_data["t"],
        json_data["m"],
        "WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf",
        len(json_data["m"]),
    )

    return SHA256(hashlib.sha256(base_string.encode()).hexdigest())


def _format_timestamp(timestamp: int) -> str:
    # Force the last digit of the millisecond timestamp to be odd
    # (odd digits are kept, even digits are bumped up by one).
    e = timestamp
    n = e % 10
    r = n + 1 if n % 2 == 0 else n
    return str(e - n + r)
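To make the _format_timestamp rule above concrete, two worked cases (sample values invented for illustration):

    assert _format_timestamp(1693000000004) == "1693000000005"  # even last digit bumped up
    assert _format_timestamp(1693000000007) == "1693000000007"  # odd last digit unchanged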
g4f/g4f/Provider/Aivvm.py
ADDED
@@ -0,0 +1,78 @@
from __future__ import annotations
import requests

from .base_provider import BaseProvider
from ..typing import CreateResult

models = {
    'gpt-3.5-turbo': {'id': 'gpt-3.5-turbo', 'name': 'GPT-3.5'},
    'gpt-3.5-turbo-0613': {'id': 'gpt-3.5-turbo-0613', 'name': 'GPT-3.5-0613'},
    'gpt-3.5-turbo-16k': {'id': 'gpt-3.5-turbo-16k', 'name': 'GPT-3.5-16K'},
    'gpt-3.5-turbo-16k-0613': {'id': 'gpt-3.5-turbo-16k-0613', 'name': 'GPT-3.5-16K-0613'},
    'gpt-4': {'id': 'gpt-4', 'name': 'GPT-4'},
    'gpt-4-0613': {'id': 'gpt-4-0613', 'name': 'GPT-4-0613'},
    'gpt-4-32k': {'id': 'gpt-4-32k', 'name': 'GPT-4-32K'},
    'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'},
}

class Aivvm(BaseProvider):
    url = 'https://chat.aivvm.com'
    supports_stream = True
    working = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True

    @classmethod
    def create_completion(cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs
    ) -> CreateResult:
        if not model:
            model = "gpt-3.5-turbo"
        elif model not in models:
            raise ValueError(f"Model is not supported: {model}")

        headers = {
            "authority"          : "chat.aivvm.com",
            "accept"             : "*/*",
            "accept-language"    : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "content-type"       : "application/json",
            "origin"             : "https://chat.aivvm.com",
            "referer"            : "https://chat.aivvm.com/",
            "sec-ch-ua"          : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
            "sec-ch-ua-mobile"   : "?0",
            "sec-ch-ua-platform" : '"macOS"',
            "sec-fetch-dest"     : "empty",
            "sec-fetch-mode"     : "cors",
            "sec-fetch-site"     : "same-origin",
            "user-agent"         : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36",
        }

        json_data = {
            "model"       : models[model],
            "messages"    : messages,
            "key"         : "",
            "prompt"      : "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
            "temperature" : kwargs.get("temperature", 0.7)
        }

        response = requests.post(
            "https://chat.aivvm.com/api/chat", headers=headers, json=json_data, stream=True)
        response.raise_for_status()

        for chunk in response.iter_content(chunk_size=None):
            yield chunk.decode('utf-8')

    @classmethod
    @property
    def params(cls):
        params = [
            ('model', 'str'),
            ('messages', 'list[dict[str, str]]'),
            ('stream', 'bool'),
            ('temperature', 'float'),
        ]
        param = ', '.join([': '.join(p) for p in params])
        return f'g4f.provider.{cls.__name__} supports: ({param})'
g4f/g4f/Provider/Bard.py
ADDED
@@ -0,0 +1,92 @@
from __future__ import annotations

import json
import random
import re

from aiohttp import ClientSession

from .base_provider import AsyncProvider, format_prompt, get_cookies


class Bard(AsyncProvider):
    url = "https://bard.google.com"
    needs_auth = True
    working = True
    _snlm0e = None

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        cookies: dict = None,
        **kwargs
    ) -> str:
        prompt = format_prompt(messages)
        if proxy and "://" not in proxy:
            proxy = f"http://{proxy}"
        if not cookies:
            cookies = get_cookies(".google.com")

        headers = {
            'authority': 'bard.google.com',
            'origin': 'https://bard.google.com',
            'referer': 'https://bard.google.com/',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
            'x-same-domain': '1',
        }

        async with ClientSession(
            cookies=cookies,
            headers=headers
        ) as session:
            if not cls._snlm0e:
                async with session.get(cls.url, proxy=proxy) as response:
                    text = await response.text()

                match = re.search(r'SNlM0e\":\"(.*?)\"', text)
                if not match:
                    raise RuntimeError("No snlm0e value.")
                cls._snlm0e = match.group(1)

            params = {
                'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
                '_reqid': random.randint(1111, 9999),
                'rt': 'c'
            }

            data = {
                'at': cls._snlm0e,
                'f.req': json.dumps([None, json.dumps([[prompt]])])
            }

            intents = '.'.join([
                'assistant',
                'lamda',
                'BardFrontendService'
            ])

            async with session.post(
                f'{cls.url}/_/BardChatUi/data/{intents}/StreamGenerate',
                data=data,
                params=params,
                proxy=proxy
            ) as response:
                response = await response.text()
                response = json.loads(response.splitlines()[3])[0][2]
                response = json.loads(response)[4][0][1][0]
                return response

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
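The doubly nested json.dumps in the "f.req" field above is easy to misread: the prompt is serialized once into an inner array, and that JSON string is then embedded in an outer array that is serialized again. A minimal sketch (prompt value invented for illustration):

    import json

    prompt = "Hello Bard"
    inner = json.dumps([[prompt]])     # '[["Hello Bard"]]'
    f_req = json.dumps([None, inner])  # '[null, "[[\\"Hello Bard\\"]]"]'
    print(f_req)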
g4f/g4f/Provider/Bing.py
ADDED
@@ -0,0 +1,283 @@
from __future__ import annotations

import random
import json
import os
from aiohttp import ClientSession, ClientTimeout
from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, get_cookies


class Bing(AsyncGeneratorProvider):
    url = "https://bing.com/chat"
    working = True
    supports_gpt_4 = True

    @staticmethod
    def create_async_generator(
        model: str,
        messages: list[dict[str, str]],
        cookies: dict = None, **kwargs) -> AsyncGenerator:

        if not cookies:
            cookies = get_cookies(".bing.com")
        if len(messages) < 2:
            prompt = messages[0]["content"]
            context = None
        else:
            prompt = messages[-1]["content"]
            context = create_context(messages[:-1])

        if not cookies or "SRCHD" not in cookies:
            cookies = {
                'SRCHD'         : 'AF=NOFORM',
                'PPLState'      : '1',
                'KievRPSSecAuth': '',
                'SUID'          : '',
                'SRCHUSR'       : '',
                'SRCHHPGUSR'    : '',
            }
        return stream_generate(prompt, context, cookies)

def create_context(messages: list[dict[str, str]]):
    context = "".join(f"[{message['role']}](#message)\n{message['content']}\n\n" for message in messages)

    return context

class Conversation():
    def __init__(self, conversationId: str, clientId: str, conversationSignature: str) -> None:
        self.conversationId = conversationId
        self.clientId = clientId
        self.conversationSignature = conversationSignature

async def create_conversation(session: ClientSession) -> Conversation:
    url = 'https://www.bing.com/turing/conversation/create'
    async with session.get(url) as response:
        response = await response.json()
        conversationId = response.get('conversationId')
        clientId = response.get('clientId')
        conversationSignature = response.get('conversationSignature')

        if not conversationId or not clientId or not conversationSignature:
            raise Exception('Failed to create conversation.')

        return Conversation(conversationId, clientId, conversationSignature)

async def list_conversations(session: ClientSession) -> list:
    url = "https://www.bing.com/turing/conversation/chats"
    async with session.get(url) as response:
        response = await response.json()
        return response["chats"]

async def delete_conversation(session: ClientSession, conversation: Conversation) -> list:
    url = "https://sydney.bing.com/sydney/DeleteSingleConversation"
    json = {
        "conversationId": conversation.conversationId,
        "conversationSignature": conversation.conversationSignature,
        "participant": {"id": conversation.clientId},
        "source": "cib",
        "optionsSets": ["autosave"]
    }
    async with session.post(url, json=json) as response:
        response = await response.json()
        return response["result"]["value"] == "Success"

class Defaults:
    delimiter = "\x1e"
    ip_address = f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"

    allowedMessageTypes = [
        "Chat",
        "Disengaged",
        "AdsQuery",
        "SemanticSerp",
        "GenerateContentQuery",
        "SearchQuery",
        "ActionRequest",
        "Context",
        "Progress",
        "AdsQuery",
        "SemanticSerp",
    ]

    sliceIds = [
        "winmuid3tf",
        "osbsdusgreccf",
        "ttstmout",
        "crchatrev",
        "winlongmsgtf",
        "ctrlworkpay",
        "norespwtf",
        "tempcacheread",
        "temptacache",
        "505scss0",
        "508jbcars0",
        "515enbotdets0",
        "5082tsports",
        "515vaoprvs",
        "424dagslnv1s0",
        "kcimgattcf",
        "427startpms0",
    ]

    location = {
        "locale": "en-US",
        "market": "en-US",
        "region": "US",
        "locationHints": [
            {
                "country": "United States",
                "state": "California",
                "city": "Los Angeles",
                "timezoneoffset": 8,
                "countryConfidence": 8,
                "Center": {"Latitude": 34.0536909, "Longitude": -118.242766},
                "RegionType": 2,
                "SourceType": 1,
            }
        ],
    }

    headers = {
        'accept': '*/*',
        'accept-language': 'en-US,en;q=0.9',
        'cache-control': 'max-age=0',
        'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
        'sec-ch-ua-arch': '"x86"',
        'sec-ch-ua-bitness': '"64"',
        'sec-ch-ua-full-version': '"110.0.1587.69"',
        'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-model': '""',
        'sec-ch-ua-platform': '"Windows"',
        'sec-ch-ua-platform-version': '"15.0.0"',
        'sec-fetch-dest': 'document',
        'sec-fetch-mode': 'navigate',
        'sec-fetch-site': 'none',
        'sec-fetch-user': '?1',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
        'x-edge-shopping-flag': '1',
        'x-forwarded-for': ip_address,
    }

    optionsSets = {
        "optionsSets": [
            'saharasugg',
            'enablenewsfc',
            'clgalileo',
            'gencontentv3',
            "nlu_direct_response_filter",
            "deepleo",
            "disable_emoji_spoken_text",
            "responsible_ai_policy_235",
            "enablemm",
            "h3precise",
            "dtappid",
            "cricinfo",
            "cricinfov2",
            "dv3sugg",
            "nojbfedge"
        ]
    }

def format_message(msg: dict) -> str:
    return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter

def create_message(conversation: Conversation, prompt: str, context: str=None) -> str:
    struct = {
        'arguments': [
            {
                **Defaults.optionsSets,
                'source': 'cib',
                'allowedMessageTypes': Defaults.allowedMessageTypes,
                'sliceIds': Defaults.sliceIds,
                'traceId': os.urandom(16).hex(),
                'isStartOfSession': True,
                'message': Defaults.location | {
                    'author': 'user',
                    'inputMethod': 'Keyboard',
                    'text': prompt,
                    'messageType': 'Chat'
                },
                'conversationSignature': conversation.conversationSignature,
                'participant': {
                    'id': conversation.clientId
                },
                'conversationId': conversation.conversationId
            }
        ],
        'invocationId': '0',
        'target': 'chat',
        'type': 4
    }

    if context:
        struct['arguments'][0]['previousMessages'] = [{
            "author": "user",
            "description": context,
            "contextType": "WebPage",
            "messageType": "Context",
            "messageId": "discover-web--page-ping-mriduna-----"
        }]
    return format_message(struct)

async def stream_generate(
        prompt: str,
        context: str=None,
        cookies: dict=None
    ):
    async with ClientSession(
        timeout=ClientTimeout(total=900),
        cookies=cookies,
        headers=Defaults.headers,
    ) as session:
        conversation = await create_conversation(session)
        try:
            async with session.ws_connect(
                'wss://sydney.bing.com/sydney/ChatHub',
                autoping=False,
            ) as wss:

                await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
                msg = await wss.receive(timeout=900)

                await wss.send_str(create_message(conversation, prompt, context))

                response_txt = ''
                result_text = ''
                returned_text = ''
                final = False

                while not final:
                    msg = await wss.receive(timeout=900)
                    objects = msg.data.split(Defaults.delimiter)
                    for obj in objects:
                        if not obj:
                            continue

                        response = json.loads(obj)
                        if response.get('type') == 1 and response['arguments'][0].get('messages'):
                            message = response['arguments'][0]['messages'][0]
                            if message['contentOrigin'] != 'Apology':
                                response_txt = result_text + \
                                    message['adaptiveCards'][0]['body'][0].get('text', '')

                                if message.get('messageType'):
                                    inline_txt = message['adaptiveCards'][0]['body'][0]['inlines'][0].get('text')
                                    response_txt += inline_txt + '\n'
                                    result_text += inline_txt + '\n'

                            if response_txt.startswith(returned_text):
                                new = response_txt[len(returned_text):]
                                if new != "\n":
                                    yield new
                                    returned_text = response_txt
                        elif response.get('type') == 2:
                            result = response['item']['result']
                            if result.get('error'):
                                raise Exception(f"{result['value']}: {result['message']}")
                            final = True
                            break
        finally:
            await delete_conversation(session, conversation)
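A note on the wire format used above: every payload on the ChatHub websocket is a JSON object terminated by the 0x1e record separator held in Defaults.delimiter, and stream_generate splits incoming frames back apart on that byte. A minimal sketch of the framing (payloads invented for illustration):

    import json

    delimiter = "\x1e"  # same record separator as Defaults.delimiter
    frame = json.dumps({"type": 1}) + delimiter + json.dumps({"type": 2}) + delimiter
    for obj in frame.split(delimiter):
        if obj:  # the trailing separator leaves an empty final element
            print(json.loads(obj))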
g4f/g4f/Provider/ChatBase.py
ADDED
@@ -0,0 +1,62 @@
from __future__ import annotations

from aiohttp import ClientSession

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider


class ChatBase(AsyncGeneratorProvider):
    url = "https://www.chatbase.co"
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        if model == "gpt-4":
            chat_id = "quran---tafseer-saadi-pdf-wbgknt7zn"
        elif model == "gpt-3.5-turbo" or not model:
            chat_id = "chatbase--1--pdf-p680fxvnm"
        else:
            raise ValueError(f"Model is not supported: {model}")
        headers = {
            "User-Agent"      : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept"          : "*/*",
            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "Origin"          : cls.url,
            "Referer"         : cls.url + "/",
            "Sec-Fetch-Dest"  : "empty",
            "Sec-Fetch-Mode"  : "cors",
            "Sec-Fetch-Site"  : "same-origin",
        }
        async with ClientSession(
            headers=headers
        ) as session:
            data = {
                "messages": messages,
                "captchaCode": "hadsa",
                "chatId": chat_id,
                "conversationId": f"kcXpqEnqUie3dnJlsRi_O-{chat_id}"
            }
            async with session.post("https://www.chatbase.co/api/fe/chat", json=data) as response:
                response.raise_for_status()
                async for stream in response.content.iter_any():
                    yield stream.decode()


    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/g4f/Provider/ChatgptAi.py
ADDED
@@ -0,0 +1,75 @@
from __future__ import annotations

import re
import html
import json
from aiohttp import ClientSession

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider


class ChatgptAi(AsyncGeneratorProvider):
    url: str = "https://chatgpt.ai/"
    working = True
    supports_gpt_35_turbo = True
    _system_data = None

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator:
        headers = {
            "authority"          : "chatgpt.ai",
            "accept"             : "*/*",
            "accept-language"    : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "cache-control"      : "no-cache",
            "origin"             : "https://chatgpt.ai",
            "pragma"             : "no-cache",
            "referer"            : cls.url,
            "sec-ch-ua"          : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
            "sec-ch-ua-mobile"   : "?0",
            "sec-ch-ua-platform" : '"Windows"',
            "sec-fetch-dest"     : "empty",
            "sec-fetch-mode"     : "cors",
            "sec-fetch-site"     : "same-origin",
            "user-agent"         : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
        }
        async with ClientSession(
            headers=headers
        ) as session:
            if not cls._system_data:
                async with session.get(cls.url, proxy=proxy) as response:
                    response.raise_for_status()
                    match = re.findall(r"data-system='([^']+)'", await response.text())
                    if not match:
                        raise RuntimeError("No system data")
                    cls._system_data = json.loads(html.unescape(match[0]))

            data = {
                "botId": cls._system_data["botId"],
                "clientId": "",
                "contextId": cls._system_data["contextId"],
                "id": cls._system_data["id"],
                "messages": messages[:-1],
                "newMessage": messages[-1]["content"],
                "session": cls._system_data["sessionId"],
                "stream": True
            }
            async with session.post(
                "https://chatgpt.ai/wp-json/mwai-ui/v1/chats/submit",
                proxy=proxy,
                json=data
            ) as response:
                response.raise_for_status()
                start = "data: "
                async for line in response.content:
                    line = line.decode('utf-8')
                    if line.startswith(start):
                        line = json.loads(line[len(start):-1])
                        if line["type"] == "live":
                            yield line["data"]
g4f/g4f/Provider/ChatgptDuo.py
ADDED
@@ -0,0 +1,51 @@
from __future__ import annotations

from g4f.requests import AsyncSession
from .base_provider import AsyncProvider, format_prompt


class ChatgptDuo(AsyncProvider):
    url = "https://chatgptduo.com"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> str:
        async with AsyncSession(impersonate="chrome107") as session:
            prompt = format_prompt(messages)
            data = {
                "prompt": prompt,
                "search": prompt,
                "purpose": "ask",
            }
            async with session.post(f"{cls.url}/", data=data) as response:
                response.raise_for_status()
                data = await response.json()

            cls._sources = [{
                "title": source["title"],
                "url": source["link"],
                "snippet": source["snippet"]
            } for source in data["results"]]

            return data["answer"]

    @classmethod
    def get_sources(cls):
        return cls._sources

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/g4f/Provider/ChatgptLogin.py
ADDED
@@ -0,0 +1,74 @@
from __future__ import annotations

import os, re
from aiohttp import ClientSession

from .base_provider import AsyncProvider, format_prompt


class ChatgptLogin(AsyncProvider):
    url = "https://opchatgpts.net"
    supports_gpt_35_turbo = True
    working = True
    _nonce = None

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> str:
        headers = {
            "User-Agent"      : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept"          : "*/*",
            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "Origin"          : "https://opchatgpts.net",
            "Alt-Used"        : "opchatgpts.net",
            "Referer"         : "https://opchatgpts.net/chatgpt-free-use/",
            "Sec-Fetch-Dest"  : "empty",
            "Sec-Fetch-Mode"  : "cors",
            "Sec-Fetch-Site"  : "same-origin",
        }
        async with ClientSession(
            headers=headers
        ) as session:
            if not cls._nonce:
                async with session.get(
                    "https://opchatgpts.net/chatgpt-free-use/",
                    params={"id": os.urandom(6).hex()},
                ) as response:
                    result = re.search(r'data-nonce="(.*?)"', await response.text())
                    if not result:
                        raise RuntimeError("No nonce value")
                    cls._nonce = result.group(1)
            data = {
                "_wpnonce": cls._nonce,
                "post_id": 28,
                "url": "https://opchatgpts.net/chatgpt-free-use",
                "action": "wpaicg_chat_shortcode_message",
                "message": format_prompt(messages),
                "bot_id": 0
            }
            async with session.post("https://opchatgpts.net/wp-admin/admin-ajax.php", data=data) as response:
                response.raise_for_status()
                data = await response.json()
                if "data" in data:
                    return data["data"]
                elif "msg" in data:
                    raise RuntimeError(data["msg"])
                else:
                    raise RuntimeError(f"Response: {data}")


    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/g4f/Provider/CodeLinkAva.py
ADDED
@@ -0,0 +1,64 @@
from __future__ import annotations

from aiohttp import ClientSession
import json

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider


class CodeLinkAva(AsyncGeneratorProvider):
    url = "https://ava-ai-ef611.web.app"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        headers = {
            "User-Agent"      : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept"          : "*/*",
            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "Origin"          : cls.url,
            "Referer"         : cls.url + "/",
            "Sec-Fetch-Dest"  : "empty",
            "Sec-Fetch-Mode"  : "cors",
            "Sec-Fetch-Site"  : "same-origin",
        }
        async with ClientSession(
            headers=headers
        ) as session:
            data = {
                "messages": messages,
                "temperature": 0.6,
                "stream": True,
                **kwargs
            }
            async with session.post("https://ava-alpha-api.codelink.io/api/chat", json=data) as response:
                response.raise_for_status()
                async for line in response.content:
                    line = line.decode()
                    if line.startswith("data: "):
                        if line.startswith("data: [DONE]"):
                            break
                        line = json.loads(line[6:-1])
                        content = line["choices"][0]["delta"].get("content")
                        if content:
                            yield content


    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/g4f/Provider/DeepAi.py
ADDED
@@ -0,0 +1,63 @@
from __future__ import annotations

import json
import js2py
from aiohttp import ClientSession

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider


class DeepAi(AsyncGeneratorProvider):
    url: str = "https://deepai.org"
    working = True
    supports_gpt_35_turbo = True

    @staticmethod
    async def create_async_generator(
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator:

        # Replicates the site's client-side script that derives the "api-key"
        # header from the user agent; evaluated below with js2py.
        token_js = """
var agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'
var a, b, c, d, e, h, f, l, g, k, m, n, r, x, C, E, N, F, T, O, P, w, D, G, Q, R, W, I, aa, fa, na, oa, ha, ba, X, ia, ja, ka, J, la, K, L, ca, S, U, M, ma, B, da, V, Y;
h = Math.round(1E11 * Math.random()) + "";
f = function () {
    for (var p = [], q = 0; 64 > q;) p[q] = 0 | 4294967296 * Math.sin(++q % Math.PI);

    return function (t) {
        var v, y, H, ea = [v = 1732584193, y = 4023233417, ~v, ~y],
            Z = [],
            A = unescape(encodeURI(t)) + "\u0080",
            z = A.length;
        t = --z / 4 + 2 | 15;
        for (Z[--t] = 8 * z; ~z;) Z[z >> 2] |= A.charCodeAt(z) << 8 * z--;
        for (q = A = 0; q < t; q += 16) {
            for (z = ea; 64 > A; z = [H = z[3], v + ((H = z[0] + [v & y | ~v & H, H & v | ~H & y, v ^ y ^ H, y ^ (v | ~H)][z = A >> 4] + p[A] + ~~Z[q | [A, 5 * A + 1, 3 * A + 5, 7 * A][z] & 15]) << (z = [7, 12, 17, 22, 5, 9, 14, 20, 4, 11, 16, 23, 6, 10, 15, 21][4 * z + A++ % 4]) | H >>> -z), v, y]) v = z[1] | 0, y = z[2];
            for (A = 4; A;) ea[--A] += z[A]
        }
        for (t = ""; 32 > A;) t += (ea[A >> 3] >> 4 * (1 ^ A++) & 15).toString(16);
        return t.split("").reverse().join("")
    }
}();

"tryit-" + h + "-" + f(agent + f(agent + f(agent + h + "x")));
"""

        payload = {"chat_style": "chat", "chatHistory": json.dumps(messages)}
        api_key = js2py.eval_js(token_js)
        headers = {
            "api-key": api_key,
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36",
        }
        async with ClientSession(
            headers=headers
        ) as session:
            async with session.post("https://api.deepai.org/make_me_a_sandwich", proxy=proxy, data=payload) as response:
                response.raise_for_status()
                async for stream in response.content.iter_any():
                    if stream:
                        yield stream.decode()
g4f/g4f/Provider/DfeHub.py
ADDED
@@ -0,0 +1,77 @@
from __future__ import annotations

import json
import re
import time

import requests

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class DfeHub(BaseProvider):
    url = "https://chat.dfehub.com/"
    supports_stream = True
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        headers = {
            "authority"         : "chat.dfehub.com",
            "accept"            : "*/*",
            "accept-language"   : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "content-type"      : "application/json",
            "origin"            : "https://chat.dfehub.com",
            "referer"           : "https://chat.dfehub.com/",
            "sec-ch-ua"         : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
            "sec-ch-ua-mobile"  : "?0",
            "sec-ch-ua-platform": '"macOS"',
            "sec-fetch-dest"    : "empty",
            "sec-fetch-mode"    : "cors",
            "sec-fetch-site"    : "same-origin",
            "user-agent"        : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
            "x-requested-with"  : "XMLHttpRequest",
        }

        json_data = {
            "messages"          : messages,
            "model"             : "gpt-3.5-turbo",
            "temperature"       : kwargs.get("temperature", 0.5),
            "presence_penalty"  : kwargs.get("presence_penalty", 0),
            "frequency_penalty" : kwargs.get("frequency_penalty", 0),
            "top_p"             : kwargs.get("top_p", 1),
            "stream"            : True
        }

        response = requests.post("https://chat.dfehub.com/api/openai/v1/chat/completions",
                                 headers=headers, json=json_data, timeout=3)

        for chunk in response.iter_lines():
            if b"detail" in chunk:
                # A float in the error detail is treated as a retry delay:
                # sleep for that long, then retry the whole request.
                delay = re.findall(r"\d+\.\d+", chunk.decode())
                delay = float(delay[-1])
                time.sleep(delay)
                yield from DfeHub.create_completion(model, messages, stream, **kwargs)
            if b"content" in chunk:
                data = json.loads(chunk.decode().split("data: ")[1])
                yield data["choices"][0]["delta"]["content"]

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
            ("presence_penalty", "int"),
            ("frequency_penalty", "int"),
            ("top_p", "int"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/g4f/Provider/EasyChat.py
ADDED
@@ -0,0 +1,111 @@
from __future__ import annotations

import json
import random

import requests

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class EasyChat(BaseProvider):
    url: str = "https://free.easychat.work"
    supports_stream = True
    supports_gpt_35_turbo = True
    working = False

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        active_servers = [
            "https://chat10.fastgpt.me",
            "https://chat9.fastgpt.me",
            "https://chat1.fastgpt.me",
            "https://chat2.fastgpt.me",
            "https://chat3.fastgpt.me",
            "https://chat4.fastgpt.me",
            "https://gxos1h1ddt.fastgpt.me"
        ]

        server = active_servers[kwargs.get("active_server", random.randint(0, 5))]
        headers = {
            "authority"         : f"{server}".replace("https://", ""),
            "accept"            : "text/event-stream",
            "accept-language"   : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3,fa=0.2",
            "content-type"      : "application/json",
            "origin"            : f"{server}",
            "referer"           : f"{server}/",
            "plugins"           : "0",
            "sec-ch-ua"         : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
            "sec-ch-ua-mobile"  : "?0",
            "sec-ch-ua-platform": '"Windows"',
            "sec-fetch-dest"    : "empty",
            "sec-fetch-mode"    : "cors",
            "sec-fetch-site"    : "same-origin",
            "user-agent"        : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "usesearch"         : "false",
            "x-requested-with"  : "XMLHttpRequest"
        }

        json_data = {
            "messages"          : messages,
            "stream"            : stream,
            "model"             : model,
            "temperature"       : kwargs.get("temperature", 0.5),
            "presence_penalty"  : kwargs.get("presence_penalty", 0),
            "frequency_penalty" : kwargs.get("frequency_penalty", 0),
            "top_p"             : kwargs.get("top_p", 1)
        }

        session = requests.Session()
        # init cookies from server
        session.get(f"{server}/")

        response = session.post(f"{server}/api/openai/v1/chat/completions",
                                headers=headers, json=json_data, stream=stream)

        if response.status_code == 200:
            if not stream:
                json_data = response.json()

                if "choices" in json_data:
                    yield json_data["choices"][0]["message"]["content"]
                else:
                    raise Exception("No response from server")
            else:
                for chunk in response.iter_lines():
                    if b"content" in chunk:
                        split_data = chunk.decode().split("data:")

                        if len(split_data) > 1:
                            yield json.loads(split_data[1])["choices"][0]["delta"]["content"]
        else:
            raise Exception(f"Error {response.status_code} from server: {response.reason}")

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
            ("presence_penalty", "int"),
            ("frequency_penalty", "int"),
            ("top_p", "int"),
            ("active_server", "int"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
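A usage sketch for the class above, assuming the package layout of this commit (EasyChat importable from g4f.Provider); the class is flagged working = False, so the listed mirrors may no longer respond. The active_server kwarg pins one mirror instead of a random pick:

from g4f.Provider import EasyChat

# active_server=2 selects https://chat1.fastgpt.me from the list above.
for token in EasyChat.create_completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Say hi"}],
    stream=True,
    active_server=2,
):
    print(token, end="")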
g4f/g4f/Provider/Equing.py
ADDED
@@ -0,0 +1,81 @@
from __future__ import annotations

import json

import requests

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class Equing(BaseProvider):
    url: str = 'https://next.eqing.tech/'
    working = False
    supports_stream = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = False

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        headers = {
            'authority'         : 'next.eqing.tech',
            'accept'            : 'text/event-stream',
            'accept-language'   : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'cache-control'     : 'no-cache',
            'content-type'      : 'application/json',
            'origin'            : 'https://next.eqing.tech',
            'plugins'           : '0',
            'pragma'            : 'no-cache',
            'referer'           : 'https://next.eqing.tech/',
            'sec-ch-ua'         : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
            'sec-ch-ua-mobile'  : '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest'    : 'empty',
            'sec-fetch-mode'    : 'cors',
            'sec-fetch-site'    : 'same-origin',
            'user-agent'        : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
            'usesearch'         : 'false',
            'x-requested-with'  : 'XMLHttpRequest'
        }

        json_data = {
            'messages'          : messages,
            'stream'            : stream,
            'model'             : model,
            'temperature'       : kwargs.get('temperature', 0.5),
            'presence_penalty'  : kwargs.get('presence_penalty', 0),
            'frequency_penalty' : kwargs.get('frequency_penalty', 0),
            'top_p'             : kwargs.get('top_p', 1),
        }

        response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions',
                                 headers=headers, json=json_data, stream=stream)

        if not stream:
            yield response.json()["choices"][0]["message"]["content"]
            return

        for line in response.iter_content(chunk_size=1024):
            if line:
                if b'content' in line:
                    line_json = json.loads(line.decode('utf-8').split('data: ')[1])
                    token = line_json['choices'][0]['delta'].get('content')
                    if token:
                        yield token

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/g4f/Provider/FastGpt.py
ADDED
@@ -0,0 +1,86 @@
from __future__ import annotations

import json
import random
from abc import ABC

import requests

from ..typing import Any, CreateResult


class FastGpt(ABC):
    url: str = 'https://chat9.fastgpt.me/'
    working = False
    needs_auth = False
    supports_stream = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = False

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        headers = {
            'authority'         : 'chat9.fastgpt.me',
            'accept'            : 'text/event-stream',
            'accept-language'   : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'cache-control'     : 'no-cache',
            'content-type'      : 'application/json',
            'origin'            : 'https://chat9.fastgpt.me',
            'plugins'           : '0',
            'pragma'            : 'no-cache',
            'referer'           : 'https://chat9.fastgpt.me/',
            'sec-ch-ua'         : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
            'sec-ch-ua-mobile'  : '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest'    : 'empty',
            'sec-fetch-mode'    : 'cors',
            'sec-fetch-site'    : 'same-origin',
            'user-agent'        : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
            'usesearch'         : 'false',
            'x-requested-with'  : 'XMLHttpRequest',
        }

        json_data = {
            'messages'          : messages,
            'stream'            : stream,
            'model'             : model,
            'temperature'       : kwargs.get('temperature', 0.5),
            'presence_penalty'  : kwargs.get('presence_penalty', 0),
            'frequency_penalty' : kwargs.get('frequency_penalty', 0),
            'top_p'             : kwargs.get('top_p', 1),
        }

        subdomain = random.choice([
            'jdaen979ew',
            'chat9'
        ])

        response = requests.post(f'https://{subdomain}.fastgpt.me/api/openai/v1/chat/completions',
                                 headers=headers, json=json_data, stream=stream)

        for line in response.iter_lines():
            if line:
                try:
                    if b'content' in line:
                        line_json = json.loads(line.decode('utf-8').split('data: ')[1])
                        token = line_json['choices'][0]['delta'].get('content')
                        if token:
                            yield token
                except Exception:
                    continue

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/g4f/Provider/Forefront.py
ADDED
@@ -0,0 +1,40 @@
from __future__ import annotations

import json

import requests

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class Forefront(BaseProvider):
    url = "https://forefront.com"
    supports_stream = True
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        json_data = {
            "text"          : messages[-1]["content"],
            "action"        : "noauth",
            "id"            : "",
            "parentId"      : "",
            "workspaceId"   : "",
            "messagePersona": "607e41fe-95be-497e-8e97-010a59b2e2c0",
            "model"         : "gpt-4",
            "messages"      : messages[:-1] if len(messages) > 1 else [],
            "internetMode"  : "auto",
        }

        response = requests.post("https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat",
                                 json=json_data, stream=True)

        response.raise_for_status()
        for token in response.iter_lines():
            if b"delta" in token:
                yield json.loads(token.decode().split("data: ")[1])["delta"]
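The request body above splits the conversation: the newest message travels as "text" and the earlier turns as "messages". A standalone sketch of that split, using the message format shared by all providers in this commit:

messages = [
    {"role": "user", "content": "What is 2+2?"},
    {"role": "assistant", "content": "4"},
    {"role": "user", "content": "And times 3?"},
]

text    = messages[-1]["content"]                     # newest turn: "And times 3?"
history = messages[:-1] if len(messages) > 1 else []  # earlier turns sent as context

print(text, len(history))  # -> And times 3? 2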
g4f/g4f/Provider/GetGpt.py
ADDED
@@ -0,0 +1,88 @@
from __future__ import annotations

import json
import os
import uuid

import requests
from Crypto.Cipher import AES

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class GetGpt(BaseProvider):
    url = 'https://chat.getgpt.world/'
    supports_stream = True
    working = False
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        headers = {
            'Content-Type' : 'application/json',
            'Referer'      : 'https://chat.getgpt.world/',
            'user-agent'   : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
        }

        data = json.dumps(
            {
                'messages'          : messages,
                'frequency_penalty' : kwargs.get('frequency_penalty', 0),
                'max_tokens'        : kwargs.get('max_tokens', 4000),
                'model'             : 'gpt-3.5-turbo',
                'presence_penalty'  : kwargs.get('presence_penalty', 0),
                'temperature'       : kwargs.get('temperature', 1),
                'top_p'             : kwargs.get('top_p', 1),
                'stream'            : True,
                'uuid'              : str(uuid.uuid4())
            }
        )

        res = requests.post('https://chat.getgpt.world/api/chat/stream',
                            headers=headers, json={'signature': _encrypt(data)}, stream=True)

        res.raise_for_status()
        for line in res.iter_lines():
            if b'content' in line:
                line_json = json.loads(line.decode('utf-8').split('data: ')[1])
                yield line_json['choices'][0]['delta']['content']

    @classmethod
    @property
    def params(cls):
        params = [
            ('model', 'str'),
            ('messages', 'list[dict[str, str]]'),
            ('stream', 'bool'),
            ('temperature', 'float'),
            ('presence_penalty', 'int'),
            ('frequency_penalty', 'int'),
            ('top_p', 'int'),
            ('max_tokens', 'int'),
        ]
        param = ', '.join([': '.join(p) for p in params])
        return f'g4f.provider.{cls.__name__} supports: ({param})'


def _encrypt(e: str):
    # AES-128-CBC with a random 16-char hex key and IV; both are appended
    # to the hex ciphertext so the server can peel them off and decrypt.
    t = os.urandom(8).hex().encode('utf-8')
    n = os.urandom(8).hex().encode('utf-8')
    r = e.encode('utf-8')

    cipher     = AES.new(t, AES.MODE_CBC, n)
    ciphertext = cipher.encrypt(_pad_data(r))

    return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8')


def _pad_data(data: bytes) -> bytes:
    # PKCS#7 padding to the AES block size.
    block_size   = AES.block_size
    padding_size = block_size - len(data) % block_size
    padding      = bytes([padding_size] * padding_size)

    return data + padding
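Since _encrypt concatenates the hex ciphertext with the 16-character key and IV it generated, the layout can be inverted for round-trip testing. A decryption counterpart, to be run alongside the module above; _decrypt is not part of this file and exists only to illustrate the format:

from Crypto.Cipher import AES

def _decrypt(signature: str) -> str:
    # Layout produced by _encrypt: <hex ciphertext><16-char key><16-char IV>.
    ciphertext = bytes.fromhex(signature[:-32])
    key        = signature[-32:-16].encode('utf-8')
    iv         = signature[-16:].encode('utf-8')
    padded     = AES.new(key, AES.MODE_CBC, iv).decrypt(ciphertext)
    return padded[:-padded[-1]].decode('utf-8')  # strip the PKCS#7 padding

assert _decrypt(_encrypt('{"stream": true}')) == '{"stream": true}'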
g4f/g4f/Provider/GptGo.py
ADDED
@@ -0,0 +1,78 @@
from __future__ import annotations

from aiohttp import ClientSession
import json

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt


class GptGo(AsyncGeneratorProvider):
    url = "https://gptgo.ai"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator:
        headers = {
            "User-Agent"      : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept"          : "*/*",
            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "Origin"          : cls.url,
            "Referer"         : cls.url + "/",
            "Sec-Fetch-Dest"  : "empty",
            "Sec-Fetch-Mode"  : "cors",
            "Sec-Fetch-Site"  : "same-origin",
        }
        async with ClientSession(
            headers=headers
        ) as session:
            async with session.get(
                "https://gptgo.ai/action_get_token.php",
                params={
                    "q": format_prompt(messages),
                    "hlgpt": "default",
                    "hl": "en"
                },
                proxy=proxy
            ) as response:
                response.raise_for_status()
                token = (await response.json(content_type=None))["token"]

            async with session.get(
                "https://gptgo.ai/action_ai_gpt.php",
                params={
                    "token": token,
                },
                proxy=proxy
            ) as response:
                response.raise_for_status()
                start = "data: "
                async for line in response.content:
                    line = line.decode()
                    if line.startswith("data: "):
                        if line.startswith("data: [DONE]"):
                            break
                        line = json.loads(line[len(start):-1])
                        content = line["choices"][0]["delta"].get("content")
                        if content:
                            yield content


    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/g4f/Provider/H2o.py
ADDED
@@ -0,0 +1,109 @@
from __future__ import annotations

import json
import uuid

from aiohttp import ClientSession

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt


class H2o(AsyncGeneratorProvider):
    url = "https://gpt-gm.h2o.ai"
    working = True
    model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator:
        model = model if model else cls.model
        headers = {"Referer": cls.url + "/"}

        async with ClientSession(
            headers=headers
        ) as session:
            data = {
                "ethicsModalAccepted": "true",
                "shareConversationsWithModelAuthors": "true",
                "ethicsModalAcceptedAt": "",
                "activeModel": model,
                "searchEnabled": "true",
            }
            async with session.post(
                f"{cls.url}/settings",
                proxy=proxy,
                data=data
            ) as response:
                response.raise_for_status()

            async with session.post(
                f"{cls.url}/conversation",
                proxy=proxy,
                json={"model": model},
            ) as response:
                response.raise_for_status()
                conversationId = (await response.json())["conversationId"]

            data = {
                "inputs": format_prompt(messages),
                "parameters": {
                    "temperature": 0.4,
                    "truncate": 2048,
                    "max_new_tokens": 1024,
                    "do_sample": True,
                    "repetition_penalty": 1.2,
                    "return_full_text": False,
                    **kwargs
                },
                "stream": True,
                "options": {
                    "id": str(uuid.uuid4()),
                    "response_id": str(uuid.uuid4()),
                    "is_retry": False,
                    "use_cache": False,
                    "web_search_id": "",
                },
            }
            async with session.post(
                f"{cls.url}/conversation/{conversationId}",
                proxy=proxy,
                json=data
            ) as response:
                start = "data:"
                async for line in response.content:
                    line = line.decode("utf-8")
                    if line and line.startswith(start):
                        line = json.loads(line[len(start):-1])
                        if not line["token"]["special"]:
                            yield line["token"]["text"]

            async with session.delete(
                f"{cls.url}/conversation/{conversationId}",
                proxy=proxy,
                json=data
            ) as response:
                response.raise_for_status()


    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
            ("truncate", "int"),
            ("max_new_tokens", "int"),
            ("do_sample", "bool"),
            ("repetition_penalty", "float"),
            ("return_full_text", "bool"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
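A consumption sketch for the async generator above, assuming H2o is importable from g4f.Provider as laid out in this commit and that gpt-gm.h2o.ai still answers:

import asyncio

from g4f.Provider import H2o

async def main():
    async for token in H2o.create_async_generator(
        model="h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
        messages=[{"role": "user", "content": "Hello"}],
    ):
        print(token, end="")

asyncio.run(main())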
g4f/g4f/Provider/HuggingChat.py
ADDED
@@ -0,0 +1,104 @@
from __future__ import annotations

import json

from aiohttp import ClientSession

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies


class HuggingChat(AsyncGeneratorProvider):
    url = "https://huggingface.co/chat"
    needs_auth = True
    working = True
    model = "OpenAssistant/oasst-sft-6-llama-30b-xor"

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool = True,
        proxy: str = None,
        cookies: dict = None,
        **kwargs
    ) -> AsyncGenerator:
        model = model if model else cls.model
        if proxy and "://" not in proxy:
            proxy = f"http://{proxy}"
        if not cookies:
            cookies = get_cookies(".huggingface.co")

        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
        }
        async with ClientSession(
            cookies=cookies,
            headers=headers
        ) as session:
            async with session.post(f"{cls.url}/conversation", proxy=proxy, json={"model": model}) as response:
                conversation_id = (await response.json())["conversationId"]

            send = {
                "inputs": format_prompt(messages),
                "parameters": {
                    "temperature": 0.2,
                    "truncate": 1000,
                    "max_new_tokens": 1024,
                    "stop": ["</s>"],
                    "top_p": 0.95,
                    "repetition_penalty": 1.2,
                    "top_k": 50,
                    "return_full_text": False,
                    **kwargs
                },
                "stream": stream,
                "options": {
                    "id": "9e9b8bc4-6604-40c6-994e-8eb78fa32e37",
                    "response_id": "04ce2602-3bea-45e8-8efc-cef00680376a",
                    "is_retry": False,
                    "use_cache": False,
                    "web_search_id": ""
                }
            }
            async with session.post(f"{cls.url}/conversation/{conversation_id}", proxy=proxy, json=send) as response:
                if not stream:
                    data = await response.json()
                    if "error" in data:
                        raise RuntimeError(data["error"])
                    elif isinstance(data, list):
                        yield data[0]["generated_text"].strip()
                    else:
                        raise RuntimeError(f"Response: {data}")
                else:
                    start = "data:"
                    first = True
                    async for line in response.content:
                        line = line.decode("utf-8")
                        if line.startswith(start):
                            line = json.loads(line[len(start):-1])
                            if "token" not in line:
                                raise RuntimeError(f"Response: {line}")
                            if not line["token"]["special"]:
                                if first:
                                    yield line["token"]["text"].lstrip()
                                    first = False
                                else:
                                    yield line["token"]["text"]

            async with session.delete(f"{cls.url}/conversation/{conversation_id}", proxy=proxy) as response:
                response.raise_for_status()


    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/g4f/Provider/Liaobots.py
ADDED
@@ -0,0 +1,89 @@
from __future__ import annotations

import json
import uuid

from aiohttp import ClientSession

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider

models = {
    "gpt-4": {
        "id": "gpt-4",
        "name": "GPT-4",
        "maxLength": 24000,
        "tokenLimit": 8000,
    },
    "gpt-3.5-turbo": {
        "id": "gpt-3.5-turbo",
        "name": "GPT-3.5",
        "maxLength": 12000,
        "tokenLimit": 4000,
    },
    "gpt-3.5-turbo-16k": {
        "id": "gpt-3.5-turbo-16k",
        "name": "GPT-3.5-16k",
        "maxLength": 48000,
        "tokenLimit": 16000,
    },
}

class Liaobots(AsyncGeneratorProvider):
    url = "https://liaobots.com"
    working = False
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
    _auth_code = None

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        auth: str = None,
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator:
        model = model if model in models else "gpt-3.5-turbo"
        headers = {
            "authority": "liaobots.com",
            "content-type": "application/json",
            "origin": cls.url,
            "referer": cls.url + "/",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
        }
        async with ClientSession(
            headers=headers
        ) as session:
            auth_code = auth if isinstance(auth, str) else cls._auth_code
            if not auth_code:
                async with session.post(cls.url + "/api/user", proxy=proxy, json={"authcode": ""}) as response:
                    response.raise_for_status()
                    auth_code = cls._auth_code = json.loads(await response.text())["authCode"]
            data = {
                "conversationId": str(uuid.uuid4()),
                "model": models[model],
                "messages": messages,
                "key": "",
                "prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
            }
            async with session.post(cls.url + "/api/chat", proxy=proxy, json=data, headers={"x-auth-code": auth_code}) as response:
                response.raise_for_status()
                async for stream in response.content.iter_any():
                    if stream:
                        yield stream.decode()


    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
            ("auth", "str"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/g4f/Provider/Lockchat.py
ADDED
@@ -0,0 +1,64 @@
from __future__ import annotations

import json

import requests

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class Lockchat(BaseProvider):
    url: str = "http://supertest.lockchat.app"
    supports_stream = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        temperature = float(kwargs.get("temperature", 0.7))
        payload = {
            "temperature": temperature,
            "messages"   : messages,
            "model"      : model,
            "stream"     : True,
        }

        headers = {
            "user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0",
        }
        response = requests.post("http://supertest.lockchat.app/v1/chat/completions",
                                 json=payload, headers=headers, stream=True)

        response.raise_for_status()
        for token in response.iter_lines():
            if b"The model: `gpt-4` does not exist" in token:
                print("error, retrying...")
                yield from Lockchat.create_completion(
                    model       = model,
                    messages    = messages,
                    stream      = stream,
                    temperature = temperature,
                    **kwargs)

            if b"content" in token:
                token = json.loads(token.decode("utf-8").split("data: ")[1])
                token = token["choices"][0]["delta"].get("content")
                if token:
                    yield token

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/g4f/Provider/Myshell.py
ADDED
@@ -0,0 +1,172 @@
from __future__ import annotations

import json, uuid, hashlib, time, random

from aiohttp import ClientSession
from aiohttp.http import WSMsgType
import asyncio

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt


models = {
    "samantha": "1e3be7fe89e94a809408b1154a2ee3e1",
    "gpt-3.5-turbo": "8077335db7cd47e29f7de486612cc7fd",
    "gpt-4": "01c8de4fbfc548df903712b0922a4e01",
}


class Myshell(AsyncGeneratorProvider):
    url = "https://app.myshell.ai/chat"
    working = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        if not model:
            bot_id = models["samantha"]
        elif model in models:
            bot_id = models[model]
        else:
            raise ValueError(f"Model is not supported: {model}")

        user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36'
        visitor_id = generate_visitor_id(user_agent)

        async with ClientSession(
            headers={'User-Agent': user_agent}
        ) as session:
            async with session.ws_connect(
                "wss://api.myshell.ai/ws/?EIO=4&transport=websocket",
                autoping=False,
                timeout=90
            ) as wss:
                # Send and receive hello message
                await wss.receive_str()
                message = json.dumps({"token": None, "visitorId": visitor_id})
                await wss.send_str(f"40/chat,{message}")
                await wss.receive_str()

                # Fix "need_verify_captcha" issue
                await asyncio.sleep(5)

                # Create chat message
                text = format_prompt(messages)
                chat_data = json.dumps(["text_chat", {
                    "reqId": str(uuid.uuid4()),
                    "botUid": bot_id,
                    "sourceFrom": "myshellWebsite",
                    "text": text,
                    **generate_signature(text)
                }])

                # Send chat message
                chat_start = "42/chat,"
                chat_message = f"{chat_start}{chat_data}"
                await wss.send_str(chat_message)

                # Receive messages
                async for message in wss:
                    if message.type != WSMsgType.TEXT:
                        continue
                    # Ping back
                    if message.data == "2":
                        await wss.send_str("3")
                        continue
                    # Is not chat message
                    if not message.data.startswith(chat_start):
                        continue
                    data_type, data = json.loads(message.data[len(chat_start):])
                    if data_type == "text_stream":
                        if data["data"]["text"]:
                            yield data["data"]["text"]
                        elif data["data"]["isFinal"]:
                            break
                    elif data_type in ("message_replied", "need_verify_captcha"):
                        raise RuntimeError(f"Received unexpected message: {data_type}")


    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"


def generate_timestamp() -> str:
    return str(
        int(
            str(int(time.time() * 1000))[:-1]
            + str(
                sum(
                    2 * int(digit)
                    if idx % 2 == 0
                    else 3 * int(digit)
                    for idx, digit in enumerate(str(int(time.time() * 1000))[:-1])
                )
                % 10
            )
        )
    )

def generate_signature(text: str):
    timestamp = generate_timestamp()
    version = 'v1.0.0'
    secret = '8@VXGK3kKHr!u2gA'
    data = f"{version}#{text}#{timestamp}#{secret}"
    signature = hashlib.md5(data.encode()).hexdigest()
    signature = signature[::-1]
    return {
        "signature": signature,
        "timestamp": timestamp,
        "version": version
    }

def xor_hash(B: str):
    r = []
    i = 0

    def o(e, t):
        o_val = 0
        for i in range(len(t)):
            o_val |= r[i] << (8 * i)
        return e ^ o_val

    for e in range(len(B)):
        t = ord(B[e])
        r.insert(0, 255 & t)

        if len(r) >= 4:
            i = o(i, r)
            r = []

    if len(r) > 0:
        i = o(i, r)

    return hex(i)[2:]

def performance() -> str:
    t = int(time.time() * 1000)
    e = 0
    while t == int(time.time() * 1000):
        e += 1
    return hex(t)[2:] + hex(e)[2:]

def generate_visitor_id(user_agent: str) -> str:
    f = performance()
    r = hex(int(random.random() * (16**16)))[2:-2]
    d = xor_hash(user_agent)
    e = hex(1080 * 1920)[2:]
    return f"{f}-{r}-{d}-{e}-{f}"
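A quick illustration of the request-signing helpers above, run alongside the module: generate_timestamp drops the last digit of the millisecond clock and appends a check digit (a x2/x3 weighted digit sum, mod 10), and generate_signature reverses an MD5 over version#text#timestamp#secret. Exact values depend on the clock, so the lengths are the stable part:

sig = generate_signature("hello")

print(sig["version"])             # -> v1.0.0
print(len(sig["signature"]))      # -> 32 (a reversed MD5 hex digest)
print(len(generate_timestamp()))  # -> 13 (12 clock digits + 1 check digit)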
g4f/g4f/Provider/Opchatgpts.py
ADDED
@@ -0,0 +1,8 @@
from __future__ import annotations

from .ChatgptLogin import ChatgptLogin


class Opchatgpts(ChatgptLogin):
    url = "https://opchatgpts.net"
    working = True
g4f/g4f/Provider/OpenAssistant.py
ADDED
@@ -0,0 +1,100 @@
from __future__ import annotations

import json

from aiohttp import ClientSession

from ..typing import Any, AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies


class OpenAssistant(AsyncGeneratorProvider):
    url = "https://open-assistant.io/chat"
    needs_auth = True
    working = True
    model = "OA_SFT_Llama_30B_6"

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        cookies: dict = None,
        **kwargs: Any
    ) -> AsyncGenerator:
        if not cookies:
            cookies = get_cookies("open-assistant.io")

        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
        }
        async with ClientSession(
            cookies=cookies,
            headers=headers
        ) as session:
            async with session.post("https://open-assistant.io/api/chat", proxy=proxy) as response:
                chat_id = (await response.json())["id"]

            data = {
                "chat_id": chat_id,
                "content": f"<s>[INST]\n{format_prompt(messages)}\n[/INST]",
                "parent_id": None
            }
            async with session.post("https://open-assistant.io/api/chat/prompter_message", proxy=proxy, json=data) as response:
                parent_id = (await response.json())["id"]

            data = {
                "chat_id": chat_id,
                "parent_id": parent_id,
                "model_config_name": model if model else cls.model,
                "sampling_parameters": {
                    "top_k": 50,
                    "top_p": None,
                    "typical_p": None,
                    "temperature": 0.35,
                    "repetition_penalty": 1.1111111111111112,
                    "max_new_tokens": 1024,
                    **kwargs
                },
                "plugins": []
            }
            async with session.post("https://open-assistant.io/api/chat/assistant_message", proxy=proxy, json=data) as response:
                data = await response.json()
                if "id" in data:
                    message_id = data["id"]
                elif "message" in data:
                    raise RuntimeError(data["message"])
                else:
                    response.raise_for_status()

            params = {
                'chat_id': chat_id,
                'message_id': message_id,
            }
            async with session.post("https://open-assistant.io/api/chat/events", proxy=proxy, params=params) as response:
                start = "data: "
                async for line in response.content:
                    line = line.decode("utf-8")
                    if line and line.startswith(start):
                        line = json.loads(line[len(start):])
                        if line["event_type"] == "token":
                            yield line["text"]

            params = {
                'chat_id': chat_id,
            }
            async with session.delete("https://open-assistant.io/api/chat", proxy=proxy, params=params) as response:
                response.raise_for_status()

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
g4f/g4f/Provider/OpenaiChat.py
ADDED
@@ -0,0 +1,88 @@
from __future__ import annotations

from curl_cffi.requests import AsyncSession
import uuid
import json

from .base_provider import AsyncProvider, get_cookies, format_prompt


class OpenaiChat(AsyncProvider):
    url = "https://chat.openai.com"
    needs_auth = True
    working = True
    supports_gpt_35_turbo = True
    _access_token = None

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        access_token: str = None,
        cookies: dict = None,
        **kwargs: dict
    ) -> str:
        proxies = {"https": proxy}
        if not access_token:
            access_token = await cls.get_access_token(cookies, proxies)
        headers = {
            "Accept": "text/event-stream",
            "Authorization": f"Bearer {access_token}",
        }
        async with AsyncSession(proxies=proxies, headers=headers, impersonate="chrome107") as session:
            messages = [
                {
                    "id": str(uuid.uuid4()),
                    "author": {"role": "user"},
                    "content": {"content_type": "text", "parts": [format_prompt(messages)]},
                },
            ]
            data = {
                "action": "next",
                "messages": messages,
                "conversation_id": None,
                "parent_message_id": str(uuid.uuid4()),
                "model": "text-davinci-002-render-sha",
                "history_and_training_disabled": True,
            }
            response = await session.post("https://chat.openai.com/backend-api/conversation", json=data)
            response.raise_for_status()
            last_message = None
            for line in response.content.decode().splitlines():
                if line.startswith("data: "):
                    line = line[6:]
                    if line == "[DONE]":
                        break
                    line = json.loads(line)
                    if "message" in line:
                        last_message = line["message"]["content"]["parts"][0]
            return last_message


    @classmethod
    async def get_access_token(cls, cookies: dict = None, proxies: dict = None) -> str:
        if not cls._access_token:
            cookies = cookies if cookies else get_cookies("chat.openai.com")
            async with AsyncSession(proxies=proxies, cookies=cookies, impersonate="chrome107") as session:
                response = await session.get("https://chat.openai.com/api/auth/session")
                response.raise_for_status()
                cls._access_token = response.json()["accessToken"]
        return cls._access_token


    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
            ("access_token", "str"),
            ("cookies", "dict[str, str]")
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
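A usage sketch, assuming the package layout of this commit: create_async needs a valid ChatGPT session, supplied either as browser cookies that get_cookies() can read or as an explicit access_token (the token below is a placeholder):

import asyncio

from g4f.Provider import OpenaiChat

answer = asyncio.run(OpenaiChat.create_async(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    access_token="eyJhbGciOi...",  # placeholder; supply a real session token
))
print(answer)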
g4f/g4f/Provider/PerplexityAi.py
ADDED
@@ -0,0 +1,87 @@
from __future__ import annotations

import json
import time
import base64
from curl_cffi.requests import AsyncSession

from .base_provider import AsyncProvider, format_prompt


class PerplexityAi(AsyncProvider):
    url = "https://www.perplexity.ai"
    working = True
    supports_gpt_35_turbo = True
    _sources = []

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> str:
        url = cls.url + "/socket.io/?EIO=4&transport=polling"
        async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107") as session:
            url_session = "https://www.perplexity.ai/api/auth/session"
            response = await session.get(url_session)

            response = await session.get(url, params={"t": timestamp()})
            response.raise_for_status()
            sid = json.loads(response.text[1:])["sid"]

            data = '40{"jwt":"anonymous-ask-user"}'
            response = await session.post(url, params={"t": timestamp(), "sid": sid}, data=data)
            response.raise_for_status()

            data = "424" + json.dumps([
                "perplexity_ask",
                format_prompt(messages),
                {
                    "version": "2.1",
                    "source": "default",
                    "language": "en",
                    "timezone": time.tzname[0],
                    "search_focus": "internet",
                    "mode": "concise"
                }
            ])
            response = await session.post(url, params={"t": timestamp(), "sid": sid}, data=data)
            response.raise_for_status()

            while True:
                response = await session.get(url, params={"t": timestamp(), "sid": sid})
                response.raise_for_status()
                for line in response.text.splitlines():
                    if line.startswith("434"):
                        result = json.loads(json.loads(line[3:])[0]["text"])

                        cls._sources = [{
                            "title": source["name"],
                            "url": source["url"],
                            "snippet": source["snippet"]
                        } for source in result["web_results"]]

                        return result["answer"]

    @classmethod
    def get_sources(cls):
        return cls._sources


    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"


def timestamp() -> str:
    return base64.urlsafe_b64encode(int(time.time() - 1407782612).to_bytes(4, 'big')).decode()
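timestamp() above encodes the seconds elapsed since the fixed offset 1407782612 as four big-endian bytes in URL-safe base64. A decoding counterpart as a sanity check, run alongside the module; decode_timestamp is illustrative only and not part of this file:

import base64
import time

def decode_timestamp(token: str) -> float:
    # Reverse timestamp(): base64 -> 4 big-endian bytes -> seconds + offset.
    return int.from_bytes(base64.urlsafe_b64decode(token), "big") + 1407782612

assert abs(decode_timestamp(timestamp()) - time.time()) < 2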
g4f/g4f/Provider/Raycast.py
ADDED
@@ -0,0 +1,72 @@
from __future__ import annotations

import json

import requests

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class Raycast(BaseProvider):
    url = "https://raycast.com"
    supports_gpt_35_turbo = True
    supports_gpt_4 = True
    supports_stream = True
    needs_auth = True
    working = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs: Any,
    ) -> CreateResult:
        auth = kwargs.get('auth')
        headers = {
            'Accept': 'application/json',
            'Accept-Language': 'en-US,en;q=0.9',
            'Authorization': f'Bearer {auth}',
            'Content-Type': 'application/json',
            'User-Agent': 'Raycast/0 CFNetwork/1410.0.3 Darwin/22.6.0',
        }
        parsed_messages = []
        for message in messages:
            parsed_messages.append({
                'author': message['role'],
                'content': {'text': message['content']}
            })
        data = {
            "debug": False,
            "locale": "en-CN",
            "messages": parsed_messages,
            "model": model,
            "provider": "openai",
            "source": "ai_chat",
            "system_instruction": "markdown",
            "temperature": 0.5
        }
        response = requests.post("https://backend.raycast.com/api/v1/ai/chat_completions", headers=headers, json=data, stream=True)
        for token in response.iter_lines():
            if b'data: ' not in token:
                continue
            completion_chunk = json.loads(token.decode().replace('data: ', ''))
            token = completion_chunk['text']
            if token is not None:
                yield token

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
            ("top_p", "int"),
            ("auth", "str"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
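A usage sketch; needs_auth is True, so a real Raycast bearer token must be passed via the auth kwarg (the value below is a placeholder, and the class is assumed importable from g4f.Provider as in the rest of this commit):

from g4f.Provider import Raycast

for token in Raycast.create_completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
    auth="rc_live_placeholder",  # placeholder; supply a real Raycast token
):
    print(token, end="")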
g4f/g4f/Provider/Theb.py
ADDED
@@ -0,0 +1,97 @@
from __future__ import annotations

import json
import random

import requests

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class Theb(BaseProvider):
    url = "https://theb.ai"
    working = True
    supports_stream = True
    supports_gpt_35_turbo = True
    needs_auth = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
        conversation += "\nassistant: "

        auth = kwargs.get("auth", {
            "bearer_token": "free",
            "org_id": "theb",
        })

        bearer_token = auth["bearer_token"]
        org_id = auth["org_id"]

        headers = {
            'authority'         : 'beta.theb.ai',
            'accept'            : 'text/event-stream',
            'accept-language'   : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
            'authorization'     : 'Bearer ' + bearer_token,
            'content-type'      : 'application/json',
            'origin'            : 'https://beta.theb.ai',
            'referer'           : 'https://beta.theb.ai/home',
            'sec-ch-ua'         : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
            'sec-ch-ua-mobile'  : '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest'    : 'empty',
            'sec-fetch-mode'    : 'cors',
            'sec-fetch-site'    : 'same-origin',
            'user-agent'        : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
            'x-ai-model'        : 'ee8d4f29cb7047f78cbe84313ed6ace8',
        }

        req_rand = random.randint(100000000, 9999999999)

        json_data: dict[str, Any] = {
            "text"        : conversation,
            "category"    : "04f58f64a4aa4191a957b47290fee864",
            "model"       : "ee8d4f29cb7047f78cbe84313ed6ace8",
            "model_params": {
                "system_prompt"     : "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture.\nKnowledge cutoff: 2021-09\nCurrent date: {{YYYY-MM-DD}}",
                "temperature"       : kwargs.get("temperature", 1),
                "top_p"             : kwargs.get("top_p", 1),
                "frequency_penalty" : kwargs.get("frequency_penalty", 0),
                "presence_penalty"  : kwargs.get("presence_penalty", 0),
                "long_term_memory"  : "auto"
            }
        }

        response = requests.post(f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}",
                                 headers=headers, json=json_data, stream=True)

        response.raise_for_status()
        content = ""
        next_content = ""
        for chunk in response.iter_lines():
            if b"content" in chunk:
                next_content = content
                data = json.loads(chunk.decode().split("data: ")[1])
                content = data["content"]
                # Each chunk carries the cumulative text; emit only what is new.
                yield data["content"].replace(next_content, "")

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("auth", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
            ("presence_penalty", "int"),
            ("frequency_penalty", "int"),
            ("top_p", "int")
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
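Theb streams the cumulative answer in every chunk rather than a delta, which is why the loop above subtracts the previously seen text. The same idea in isolation, with hard-coded chunks standing in for the API:

chunks = ["He", "Hello", "Hello wor", "Hello world"]

seen = ""
for content in chunks:
    print(content.replace(seen, ""), end="|")  # -> He|llo| wor|ld|
    seen = content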
g4f/g4f/Provider/V50.py
ADDED
@@ -0,0 +1,67 @@
from __future__ import annotations

import uuid

import requests

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class V50(BaseProvider):
    url = 'https://p5.v50.ltd'
    supports_gpt_35_turbo = True
    supports_stream = False
    needs_auth = False
    working = False

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool, **kwargs: Any) -> CreateResult:

        conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages)
        conversation += "\nassistant: "

        payload = {
            "prompt"        : conversation,
            "options"       : {},
            "systemMessage" : ".",
            "temperature"   : kwargs.get("temperature", 0.4),
            "top_p"         : kwargs.get("top_p", 0.4),
            "model"         : model,
            "user"          : str(uuid.uuid4())
        }

        headers = {
            'authority'         : 'p5.v50.ltd',
            'accept'            : 'application/json, text/plain, */*',
            'accept-language'   : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
            'content-type'      : 'application/json',
            'origin'            : 'https://p5.v50.ltd',
            'referer'           : 'https://p5.v50.ltd/',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest'    : 'empty',
            'sec-fetch-mode'    : 'cors',
            'sec-fetch-site'    : 'same-origin',
            'user-agent'        : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
        }
        response = requests.post("https://p5.v50.ltd/api/chat-process",
            json=payload, headers=headers, proxies=kwargs.get('proxy', {}))

        if "https://fk1.v50.ltd" not in response.text:
            yield response.text

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
            ("top_p", "int"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
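A hedged driver for the class above. Note that `working` is set to `False` in this commit, so the endpoint may reject requests; the import path is assumed from the package layout being uploaded:

from g4f.Provider.V50 import V50  # path assumed from this commit's layout

messages = [{"role": "user", "content": "Hello"}]
# create_completion is a generator even though supports_stream is False,
# so the single response chunk is consumed by iterating.
for chunk in V50.create_completion(model="gpt-3.5-turbo", messages=messages, stream=False):
    print(chunk)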
g4f/g4f/Provider/Vercel.py
ADDED
@@ -0,0 +1,377 @@
from __future__ import annotations

import json, base64, requests, execjs, random, uuid

from ..typing import Any, TypedDict, CreateResult
from .base_provider import BaseProvider
from abc import abstractmethod


class Vercel(BaseProvider):
    url = 'https://sdk.vercel.ai'
    working = True
    supports_gpt_35_turbo = True
    supports_stream = True

    @staticmethod
    @abstractmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs
    ) -> CreateResult:
        if not model:
            model = "gpt-3.5-turbo"
        elif model not in model_info:
            raise ValueError(f"Model is not supported: {model}")

        headers = {
            'authority'         : 'sdk.vercel.ai',
            'accept'            : '*/*',
            'accept-language'   : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'cache-control'     : 'no-cache',
            'content-type'      : 'application/json',
            'custom-encoding'   : get_anti_bot_token(),
            'origin'            : 'https://sdk.vercel.ai',
            'pragma'            : 'no-cache',
            'referer'           : 'https://sdk.vercel.ai/',
            'sec-ch-ua'         : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
            'sec-ch-ua-mobile'  : '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest'    : 'empty',
            'sec-fetch-mode'    : 'cors',
            'sec-fetch-site'    : 'same-origin',
            'user-agent'        : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.%s.%s Safari/537.36' % (
                random.randint(99, 999),
                random.randint(99, 999)
            )
        }

        json_data = {
            'model'       : model_info[model]['id'],
            'messages'    : messages,
            'playgroundId': str(uuid.uuid4()),
            'chatIndex'   : 0} | model_info[model]['default_params']

        max_retries = kwargs.get('max_retries', 20)
        for i in range(max_retries):
            response = requests.post('https://sdk.vercel.ai/api/generate',
                headers=headers, json=json_data, stream=True)
            try:
                response.raise_for_status()
            except requests.HTTPError:
                continue  # the anti-bot token is not always accepted; request again
            for token in response.iter_content(chunk_size=None):
                yield token.decode()
            break


def get_anti_bot_token() -> str:
    headers = {
        'authority'         : 'sdk.vercel.ai',
        'accept'            : '*/*',
        'accept-language'   : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
        'cache-control'     : 'no-cache',
        'pragma'            : 'no-cache',
        'referer'           : 'https://sdk.vercel.ai/',
        'sec-ch-ua'         : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
        'sec-ch-ua-mobile'  : '?0',
        'sec-ch-ua-platform': '"macOS"',
        'sec-fetch-dest'    : 'empty',
        'sec-fetch-mode'    : 'cors',
        'sec-fetch-site'    : 'same-origin',
        'user-agent'        : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.%s.%s Safari/537.36' % (
            random.randint(99, 999),
            random.randint(99, 999)
        )
    }

    response = requests.get('https://sdk.vercel.ai/openai.jpeg',
        headers=headers).text

    raw_data = json.loads(base64.b64decode(response,
        validate=True))

    js_script = '''const globalThis={marker:"mark"};String.prototype.fontcolor=function(){return `<font>${this}</font>`};
return (%s)(%s)''' % (raw_data['c'], raw_data['a'])

    raw_token = json.dumps({'r': execjs.compile(js_script).call(''), 't': raw_data['t']},
        separators = (",", ":"))

    return base64.b64encode(raw_token.encode('utf-16le')).decode()

class ModelInfo(TypedDict):
    id: str
    default_params: dict[str, Any]

model_info: dict[str, ModelInfo] = {
    'claude-instant-v1': {
        'id': 'anthropic:claude-instant-v1',
        'default_params': {
            'temperature': 1,
            'maximumLength': 1024,
            'topP': 1,
            'topK': 1,
            'presencePenalty': 1,
            'frequencyPenalty': 1,
            'stopSequences': ['\n\nHuman:'],
        },
    },
    'claude-v1': {
        'id': 'anthropic:claude-v1',
        'default_params': {
            'temperature': 1,
            'maximumLength': 1024,
            'topP': 1,
            'topK': 1,
            'presencePenalty': 1,
            'frequencyPenalty': 1,
            'stopSequences': ['\n\nHuman:'],
        },
    },
    'claude-v2': {
        'id': 'anthropic:claude-v2',
        'default_params': {
            'temperature': 1,
            'maximumLength': 1024,
            'topP': 1,
            'topK': 1,
            'presencePenalty': 1,
            'frequencyPenalty': 1,
            'stopSequences': ['\n\nHuman:'],
        },
    },
    'a16z-infra/llama7b-v2-chat': {
        'id': 'replicate:a16z-infra/llama7b-v2-chat',
        'default_params': {
            'temperature': 0.75,
            'maximumLength': 3000,
            'topP': 1,
            'repetitionPenalty': 1,
        },
    },
    'a16z-infra/llama13b-v2-chat': {
        'id': 'replicate:a16z-infra/llama13b-v2-chat',
        'default_params': {
            'temperature': 0.75,
            'maximumLength': 3000,
            'topP': 1,
            'repetitionPenalty': 1,
        },
    },
    'replicate/llama-2-70b-chat': {
        'id': 'replicate:replicate/llama-2-70b-chat',
        'default_params': {
            'temperature': 0.75,
            'maximumLength': 3000,
            'topP': 1,
            'repetitionPenalty': 1,
        },
    },
    'bigscience/bloom': {
        'id': 'huggingface:bigscience/bloom',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 0.95,
            'topK': 4,
            'repetitionPenalty': 1.03,
        },
    },
    'google/flan-t5-xxl': {
        'id': 'huggingface:google/flan-t5-xxl',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 0.95,
            'topK': 4,
            'repetitionPenalty': 1.03,
        },
    },
    'EleutherAI/gpt-neox-20b': {
        'id': 'huggingface:EleutherAI/gpt-neox-20b',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 0.95,
            'topK': 4,
            'repetitionPenalty': 1.03,
            'stopSequences': [],
        },
    },
    'OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5': {
        'id': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
        'default_params': {
            'maximumLength': 1024,
            'typicalP': 0.2,
            'repetitionPenalty': 1,
        },
    },
    'OpenAssistant/oasst-sft-1-pythia-12b': {
        'id': 'huggingface:OpenAssistant/oasst-sft-1-pythia-12b',
        'default_params': {
            'maximumLength': 1024,
            'typicalP': 0.2,
            'repetitionPenalty': 1,
        },
    },
    'bigcode/santacoder': {
        'id': 'huggingface:bigcode/santacoder',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 0.95,
            'topK': 4,
            'repetitionPenalty': 1.03,
        },
    },
    'command-light-nightly': {
        'id': 'cohere:command-light-nightly',
        'default_params': {
            'temperature': 0.9,
            'maximumLength': 1024,
            'topP': 1,
            'topK': 0,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'command-nightly': {
        'id': 'cohere:command-nightly',
        'default_params': {
            'temperature': 0.9,
            'maximumLength': 1024,
            'topP': 1,
            'topK': 0,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'gpt-4': {
        'id': 'openai:gpt-4',
        'default_params': {
            'temperature': 0.7,
            'maximumLength': 8192,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'gpt-4-0613': {
        'id': 'openai:gpt-4-0613',
        'default_params': {
            'temperature': 0.7,
            'maximumLength': 8192,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'code-davinci-002': {
        'id': 'openai:code-davinci-002',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'gpt-3.5-turbo': {
        'id': 'openai:gpt-3.5-turbo',
        'default_params': {
            'temperature': 0.7,
            'maximumLength': 4096,
            'topP': 1,
            'topK': 1,
            'presencePenalty': 1,
            'frequencyPenalty': 1,
            'stopSequences': [],
        },
    },
    'gpt-3.5-turbo-16k': {
        'id': 'openai:gpt-3.5-turbo-16k',
        'default_params': {
            'temperature': 0.7,
            'maximumLength': 16280,
            'topP': 1,
            'topK': 1,
            'presencePenalty': 1,
            'frequencyPenalty': 1,
            'stopSequences': [],
        },
    },
    'gpt-3.5-turbo-16k-0613': {
        'id': 'openai:gpt-3.5-turbo-16k-0613',
        'default_params': {
            'temperature': 0.7,
            'maximumLength': 16280,
            'topP': 1,
            'topK': 1,
            'presencePenalty': 1,
            'frequencyPenalty': 1,
            'stopSequences': [],
        },
    },
    'text-ada-001': {
        'id': 'openai:text-ada-001',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'text-babbage-001': {
        'id': 'openai:text-babbage-001',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'text-curie-001': {
        'id': 'openai:text-curie-001',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'text-davinci-002': {
        'id': 'openai:text-davinci-002',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 1024,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
    'text-davinci-003': {
        'id': 'openai:text-davinci-003',
        'default_params': {
            'temperature': 0.5,
            'maximumLength': 4097,
            'topP': 1,
            'presencePenalty': 0,
            'frequencyPenalty': 0,
            'stopSequences': [],
        },
    },
}
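Because `get_anti_bot_token` evaluates the challenge script through `execjs`, a JavaScript runtime such as Node.js must be available on PATH, and the 20-attempt retry loop exists because a generated token is not guaranteed to be accepted. A small sketch built only on the `model_info` table above, listing the model keys this provider accepts (import path assumed from this commit's layout):

from g4f.Provider.Vercel import model_info  # path assumed from this commit's layout

for name, info in model_info.items():
    print(f"{name:50} -> {info['id']}")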
g4f/g4f/Provider/Vitalentum.py
ADDED
@@ -0,0 +1,68 @@
from __future__ import annotations

import json
from aiohttp import ClientSession

from .base_provider import AsyncGeneratorProvider
from ..typing import AsyncGenerator

class Vitalentum(AsyncGeneratorProvider):
    url = "https://app.vitalentum.io"
    working = True
    supports_gpt_35_turbo = True


    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator:
        headers = {
            "User-Agent"      : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
            "Accept"          : "text/event-stream",
            "Accept-language" : "de,en-US;q=0.7,en;q=0.3",
            "Origin"          : cls.url,
            "Referer"         : cls.url + "/",
            "Sec-Fetch-Dest"  : "empty",
            "Sec-Fetch-Mode"  : "cors",
            "Sec-Fetch-Site"  : "same-origin",
        }
        conversation = json.dumps({"history": [{
            "speaker": "human" if message["role"] == "user" else "bot",
            "text": message["content"],
        } for message in messages]})
        data = {
            "conversation": conversation,
            "temperature": 0.7,
            **kwargs
        }
        async with ClientSession(
                headers=headers
            ) as session:
            async with session.post(cls.url + "/api/converse-edge", json=data, proxy=proxy) as response:
                response.raise_for_status()
                async for line in response.content:
                    line = line.decode()
                    if line.startswith("data: "):
                        if line.startswith("data: [DONE]"):
                            break
                        line = json.loads(line[6:-1])
                        content = line["choices"][0]["delta"].get("content")
                        if content:
                            yield content


    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
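The one non-obvious step above is the history re-encoding: OpenAI-style roles are mapped to the `speaker` values this API expects before the JSON is posted. A standalone sketch of just that mapping:

import json

messages = [
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello!"},
]
conversation = json.dumps({"history": [{
    "speaker": "human" if message["role"] == "user" else "bot",
    "text": message["content"],
} for message in messages]})
print(conversation)  # {"history": [{"speaker": "human", "text": "Hi"}, {"speaker": "bot", "text": "Hello!"}]}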
g4f/g4f/Provider/Wewordle.py
ADDED
@@ -0,0 +1,65 @@
from __future__ import annotations

import random, string, time
from aiohttp import ClientSession

from .base_provider import AsyncProvider


class Wewordle(AsyncProvider):
    url = "https://wewordle.org"
    working = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> str:

        headers = {
            "accept"       : "*/*",
            "pragma"       : "no-cache",
            "Content-Type" : "application/json",
            "Connection"   : "keep-alive"
        }

        _user_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=16))
        _app_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=31))
        _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
        data = {
            "user"      : _user_id,
            "messages"  : messages,
            "subscriber": {
                "originalPurchaseDate"          : None,
                "originalApplicationVersion"    : None,
                "allPurchaseDatesMillis"        : {},
                "entitlements"                  : {"active": {}, "all": {}},
                "allPurchaseDates"              : {},
                "allExpirationDatesMillis"      : {},
                "allExpirationDates"            : {},
                "originalAppUserId"             : f"$RCAnonymousID:{_app_id}",
                "latestExpirationDate"          : None,
                "requestDate"                   : _request_date,
                "latestExpirationDateMillis"    : None,
                "nonSubscriptionTransactions"   : [],
                "originalPurchaseDateMillis"    : None,
                "managementURL"                 : None,
                "allPurchasedProductIdentifiers": [],
                "firstSeen"                     : _request_date,
                "activeSubscriptions"           : [],
            }
        }


        async with ClientSession(
                headers=headers
            ) as session:
            async with session.post(f"{cls.url}/gptapi/v1/android/turbo", proxy=proxy, json=data) as response:
                response.raise_for_status()
                content = (await response.json())["message"]["content"]
                if content:
                    return content
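Since `Wewordle` subclasses `AsyncProvider`, `create_async` returns a coroutine that resolves to the full reply rather than a token stream. A hedged driver, with the import path assumed from this commit's layout:

import asyncio

from g4f.Provider.Wewordle import Wewordle  # path assumed from this commit's layout

messages = [{"role": "user", "content": "Say hi"}]
print(asyncio.run(Wewordle.create_async("gpt-3.5-turbo", messages)))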
g4f/g4f/Provider/Wuguokai.py
ADDED
@@ -0,0 +1,63 @@
from __future__ import annotations

import random

import requests

from ..typing import Any, CreateResult
from .base_provider import BaseProvider, format_prompt


class Wuguokai(BaseProvider):
    url = 'https://chat.wuguokai.xyz'
    supports_gpt_35_turbo = True
    working = False

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs: Any,
    ) -> CreateResult:
        headers = {
            'authority': 'ai-api.wuguokai.xyz',
            'accept': 'application/json, text/plain, */*',
            'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
            'content-type': 'application/json',
            'origin': 'https://chat.wuguokai.xyz',
            'referer': 'https://chat.wuguokai.xyz/',
            'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-site',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
        }
        data = {
            "prompt": format_prompt(messages),
            "options": {},
            "userId": f"#/chat/{random.randint(1, 99999999)}",
            "usingContext": True
        }
        response = requests.post("https://ai-api20.wuguokai.xyz/api/chat-process", headers=headers, timeout=3, json=data, proxies=kwargs.get('proxy', {}))
        # The marker below translates roughly to: "If the answer failed, please retry or refresh the page a few times."
        _split = response.text.split("> 若回答失败请重试或多刷新几次界面后重试")
        if response.status_code == 200:
            if len(_split) > 1:
                yield _split[1].strip()
            else:
                yield _split[0].strip()
        else:
            raise Exception(f"Error: {response.status_code} {response.reason}")

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool")
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
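The response handling above pivots on that fixed Chinese marker string. A standalone sketch of the branch, using a made-up response body:

marker = "> 若回答失败请重试或多刷新几次界面后重试"
text = f"boilerplate intro {marker} actual answer"  # made-up body for illustration
_split = text.split(marker)
answer = _split[1].strip() if len(_split) > 1 else _split[0].strip()
print(answer)  # actual answer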
g4f/g4f/Provider/Ylokh.py
ADDED
@@ -0,0 +1,79 @@
from __future__ import annotations

import json
from aiohttp import ClientSession

from .base_provider import AsyncGeneratorProvider
from ..typing import AsyncGenerator

class Ylokh(AsyncGeneratorProvider):
    url = "https://chat.ylokh.xyz"
    working = True
    supports_gpt_35_turbo = True


    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool = True,
        proxy: str = None,
        **kwargs
    ) -> AsyncGenerator:
        model = model if model else "gpt-3.5-turbo"
        headers = {
            "User-Agent"      : "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0",
            "Accept"          : "*/*",
            "Accept-language" : "de,en-US;q=0.7,en;q=0.3",
            "Origin"          : cls.url,
            "Referer"         : cls.url + "/",
            "Sec-Fetch-Dest"  : "empty",
            "Sec-Fetch-Mode"  : "cors",
            "Sec-Fetch-Site"  : "same-origin",
        }
        data = {
            "messages": messages,
            "model": model,
            "temperature": 1,
            "presence_penalty": 0,
            "top_p": 1,
            "frequency_penalty": 0,
            "allow_fallback": True,
            "stream": stream,
            **kwargs
        }
        async with ClientSession(
                headers=headers
            ) as session:
            async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data, proxy=proxy) as response:
                response.raise_for_status()
                if stream:
                    async for line in response.content:
                        line = line.decode()
                        if line.startswith("data: "):
                            if line.startswith("data: [DONE]"):
                                break
                            line = json.loads(line[6:-1])
                            content = line["choices"][0]["delta"].get("content")
                            if content:
                                yield content
                else:
                    chat = await response.json()
                    yield chat["choices"][0]["message"].get("content")


    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
            ("temperature", "float"),
            ("top_p", "float"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
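`Ylokh` streams through an async generator, so tokens are consumed with `async for`. A hedged driver (import path assumed from this commit's layout):

import asyncio

from g4f.Provider.Ylokh import Ylokh  # path assumed from this commit's layout

async def main() -> None:
    messages = [{"role": "user", "content": "Hello"}]
    async for token in Ylokh.create_async_generator("gpt-3.5-turbo", messages, stream=True):
        print(token, end="", flush=True)

asyncio.run(main())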
g4f/g4f/Provider/You.py
ADDED
@@ -0,0 +1,40 @@
from __future__ import annotations

import json

from curl_cffi.requests import AsyncSession

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt


class You(AsyncGeneratorProvider):
    url = "https://you.com"
    working = True
    supports_gpt_35_turbo = True
    supports_stream = False


    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs,
    ) -> AsyncGenerator:
        async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107") as session:
            headers = {
                "Accept": "text/event-stream",
                "Referer": "https://you.com/search?fromSearchBar=true&tbm=youchat",
            }
            response = await session.get(
                "https://you.com/api/streamingSearch",
                params={"q": format_prompt(messages), "domain": "youchat", "chat": ""},
                headers=headers
            )
            response.raise_for_status()
            start = 'data: {"youChatToken": '
            for line in response.text.splitlines():
                if line.startswith(start):
                    yield json.loads(line[len(start): -1])
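The slicing in the last line strips both the SSE prefix and the closing `}` of each event, leaving a bare JSON string to decode. A standalone check with a sample event line:

import json

start = 'data: {"youChatToken": '
line = 'data: {"youChatToken": "Hello"}'  # sample event line for illustration
print(json.loads(line[len(start):-1]))  # Hello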
g4f/g4f/Provider/Yqcloud.py
ADDED
@@ -0,0 +1,48 @@
from __future__ import annotations

from aiohttp import ClientSession

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider, format_prompt


class Yqcloud(AsyncGeneratorProvider):
    url = "https://chat9.yqcloud.top/"
    working = True
    supports_gpt_35_turbo = True

    @staticmethod
    async def create_async_generator(
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs,
    ) -> AsyncGenerator:
        async with ClientSession(
            headers=_create_header()
        ) as session:
            payload = _create_payload(messages)
            async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response:
                response.raise_for_status()
                async for stream in response.content.iter_any():
                    if stream:
                        yield stream.decode()


def _create_header():
    return {
        "accept"       : "application/json, text/plain, */*",
        "content-type" : "application/json",
        "origin"       : "https://chat9.yqcloud.top",
    }


def _create_payload(messages: list[dict[str, str]]):
    return {
        "prompt": format_prompt(messages),
        "network": True,
        "system": "",
        "withoutContext": False,
        "stream": True,
        "userId": "#/chat/1693025544336"
    }
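`_create_payload` pins a static `userId` and wraps the flattened prompt. A standalone peek at the request body it builds, with `format_prompt` stubbed out since the real helper lives in `base_provider`:

def format_prompt(messages):  # illustrative stub; the real helper is imported from base_provider
    return "\n".join(f"{m['role'].capitalize()}: {m['content']}" for m in messages) + "\nAssistant:"

payload = {
    "prompt": format_prompt([{"role": "user", "content": "Hi"}]),
    "network": True,
    "system": "",
    "withoutContext": False,
    "stream": True,
    "userId": "#/chat/1693025544336",
}
print(payload)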
g4f/g4f/Provider/__init__.py
ADDED
@@ -0,0 +1,95 @@
from __future__ import annotations
from .Acytoo import Acytoo
from .Aibn import Aibn
from .Aichat import Aichat
from .Ails import Ails
from .AiService import AiService
from .AItianhu import AItianhu
from .AItianhuSpace import AItianhuSpace
from .Aivvm import Aivvm
from .Bard import Bard
from .Bing import Bing
from .ChatBase import ChatBase
from .ChatgptAi import ChatgptAi
from .ChatgptDuo import ChatgptDuo
from .ChatgptLogin import ChatgptLogin
from .CodeLinkAva import CodeLinkAva
from .DeepAi import DeepAi
from .DfeHub import DfeHub
from .EasyChat import EasyChat
from .Forefront import Forefront
from .GetGpt import GetGpt
from .GptGo import GptGo
from .H2o import H2o
from .HuggingChat import HuggingChat
from .Liaobots import Liaobots
from .Lockchat import Lockchat
from .Myshell import Myshell
from .Opchatgpts import Opchatgpts
from .OpenaiChat import OpenaiChat
from .OpenAssistant import OpenAssistant
from .PerplexityAi import PerplexityAi
from .Raycast import Raycast
from .Theb import Theb
from .Vercel import Vercel
from .Vitalentum import Vitalentum
from .Wewordle import Wewordle
from .Ylokh import Ylokh
from .You import You
from .Yqcloud import Yqcloud
from .Equing import Equing
from .FastGpt import FastGpt
from .V50 import V50
from .Wuguokai import Wuguokai

from .base_provider import BaseProvider, AsyncProvider, AsyncGeneratorProvider
from .retry_provider import RetryProvider

__all__ = [
    'BaseProvider',
    'AsyncProvider',
    'AsyncGeneratorProvider',
    'RetryProvider',
    'Acytoo',
    'Aibn',
    'Aichat',
    'Ails',
    'AiService',
    'AItianhu',
    'AItianhuSpace',
    'Aivvm',
    'Bard',
    'Bing',
    'ChatBase',
    'ChatgptAi',
    'ChatgptDuo',
    'ChatgptLogin',
    'CodeLinkAva',
    'DeepAi',
    'DfeHub',
    'EasyChat',
    'Forefront',
    'GetGpt',
    'GptGo',
    'H2o',
    'HuggingChat',
    'Liaobots',
    'Lockchat',
    'Myshell',
    'Opchatgpts',
    'Raycast',
    'OpenaiChat',
    'OpenAssistant',
    'PerplexityAi',
    'Theb',
    'Vercel',
    'Vitalentum',
    'Wewordle',
    'Ylokh',
    'You',
    'Yqcloud',
    'Equing',
    'FastGpt',
    'Wuguokai',
    'V50'
]
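With the package assembled, a quick sketch that enumerates the exported providers and reports their `working` flags (assumes the `g4f` package from this commit is on the import path):

import g4f.Provider as Provider  # assumes this commit's g4f package is importable

for name in Provider.__all__:
    obj = getattr(Provider, name)
    if hasattr(obj, "working"):  # base classes may not define the flag
        print(f"{name}: working={obj.working}")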
g4f/g4f/Provider/__pycache__/AItianhu.cpython-310.pyc
ADDED
Binary file (1.9 kB)
g4f/g4f/Provider/__pycache__/AItianhuSpace.cpython-310.pyc
ADDED
Binary file (2.9 kB)
g4f/g4f/Provider/__pycache__/Acytoo.cpython-310.pyc
ADDED
Binary file (1.72 kB)
g4f/g4f/Provider/__pycache__/AiService.cpython-310.pyc
ADDED
Binary file (1.57 kB)
g4f/g4f/Provider/__pycache__/Aibn.cpython-310.pyc
ADDED
Binary file (2.09 kB)
g4f/g4f/Provider/__pycache__/Aichat.cpython-310.pyc
ADDED
Binary file (1.88 kB)
g4f/g4f/Provider/__pycache__/Ails.cpython-310.pyc
ADDED
Binary file (3.78 kB)