anon4 committed
Commit 697e9a8
1 Parent(s): dcf4d64

Create app.js
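
app.js is a small Express proxy that exposes OpenAI-compatible //models and //chat/completions endpoints, flattens incoming ChatML messages into a plain text prompt, and forwards the request to the completion backend configured via the API_URL environment variable, supporting both streaming (SSE) and non-streaming responses.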

Files changed (1): app.js +191 -0
app.js ADDED
@@ -0,0 +1,191 @@
+ const express = require("express");
+ const app = express();
+ const port = 3000;
+
+ app.use(express.json());
+
+ app.get("//models", async (req, res) => {
+   try {
+     res.status(200).json({ object: "list", data: createFakeModelsList() });
+   } catch (error) {
+     // Surface failures instead of silently swallowing them.
+     res.status(500).json({ error: { message: String(error) } });
+   }
+ });
+
+ app.post("//chat/completions", async (clientRequest, clientResponse) => {
+   try {
+     const {
+       frequency_penalty,
+       presence_penalty,
+       max_tokens,
+       stop,
+       temperature,
+       top_p,
+     } = clientRequest.body;
+
+     // Convert the ChatML request into a plain completion request for the backend.
+     const apiRequestBody = {
+       model: "gpt-4",
+       prompt: convertChatMLPrompt(clientRequest.body.messages),
+       frequency_penalty,
+       presence_penalty,
+       max_tokens,
+       stop,
+       temperature,
+       top_p,
+     };
+
+     // Requires Node 18+, where fetch is available globally.
+     const apiResponse = await fetch(process.env.API_URL, {
+       method: "POST",
+       headers: {
+         "Content-Type": "application/json",
+       },
+       body: JSON.stringify(apiRequestBody),
+     });
+
+     if (clientRequest.body.stream) {
+       handleResponseAsStream(clientResponse, apiResponse);
+     } else {
+       handleResponseAsNonStreamable(clientResponse, apiResponse);
+     }
+   } catch (error) {
+     clientResponse.status(500).json({ error: { message: String(error) } });
+   }
+ });
+
+ app.listen(port, () => {
+   console.log(`Example app listening on port ${port}`);
+ });
+
+ async function handleResponseAsNonStreamable(clientResponse, apiResponse) {
+   const apiText = await apiResponse.text();
+   const clientMessage = createClientMessage(apiText);
+   // res.json() sets the application/json Content-Type for API clients.
+   clientResponse.json(clientMessage);
+ }
+
+ async function handleResponseAsStream(clientResponse, apiResponse) {
+   const reader = apiResponse.body.getReader();
+   const decoder = new TextDecoder();
+
+   clientResponse.setHeader("Content-Type", "text/event-stream");
+   clientResponse.write("data: " + JSON.stringify(createBeginChunk()) + "\n\n");
+
+   // Pump the upstream body and re-emit each piece as an SSE delta chunk.
+   function pump() {
+     return reader.read().then(({ done, value }) => {
+       // When no more data needs to be consumed, close the stream.
+       if (done) {
+         clientResponse.write(
+           "data: " + JSON.stringify(createEndChunk()) + "\n\n"
+         );
+         // OpenAI-style streams terminate with a [DONE] sentinel.
+         clientResponse.write("data: [DONE]\n\n");
+         clientResponse.end();
+         return;
+       }
+
+       // Decode with stream: true so multi-byte characters split across
+       // chunks are reassembled correctly.
+       const textData = decoder.decode(value, { stream: true });
+       clientResponse.write(
+         "data: " + JSON.stringify(createMessageChunk(textData)) + "\n\n"
+       );
+       return pump();
+     });
+   }
+   return pump();
+ }
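+
+ // For illustration, the resulting event stream looks roughly like this
+ // (abbreviated example values, not literal output):
+ //   data: {"object":"chat.completion.chunk","choices":[{"delta":{"role":"assistant","content":""},...}]}
+ //   data: {"object":"chat.completion.chunk","choices":[{"delta":{"content":"Hello"},...}]}
+ //   data: {"object":"chat.completion.chunk","choices":[{"delta":{},"finish_reason":"stop"}]}
+ //   data: [DONE]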
+
+ // OpenAI "created" fields are Unix timestamps in seconds, not milliseconds.
+ function getCurrentDate() {
+   return Math.floor(Date.now() / 1000);
+ }
+
+ // Flatten ChatML messages into a single plain-text prompt for the backend.
+ function convertChatMLPrompt(messages) {
+   const messageStrings = [];
+   messages.forEach((m) => {
+     if (m.role === "system" && m.name === undefined) {
+       messageStrings.push("System: " + m.content);
+     } else if (m.role === "system" && m.name !== undefined) {
+       messageStrings.push(m.name + ": " + m.content);
+     } else {
+       messageStrings.push(m.role + ": " + m.content);
+     }
+   });
+   return messageStrings.join("\n") + "\nassistant:";
+ }
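+
+ // For example, a hypothetical message list
+ //   [{ role: "system", content: "Be brief." }, { role: "user", content: "Hi" }]
+ // is flattened to:
+ //   System: Be brief.
+ //   user: Hi
+ //   assistant: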
+
+ // Wrap the backend text in an OpenAI-style non-streaming chat completion.
+ const createClientMessage = (text) => ({
+   id: "chatcmpl-123",
+   object: "chat.completion",
+   created: getCurrentDate(),
+   model: "gpt-4",
+   choices: [
+     {
+       index: 0,
+       message: { role: "assistant", content: text },
+       logprobs: null,
+       finish_reason: "stop",
+     },
+   ],
+ });
+
+ // First SSE chunk: announces the assistant role with empty content.
+ const createBeginChunk = () => ({
+   id: "chatcmpl-123",
+   object: "chat.completion.chunk",
+   created: getCurrentDate(),
+   model: "gpt-4",
+   system_fingerprint: "",
+   choices: [
+     {
+       index: 0,
+       delta: { role: "assistant", content: "" },
+       logprobs: null,
+       finish_reason: null,
+     },
+   ],
+ });
+
+ // Intermediate SSE chunk carrying one piece of generated text.
+ const createMessageChunk = (text) => ({
+   id: "chatcmpl-123",
+   object: "chat.completion.chunk",
+   created: getCurrentDate(),
+   model: "gpt-4",
+   system_fingerprint: "",
+   choices: [
+     {
+       index: 0,
+       delta: { content: text },
+       logprobs: null,
+       finish_reason: null,
+     },
+   ],
+ });
+
+ // Final SSE chunk: empty delta with finish_reason "stop".
+ const createEndChunk = () => ({
+   id: "chatcmpl-123",
+   object: "chat.completion.chunk",
+   created: getCurrentDate(),
+   model: "gpt-4",
+   system_fingerprint: "",
+   choices: [{ index: 0, delta: {}, logprobs: null, finish_reason: "stop" }],
+ });
+
+ // Advertise a single fake "gpt-4" model so OpenAI-compatible clients connect.
+ function createFakeModelsList() {
+   return [
+     {
+       id: "gpt-4",
+       object: "model",
+       created: getCurrentDate(),
+       owned_by: "openai",
+       permission: [
+         {
+           id: "modelperm-gpt-4",
+           object: "model_permission",
+           created: getCurrentDate(),
+           organization: "*",
+           group: null,
+           is_blocking: false,
+         },
+       ],
+       root: "gpt-4",
+       parent: null,
+     },
+   ];
+ }
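
For reference, a minimal smoke test of the proxy might look like the sketch below. It assumes the server was started locally with API_URL pointing at a compatible text-completion backend and exercises the same //models and //chat/completions routes registered above; the file name test.js and the example messages are illustrative, not part of the commit.

// test.js -- hypothetical client for the proxy above (Node 18+ for global fetch).
// Start the server first, e.g.: API_URL=<backend completions URL> node app.js
const BASE = "http://localhost:3000";

async function main() {
  // List the advertised (fake) models.
  const models = await fetch(BASE + "//models").then((r) => r.json());
  console.log(models.data.map((m) => m.id)); // expected: [ 'gpt-4' ]

  // Request a non-streaming chat completion.
  const completion = await fetch(BASE + "//chat/completions", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      model: "gpt-4",
      stream: false,
      messages: [
        { role: "system", content: "You are a helpful assistant." },
        { role: "user", content: "Say hello." },
      ],
    }),
  }).then((r) => r.json());
  console.log(completion.choices[0].message.content);
}

main().catch(console.error);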