From 5db58fd87f230fbe5bae599bb4b120ab42cad3be Mon Sep 17 00:00:00 2001
From: "t.me/xtekky" <98614666+xtekky@users.noreply.github.com>
Date: Sat, 24 Jun 2023 03:47:00 +0200
Subject: gpt4free v2, first release

---
 interference/app.py | 86 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 86 insertions(+)
 create mode 100644 interference/app.py

(limited to 'interference/app.py')

diff --git a/interference/app.py b/interference/app.py
new file mode 100644
index 00000000..afe15df7
--- /dev/null
+++ b/interference/app.py
@@ -0,0 +1,86 @@
+import os
+import time
+import json
+import random
+
+from g4f import Model, ChatCompletion, Provider
+from flask import Flask, request, Response
+from flask_cors import CORS
+
+app = Flask(__name__)
+CORS(app)
+
@app.route("/chat/completions", methods=['POST'])
def chat_completions():
    """OpenAI-compatible ``/chat/completions`` endpoint backed by g4f.

    Reads ``model``, ``messages`` and ``stream`` from the JSON request body.
    Returns a full ``chat.completion`` dict when ``stream`` is false, or an
    SSE (``text/event-stream``) response of ``chat.completion.chunk`` events
    when it is true.
    """
    streaming = request.json.get('stream', False)
    model = request.json.get('model', 'gpt-3.5-turbo')
    messages = request.json.get('messages')

    response = ChatCompletion.create(model=model, stream=streaming,
                                     messages=messages)

    if not streaming:
        # g4f sometimes returns the provider's error text in the payload
        # instead of raising; retry until that marker is gone.
        # NOTE(review): this loop has no retry cap — consider bounding it.
        while 'curl_cffi.requests.errors.RequestsError' in response:
            response = ChatCompletion.create(model=model, stream=streaming,
                                             messages=messages)

        completion_timestamp = int(time.time())
        completion_id = ''.join(random.choices(
            'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))

        return {
            'id': 'chatcmpl-%s' % completion_id,
            'object': 'chat.completion',
            'created': completion_timestamp,
            'model': model,
            'usage': {
                # Token accounting is not available from g4f providers.
                'prompt_tokens': None,
                'completion_tokens': None,
                'total_tokens': None
            },
            'choices': [{
                'message': {
                    'role': 'assistant',
                    'content': response
                },
                'finish_reason': 'stop',
                'index': 0
            }]
        }

    def stream():
        # Per the OpenAI streaming format, every chunk of one completion
        # shares the same id and created timestamp — generate them once
        # rather than per token.
        completion_timestamp = int(time.time())
        completion_id = ''.join(random.choices(
            'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))

        for token in response:
            completion_data = {
                'id': f'chatcmpl-{completion_id}',
                'object': 'chat.completion.chunk',
                'created': completion_timestamp,
                # Echo the requested model (previously hard-coded to
                # 'gpt-3.5-turbo-0301', inconsistent with the branch above).
                'model': model,
                'choices': [
                    {
                        'delta': {
                            'content': token
                        },
                        'index': 0,
                        'finish_reason': None
                    }
                ]
            }

            # Fix: separators must be a 2-tuple. The original (',' ':') was
            # implicit string concatenation yielding ',:' and only worked by
            # accidental 2-character unpacking inside json.dumps.
            yield 'data: %s\n\n' % json.dumps(completion_data,
                                              separators=(',', ':'))
            time.sleep(0.1)  # light throttle between SSE events

    return app.response_class(stream(), mimetype='text/event-stream')
+
+
if __name__ == '__main__':
    # Development entry point: listen on all interfaces at port 1337
    # with Flask's debug mode enabled.
    app.run(host='0.0.0.0', port=1337, debug=True)
-- 
cgit v1.2.3