summaryrefslogtreecommitdiffstats
path: root/g4f/Provider/GPROChat.py
diff options
context:
space:
mode:
authorkqlio67 <kqlio67@users.noreply.github.com>2024-11-06 16:25:09 +0100
committerkqlio67 <kqlio67@users.noreply.github.com>2024-11-06 16:25:09 +0100
commite98793d0a7af43878cf023fb045dd945a82507cf (patch)
tree205f2318755db4c7ad41a6d13e735c5d48e1450b /g4f/Provider/GPROChat.py
parentUpdate (g4f/Provider/DeepInfra.py g4f/Provider/__init__.py g4f/Provider/needs_auth/) (diff)
downloadgpt4free-e98793d0a7af43878cf023fb045dd945a82507cf.tar
gpt4free-e98793d0a7af43878cf023fb045dd945a82507cf.tar.gz
gpt4free-e98793d0a7af43878cf023fb045dd945a82507cf.tar.bz2
gpt4free-e98793d0a7af43878cf023fb045dd945a82507cf.tar.lz
gpt4free-e98793d0a7af43878cf023fb045dd945a82507cf.tar.xz
gpt4free-e98793d0a7af43878cf023fb045dd945a82507cf.tar.zst
gpt4free-e98793d0a7af43878cf023fb045dd945a82507cf.zip
Diffstat (limited to 'g4f/Provider/GPROChat.py')
-rw-r--r--g4f/Provider/GPROChat.py67
1 file changed, 0 insertions, 67 deletions
diff --git a/g4f/Provider/GPROChat.py b/g4f/Provider/GPROChat.py
deleted file mode 100644
index a33c9571..00000000
--- a/g4f/Provider/GPROChat.py
+++ /dev/null
@@ -1,67 +0,0 @@
from __future__ import annotations

import codecs
import hashlib
import time

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
-
class GPROChat(AsyncGeneratorProvider, ProviderModelMixin):
    """Async streaming provider for gprochat.com (Gemini Pro backend).

    Requests are authenticated with a SHA-256 signature over the timestamp,
    prompt, and a shared secret; responses are streamed as raw UTF-8 text.
    """
    label = "GPROChat"
    url = "https://gprochat.com"
    api_endpoint = "https://gprochat.com/api/generate"
    working = True
    supports_stream = True
    supports_message_history = True
    default_model = 'gemini-pro'
    # Declared locally so get_model() does not silently depend on the mixin
    # providing these attributes (the class previously defined neither).
    models = [default_model]
    model_aliases = {}

    @staticmethod
    def generate_signature(timestamp: int, message: str) -> str:
        """Return the request signature the API expects.

        The endpoint validates ``sha256(f"{timestamp}:{message}:{secret}")``
        as a lowercase hex digest.

        Args:
            timestamp: Unix time in milliseconds (must match the "time" field).
            message: The full prompt text being signed.

        Returns:
            Hex-encoded SHA-256 digest.
        """
        secret_key = "2BC120D4-BB36-1B60-26DE-DB630472A3D8"
        hash_input = f"{timestamp}:{message}:{secret_key}"
        return hashlib.sha256(hash_input.encode('utf-8')).hexdigest()

    @classmethod
    def get_model(cls, model: str) -> str:
        """Resolve *model* to a supported model name.

        Falls back to ``default_model`` for unknown names instead of raising.
        """
        if model in cls.models:
            return model
        return cls.model_aliases.get(model, cls.default_model)

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Stream the model's reply as decoded text fragments.

        Args:
            model: Requested model name; resolved via :meth:`get_model`.
            messages: Conversation history, flattened with ``format_prompt``.
            proxy: Optional proxy URL passed to aiohttp.

        Yields:
            Text chunks as they arrive from the server.

        Raises:
            aiohttp.ClientResponseError: On non-2xx HTTP status.
        """
        model = cls.get_model(model)
        timestamp = int(time.time() * 1000)  # API expects milliseconds
        prompt = format_prompt(messages)
        sign = cls.generate_signature(timestamp, prompt)

        headers = {
            "accept": "*/*",
            "origin": cls.url,
            "referer": f"{cls.url}/",
            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
            "content-type": "text/plain;charset=UTF-8"
        }

        data = {
            "messages": [{"role": "user", "parts": [{"text": prompt}]}],
            "time": timestamp,
            "pass": None,
            "sign": sign
        }

        async with ClientSession(headers=headers) as session:
            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                response.raise_for_status()
                # iter_any() may split a multi-byte UTF-8 sequence across
                # chunks, which would make a plain chunk.decode() raise
                # UnicodeDecodeError. An incremental decoder buffers the
                # partial sequence until the rest arrives.
                decoder = codecs.getincrementaldecoder('utf-8')(errors='replace')
                async for chunk in response.content.iter_any():
                    if chunk:
                        text = decoder.decode(chunk)
                        if text:
                            yield text
                # Flush any buffered trailing bytes.
                tail = decoder.decode(b'', final=True)
                if tail:
                    yield tail