author     kqlio67 <kqlio67@users.noreply.github.com>   2024-10-21 20:45:40 +0200
committer  kqlio67 <kqlio67@users.noreply.github.com>   2024-10-21 20:45:40 +0200
commit     7c666082bdccb2c0e4b90a4740f7e48c4f4bf478 (patch)
tree       933f86b1487ce45584e0f879e688c75f2db45198 /g4f
parent     Restored provider (g4f/Provider/nexra/NexraChatGPT4o.py) (diff)
Diffstat (limited to 'g4f')
-rw-r--r--  g4f/Provider/nexra/NexraBing.py        3
-rw-r--r--  g4f/Provider/nexra/NexraBlackbox.py    6
-rw-r--r--  g4f/Provider/nexra/NexraChatGPT.py     3
-rw-r--r--  g4f/Provider/nexra/NexraChatGptV2.py   3
-rw-r--r--  g4f/Provider/nexra/NexraChatGptWeb.py  3
5 files changed, 12 insertions, 6 deletions
diff --git a/g4f/Provider/nexra/NexraBing.py b/g4f/Provider/nexra/NexraBing.py
index 755bedd5..b7e8f73a 100644
--- a/g4f/Provider/nexra/NexraBing.py
+++ b/g4f/Provider/nexra/NexraBing.py
@@ -38,6 +38,7 @@ class NexraBing(AbstractProvider, ProviderModelMixin):
         model: str,
         messages: Messages,
         stream: bool,
+        markdown: bool = False,
         **kwargs
     ) -> CreateResult:
         model = cls.get_model(model)
@@ -54,7 +55,7 @@ class NexraBing(AbstractProvider, ProviderModelMixin):
                 }
             ],
             "conversation_style": model,
-            "markdown": False,
+            "markdown": markdown,
             "stream": stream,
             "model": "Bing"
         }
diff --git a/g4f/Provider/nexra/NexraBlackbox.py b/g4f/Provider/nexra/NexraBlackbox.py
index 1b316803..cbe26584 100644
--- a/g4f/Provider/nexra/NexraBlackbox.py
+++ b/g4f/Provider/nexra/NexraBlackbox.py
@@ -33,6 +33,8 @@ class NexraBlackbox(AbstractProvider, ProviderModelMixin):
         model: str,
         messages: Messages,
         stream: bool,
+        markdown: bool = False,
+        websearch: bool = False,
         **kwargs
     ) -> CreateResult:
         model = cls.get_model(model)
@@ -48,9 +50,9 @@ class NexraBlackbox(AbstractProvider, ProviderModelMixin):
                     "content": format_prompt(messages)
                 }
             ],
-            "websearch": False,
+            "websearch": websearch,
             "stream": stream,
-            "markdown": False,
+            "markdown": markdown,
             "model": model
         }
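
The NexraBlackbox change exposes both flags that were previously hard-coded to False in the request payload. A minimal usage sketch, assuming the class is imported straight from the module named in the diff header and that "blackbox" is an accepted model alias (the alias and the direct classmethod call are assumptions for illustration, not part of this commit):

# Hedged sketch: markdown/websearch are the keyword arguments added above;
# the model alias and the direct create_completion call are illustrative.
from g4f.Provider.nexra.NexraBlackbox import NexraBlackbox

result = NexraBlackbox.create_completion(
    model="blackbox",
    messages=[{"role": "user", "content": "List three uses of web search."}],
    stream=False,
    markdown=True,    # was hard-coded to False before this commit
    websearch=True,   # was hard-coded to False before this commit
)
for chunk in result:
    print(chunk, end="")
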
diff --git a/g4f/Provider/nexra/NexraChatGPT.py b/g4f/Provider/nexra/NexraChatGPT.py
index b9592aac..4039c17e 100644
--- a/g4f/Provider/nexra/NexraChatGPT.py
+++ b/g4f/Provider/nexra/NexraChatGPT.py
@@ -56,6 +56,7 @@ class NexraChatGPT(AbstractProvider, ProviderModelMixin):
         cls,
         model: str,
         messages: Messages,
+        markdown: bool = False,
         **kwargs
     ) -> CreateResult:
         model = cls.get_model(model)
@@ -68,7 +69,7 @@ class NexraChatGPT(AbstractProvider, ProviderModelMixin):
             "messages": [],
             "prompt": format_prompt(messages),
             "model": model,
-            "markdown": False
+            "markdown": markdown
         }
         response = requests.post(cls.api_endpoint, headers=headers, json=data)
diff --git a/g4f/Provider/nexra/NexraChatGptV2.py b/g4f/Provider/nexra/NexraChatGptV2.py
index ed40f070..98e98008 100644
--- a/g4f/Provider/nexra/NexraChatGptV2.py
+++ b/g4f/Provider/nexra/NexraChatGptV2.py
@@ -33,6 +33,7 @@ class NexraChatGptV2(AbstractProvider, ProviderModelMixin):
         model: str,
         messages: Messages,
         stream: bool,
+        markdown: bool = False,
         **kwargs
     ) -> CreateResult:
         model = cls.get_model(model)
@@ -49,7 +50,7 @@ class NexraChatGptV2(AbstractProvider, ProviderModelMixin):
                 }
             ],
             "stream": stream,
-            "markdown": False,
+            "markdown": markdown,
             "model": model
         }
diff --git a/g4f/Provider/nexra/NexraChatGptWeb.py b/g4f/Provider/nexra/NexraChatGptWeb.py
index 653c8904..258ce7f5 100644
--- a/g4f/Provider/nexra/NexraChatGptWeb.py
+++ b/g4f/Provider/nexra/NexraChatGptWeb.py
@@ -31,6 +31,7 @@ class NexraChatGptWeb(AbstractProvider, ProviderModelMixin):
         cls,
         model: str,
         messages: Messages,
+        markdown: bool = False,
         **kwargs
     ) -> CreateResult:
         model = cls.get_model(model)
@@ -42,7 +43,7 @@ class NexraChatGptWeb(AbstractProvider, ProviderModelMixin):
         data = {
             "prompt": format_prompt(messages),
-            "markdown": False
+            "markdown": markdown
         }
         response = requests.post(api_endpoint, headers=headers, json=data)
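
All five providers follow the same pattern: the new keyword arguments default to the old hard-coded values, so existing callers are unaffected, while callers that pass the flags get them forwarded into the request payload. A hedged sketch of that path through the public API, assuming g4f.ChatCompletion.create forwards unrecognized keyword arguments to the provider's create_completion via **kwargs and that "gpt-3.5-turbo" is an alias NexraChatGPT resolves (both are assumptions, not shown in this diff):

# Hedged sketch: relies on kwargs pass-through from ChatCompletion.create to
# the provider; the model name and provider choice are illustrative.
import g4f
from g4f.Provider.nexra.NexraChatGPT import NexraChatGPT

response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    provider=NexraChatGPT,
    messages=[{"role": "user", "content": "Hello"}],
    markdown=True,   # forwarded into the JSON payload built in NexraChatGPT
)
print(response)
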