Skip to content

Commit

Permalink
resolve conflict.
Browse files Browse the repository at this point in the history
  • Loading branch information
GarfieldDai committed Jan 2, 2024
2 parents ed6c031 + f1c8364 commit c785cfe
Show file tree
Hide file tree
Showing 29 changed files with 1,096 additions and 130 deletions.
15 changes: 15 additions & 0 deletions api/.vscode/launch.json
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,21 @@
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "Python: Celery",
"type": "python",
"request": "launch",
"module": "celery",
"justMyCode": true,
"args": ["-A", "app.celery", "worker", "-P", "gevent", "-c", "1", "--loglevel", "info", "-Q", "dataset,generation,mail"],
"envFile": "${workspaceFolder}/.env",
"env": {
"FLASK_APP": "app.py",
"FLASK_DEBUG": "1",
"GEVENT_SUPPORT": "True"
},
"console": "integratedTerminal"
},
{
"name": "Python: Flask",
"type": "python",
Expand Down
18 changes: 4 additions & 14 deletions api/core/model_runtime/model_providers/chatglm/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,21 +81,11 @@ def validate_credentials(self, model: str, credentials: dict) -> None:
:return:
"""
try:
response = post(join(credentials['api_base'], "v1/chat/completions"), data=dumps({
"model": model,
"messages": [
{
"role": "user",
"content": "ping"
}
],
}),
headers={
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0"
self._invoke(model=model, credentials=credentials, prompt_messages=[
UserPromptMessage(content="ping"),
], model_parameters={
"max_tokens": 16,
})
if response.status_code != 200:
raise CredentialsValidateFailedError("Invalid credentials")
except Exception as e:
raise CredentialsValidateFailedError(str(e))

Expand Down
5 changes: 4 additions & 1 deletion api/tests/integration_tests/.env.example
Original file line number Diff line number Diff line change
Expand Up @@ -60,4 +60,7 @@ LOCALAI_SERVER_URL=
COHERE_API_KEY=

# Jina Credentials
JINA_API_KEY=
JINA_API_KEY=

# Mock Switch
MOCK_SWITCH=false
68 changes: 68 additions & 0 deletions api/tests/integration_tests/model_runtime/__mock/anthropic.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
import anthropic
from anthropic import Anthropic
from anthropic.resources.completions import Completions
from anthropic.types import completion_create_params, Completion
from anthropic._types import NOT_GIVEN, NotGiven, Headers, Query, Body

from _pytest.monkeypatch import MonkeyPatch

from typing import List, Union, Literal, Any, Generator
from time import sleep

import pytest
import os

# Whether to mock the Anthropic SDK instead of hitting the real API.
# Case-insensitive compare for consistency with the openai mock module,
# so MOCK_SWITCH=True / TRUE also enables mocking.
MOCK = os.getenv('MOCK_SWITCH', 'false').lower() == 'true'

class MockAnthropicClass(object):
    """Stand-in for the Anthropic completions API used by integration tests."""

    @staticmethod
    def mocked_anthropic_chat_create_sync(model: str) -> Completion:
        """Return one canned, fully-formed (non-streaming) completion."""
        return Completion(
            completion='hello, I\'m a chatbot from anthropic',
            model=model,
            stop_reason='stop_sequence'
        )

    @staticmethod
    def mocked_anthropic_chat_create_stream(model: str) -> Generator[Completion, None, None]:
        """Yield the canned reply one character at a time, then a terminal chunk."""
        full_response_text = "hello, I'm a chatbot from anthropic"

        for character in full_response_text:
            sleep(0.1)  # simulate network latency between stream chunks
            yield Completion(
                completion=character,
                model=model,
                stop_reason=''
            )

        # Final empty chunk carries the stop reason, ending the stream.
        sleep(0.1)
        yield Completion(
            completion='',
            model=model,
            stop_reason='stop_sequence'
        )

    def mocked_anthropic(self: Completions, *,
                         max_tokens_to_sample: int,
                         model: Union[str, Literal["claude-2.1", "claude-instant-1"]],
                         prompt: str,
                         stream: Literal[True],
                         **kwargs: Any
                         ) -> Union[Completion, Generator[Completion, None, None]]:
        """Replacement for Completions.create; validates the key length only."""
        # Real Anthropic keys are long; a short one is treated as invalid.
        if len(self._client.api_key) < 18:
            raise anthropic.AuthenticationError('Invalid API key')

        if stream:
            return MockAnthropicClass.mocked_anthropic_chat_create_stream(model=model)
        return MockAnthropicClass.mocked_anthropic_chat_create_sync(model=model)

@pytest.fixture
def setup_anthropic_mock(request, monkeypatch: MonkeyPatch):
    """Patch Anthropic's Completions.create with the mock while MOCK_SWITCH is on."""
    patched = MOCK
    if patched:
        monkeypatch.setattr(Completions, 'create', MockAnthropicClass.mocked_anthropic)

    yield

    # Teardown: restore the real SDK method if we replaced it.
    if patched:
        monkeypatch.undo()
127 changes: 127 additions & 0 deletions api/tests/integration_tests/model_runtime/__mock/google.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,127 @@
from google.generativeai import GenerativeModel
from google.generativeai.types import GenerateContentResponse
from google.generativeai.types.generation_types import BaseGenerateContentResponse
import google.generativeai.types.generation_types as generation_config_types
import google.generativeai.types.content_types as content_types
import google.generativeai.types.safety_types as safety_types
from google.generativeai.client import _ClientManager, configure

from google.ai import generativelanguage as glm

from typing import Generator, List
from _pytest.monkeypatch import MonkeyPatch

import pytest

current_api_key = ''

class MockGoogleResponseClass(object):
    """Iterable response stub that replays a canned answer chunk by chunk."""

    _done = False

    def __iter__(self):
        text = 'it\'s google!'

        for index in range(len(text) + 1):
            # The final iteration emits the terminal "done" chunk.
            is_last = index == len(text)
            if is_last:
                self._done = True
            yield GenerateContentResponse(
                done=is_last,
                iterator=None,
                result=glm.GenerateContentResponse({

                }),
                chunks=[]
            )

class MockGoogleResponseCandidateClass(object):
    """Minimal candidate stub; only the attribute the tests read is provided."""
    # Mimics a normally-terminated generation.
    finish_reason = 'stop'

class MockGoogleClass(object):
    """Mock surface for google.generativeai used by the integration tests."""

    @staticmethod
    def generate_content_sync() -> GenerateContentResponse:
        """Return a single completed (non-streaming) response stub."""
        return GenerateContentResponse(
            done=True,
            iterator=None,
            result=glm.GenerateContentResponse({

            }),
            chunks=[]
        )

    @staticmethod
    def generate_content_stream() -> Generator[GenerateContentResponse, None, None]:
        """Return an iterable that replays the canned answer chunk by chunk."""
        return MockGoogleResponseClass()

    def generate_content(self: GenerativeModel,
                         contents: content_types.ContentsType,
                         *,
                         generation_config: generation_config_types.GenerationConfigType | None = None,
                         safety_settings: safety_types.SafetySettingOptions | None = None,
                         stream: bool = False,
                         **kwargs,
                         ) -> GenerateContentResponse:
        """Replacement for GenerativeModel.generate_content.

        Validates the captured API key length, then returns either the
        streaming iterator or a single canned response.
        """
        global current_api_key

        # Mimic credential validation: real keys are longer than 16 chars.
        if len(current_api_key) < 16:
            raise Exception('Invalid API key')

        if stream:
            return MockGoogleClass.generate_content_stream()

        return MockGoogleClass.generate_content_sync()

    @property
    def generative_response_text(self) -> str:
        """Canned text, patched over BaseGenerateContentResponse.text."""
        return 'it\'s google!'

    @property
    def generative_response_candidates(self) -> List[MockGoogleResponseCandidateClass]:
        """Single stub candidate, patched over BaseGenerateContentResponse.candidates."""
        return [MockGoogleResponseCandidateClass()]

    def make_client(self: _ClientManager, name: str):
        """Replacement for _ClientManager.make_client.

        Captures the configured API key for later validation and constructs
        the service client without running its network-touching __init__.
        """
        global current_api_key

        if name.endswith("_async"):
            name = name.split("_")[0]
            cls = getattr(glm, name.title() + "ServiceAsyncClient")
        else:
            cls = getattr(glm, name.title() + "ServiceClient")

        # Attempt to configure using defaults.
        if not self.client_config:
            configure()

        client_options = self.client_config.get("client_options", None)
        if client_options:
            current_api_key = client_options.api_key

        def nop(self, *args, **kwargs):
            pass

        # Temporarily disable __init__ so no real channel/transport is created.
        original_init = cls.__init__
        cls.__init__ = nop
        client: glm.GenerativeServiceClient = cls(**self.client_config)
        cls.__init__ = original_init

        # BUG FIX: the original returned the client only when default_metadata
        # was falsy, implicitly returning None otherwise. Always return it.
        return client

@pytest.fixture
def setup_google_mock(request, monkeypatch: MonkeyPatch):
    """Patch google.generativeai response/model/client entry points with mocks."""
    patch_table = [
        (BaseGenerateContentResponse, "text", MockGoogleClass.generative_response_text),
        (BaseGenerateContentResponse, "candidates", MockGoogleClass.generative_response_candidates),
        (GenerativeModel, "generate_content", MockGoogleClass.generate_content),
        (_ClientManager, "make_client", MockGoogleClass.make_client),
    ]
    for target, attribute, replacement in patch_table:
        monkeypatch.setattr(target, attribute, replacement)

    yield

    # Teardown: restore all patched attributes.
    monkeypatch.undo()
63 changes: 63 additions & 0 deletions api/tests/integration_tests/model_runtime/__mock/openai.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
from tests.integration_tests.model_runtime.__mock.openai_completion import MockCompletionsClass
from tests.integration_tests.model_runtime.__mock.openai_chat import MockChatClass
from tests.integration_tests.model_runtime.__mock.openai_remote import MockModelClass
from tests.integration_tests.model_runtime.__mock.openai_moderation import MockModerationClass
from tests.integration_tests.model_runtime.__mock.openai_speech2text import MockSpeech2TextClass
from tests.integration_tests.model_runtime.__mock.openai_embeddings import MockEmbeddingsClass
from openai.resources.completions import Completions
from openai.resources.chat import Completions as ChatCompletions
from openai.resources.models import Models
from openai.resources.moderations import Moderations
from openai.resources.audio.transcriptions import Transcriptions
from openai.resources.embeddings import Embeddings

# import monkeypatch
from _pytest.monkeypatch import MonkeyPatch
from typing import Literal, Callable, List

import os
import pytest

def mock_openai(monkeypatch: MonkeyPatch, methods: List[Literal["completion", "chat", "remote", "moderation", "speech2text", "text_embedding"]]) -> Callable[[], None]:
    """
    mock openai module

    :param monkeypatch: pytest monkeypatch fixture
    :param methods: names of the OpenAI resources to patch
    :return: unpatch function
    """
    # Resource name -> (target class, attribute, mock replacement).
    patch_table = {
        "completion": (Completions, "create", MockCompletionsClass.completion_create),
        "chat": (ChatCompletions, "create", MockChatClass.chat_create),
        "remote": (Models, "list", MockModelClass.list),
        "moderation": (Moderations, "create", MockModerationClass.moderation_create),
        "speech2text": (Transcriptions, "create", MockSpeech2TextClass.speech2text_create),
        "text_embedding": (Embeddings, "create", MockEmbeddingsClass.create_embeddings),
    }

    for method_name, (target, attribute, replacement) in patch_table.items():
        if method_name in methods:
            monkeypatch.setattr(target, attribute, replacement)

    def unpatch() -> None:
        monkeypatch.undo()

    return unpatch


MOCK = os.getenv('MOCK_SWITCH', 'false').lower() == 'true'

@pytest.fixture
def setup_openai_mock(request, monkeypatch):
    """Patch the OpenAI resources named in request.param while MOCK_SWITCH is on."""
    requested = getattr(request, 'param', [])
    unpatch = mock_openai(monkeypatch, methods=requested) if MOCK else None

    yield

    # Teardown: undo the patches if mocking was enabled.
    if unpatch is not None:
        unpatch()
Loading

0 comments on commit c785cfe

Please sign in to comment.