
Commit

quality: Comply with Ruff
clemlesne committed Sep 5, 2024
1 parent b47504f commit cdab881
Showing 42 changed files with 310 additions and 362 deletions.
10 changes: 5 additions & 5 deletions examples/blocklist.ipynb
@@ -63,14 +63,14 @@
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"from azure.ai.contentsafety import BlocklistClient\n",
"from azure.ai.contentsafety.models import (\n",
" AddOrUpdateTextBlocklistItemsOptions,\n",
" TextBlocklist,\n",
" TextBlocklistItem,\n",
" AddOrUpdateTextBlocklistItemsOptions,\n",
")\n",
"from azure.core.credentials import AzureKeyCredential\n",
"import pandas as pd\n",
"\n",
"key = AzureKeyCredential(\"xxx\")\n",
"client = BlocklistClient(\n",
@@ -102,10 +102,10 @@
"blocklists = client.list_text_blocklists()\n",
"\n",
"if not blocklists:\n",
" print(\"There are no blocklists.\")\n",
" print(\"There are no blocklists.\") # noqa: T201\n",
"\n",
"for blocklist in blocklists:\n",
" print(f\"{blocklist.blocklist_name}: {blocklist.description}\")"
" print(f\"{blocklist.blocklist_name}: {blocklist.description}\") # noqa: T201"
]
},
{
@@ -135,7 +135,7 @@
" block_items[row[\"blocklist\"]].append(text)\n",
"\n",
"for blocklist, words in block_items.items():\n",
" print(f\"Creating blocklist {blocklist} with {len(words)} words\")\n",
" print(f\"Creating blocklist {blocklist} with {len(words)} words\") # noqa: T201\n",
" client.create_or_update_text_blocklist(\n",
" blocklist_name=blocklist,\n",
" options=TextBlocklist(blocklist_name=blocklist),\n",
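The notebook changes above are purely mechanical: the imports are re-sorted and the intentional notebook `print` calls get `# noqa: T201` so Ruff's flake8-print rule stays quiet. A minimal sketch of the resulting pattern, with a placeholder endpoint and key rather than real credentials:

```python
# Minimal sketch of the pattern after the Ruff pass: alphabetically sorted
# imports and explicit `# noqa: T201` on prints that are intentional in an
# example notebook. The endpoint and key are placeholders, not real values.
from azure.ai.contentsafety import BlocklistClient
from azure.ai.contentsafety.models import TextBlocklist
from azure.core.credentials import AzureKeyCredential

client = BlocklistClient(
    endpoint="https://<your-resource>.cognitiveservices.azure.com",
    credential=AzureKeyCredential("xxx"),
)

# Create (or update) a blocklist, then list what exists.
client.create_or_update_text_blocklist(
    blocklist_name="demo-blocklist",
    options=TextBlocklist(blocklist_name="demo-blocklist"),
)
for blocklist in client.list_text_blocklists():
    print(f"{blocklist.blocklist_name}: {blocklist.description}")  # noqa: T201
```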
51 changes: 22 additions & 29 deletions function_app.py
@@ -3,7 +3,6 @@
from datetime import timedelta
from http import HTTPStatus
from os import getenv
from typing import Any, Optional, Union
from urllib.parse import quote_plus, urljoin
from uuid import UUID
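Most of the churn in `function_app.py` comes from replacing `typing.Optional`/`typing.Union` with PEP 604 unions (Ruff's pyupgrade rules), which is what lets the `typing` import above disappear. A tiny before/after sketch with an illustrative function, not one from the repository:

```python
from uuid import UUID


# Before (removed by this commit): Optional/Union spellings from `typing`.
#   def call_search(call_id: Optional[UUID] = None) -> Union[dict, None]: ...
# After: PEP 604 unions, so `from typing import Optional, Union` can go away.
def call_search(call_id: UUID | None = None) -> dict | None:
    """Return a tiny payload when an id is given, otherwise None."""
    if call_id is None:
        return None
    return {"call_id": str(call_id)}
```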

@@ -66,7 +65,7 @@
) # pyright: ignore

# Azure Communication Services
_automation_client: Optional[CallAutomationClient] = None
_automation_client: CallAutomationClient | None = None
_source_caller = PhoneNumberIdentifier(CONFIG.communication_services.phone_number)
logger.info("Using phone number %s", CONFIG.communication_services.phone_number)
_communication_services_jwks_client = jwt.PyJWKClient(
@@ -97,18 +96,19 @@
methods=["GET"],
)
@tracer.start_as_current_span("openapi_get")
async def openapi_get(req: func.HttpRequest) -> func.HttpResponse:
async def openapi_get(
req: func.HttpRequest, # noqa: ARG001
) -> func.HttpResponse:
"""
Generate the OpenAPI specification for the API.
No parameters are expected.
Returns a JSON object with the OpenAPI specification.
"""
with open(
with open( # noqa: ASYNC230
encoding="utf-8",
file=resources_dir("openapi.json"),
mode="r",
) as f:
openapi = json.load(f)
openapi["info"]["version"] = CONFIG.version
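`openapi_get` keeps a synchronous `open()` inside an `async def` and silences Ruff's ASYNC230 instead. If the blocking read ever became a concern, one alternative (not what this commit does) is to push the file I/O onto a worker thread; the path argument below stands in for `resources_dir("openapi.json")`:

```python
import asyncio
import json
from pathlib import Path


async def _load_openapi_spec(path: Path) -> dict:
    """Read and parse a JSON spec without blocking the event loop."""
    # Alternative to the `# noqa: ASYNC230` suppression: run the blocking
    # file read in a worker thread, then parse it in the coroutine.
    raw = await asyncio.to_thread(path.read_text, encoding="utf-8")
    return json.loads(raw)
```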
@@ -130,7 +130,9 @@ async def openapi_get(req: func.HttpRequest) -> func.HttpResponse:
methods=["GET"],
)
@tracer.start_as_current_span("health_liveness_get")
async def health_liveness_get(req: func.HttpRequest) -> func.HttpResponse:
async def health_liveness_get(
req: func.HttpRequest, # noqa: ARG001
) -> func.HttpResponse:
"""
Check if the service is running.
@@ -146,7 +148,9 @@ async def health_liveness_get(req: func.HttpRequest) -> func.HttpResponse:
methods=["GET"],
)
@tracer.start_as_current_span("health_readiness_get")
async def health_readiness_get(req: func.HttpRequest) -> func.HttpResponse:
async def health_readiness_get(
req: func.HttpRequest, # noqa: ARG001
) -> func.HttpResponse:
"""
Check if the service is ready to serve requests.
@@ -415,9 +419,7 @@ async def call_post(req: func.HttpRequest) -> func.HttpResponse:
cognitive_services_endpoint=CONFIG.cognitive_service.endpoint,
source_caller_id_number=_source_caller,
# deepcode ignore AttributeLoadOnNone: Phone number is validated with Pydantic
target_participant=PhoneNumberIdentifier(
initiate.phone_number
), # pyright: ignore
target_participant=PhoneNumberIdentifier(initiate.phone_number), # pyright: ignore
)
logger.info(
"Created call with connection id: %s",
@@ -556,7 +558,7 @@ async def communicationservices_event_post(
Returns a 204 No Content if the events are properly formatted. A 401 Unauthorized if the JWT token is invalid. Otherwise, returns a 400 Bad Request.
"""
# Validate JWT token
service_jwt: Union[str, None] = req.headers.get("Authorization")
service_jwt: str | None = req.headers.get("Authorization")
if not service_jwt:
return _standard_error(
message="Authorization header missing",
@@ -614,7 +616,8 @@ async def communicationservices_event_post(
return func.HttpResponse(status_code=HTTPStatus.NO_CONTENT)


async def _communicationservices_event_worker(
# TODO: Refactor, too long (and remove PLR0912/PLR0915 ignore)
async def _communicationservices_event_worker( # noqa: PLR0912, PLR0915
call_id: UUID,
event_dict: dict,
post: func.Out[str],
@@ -688,7 +691,7 @@ async def _trainings_callback(_call: CallStateModel) -> None:
recognition_result: str = event.data["recognitionType"]

if recognition_result == "speech": # Handle voice
speech_text: Optional[str] = event.data["speechResult"]["speech"]
speech_text: str | None = event.data["speechResult"]["speech"]
if speech_text:
await on_speech_recognized(
call=call,
@@ -835,7 +838,7 @@ def _trigger_post_event(


async def _communicationservices_event_url(
phone_number: PhoneNumber, initiate: Optional[CallInitiateModel] = None
phone_number: PhoneNumber, initiate: CallInitiateModel | None = None
) -> tuple[str, CallStateModel]:
"""
Generate the callback URL for a call.
@@ -933,7 +936,7 @@ async def _trainings_callback(_call: CallStateModel) -> None:
)


def _str_to_contexts(value: Optional[str]) -> Optional[set[CallContextEnum]]:
def _str_to_contexts(value: str | None) -> set[CallContextEnum] | None:
"""
Convert a string to a set of contexts.
@@ -965,12 +968,7 @@ def _validation_error(
Response body is a JSON object with the following structure:
```
{
"error": {
"message": "Validation error",
"details": ["Error message"]
}
}
{"error": {"message": "Validation error", "details": ["Error message"]}}
```
Returns a 400 Bad Request with a JSON body.
@@ -991,7 +989,7 @@

def _standard_error(
message: str,
details: Optional[list[str]] = None,
details: list[str] | None = None,
status_code: HTTPStatus = HTTPStatus.BAD_REQUEST,
) -> func.HttpResponse:
"""
@@ -1000,12 +998,7 @@
Response body is a JSON object with the following structure:
```
{
"error": {
"message": "Error message",
"details": ["Error details"]
}
}
{"error": {"message": "Error message", "details": ["Error details"]}}
```
Returns a response with a JSON body and the specified status code.
@@ -1031,7 +1024,7 @@ async def _use_automation_client() -> CallAutomationClient:
Returns a `CallAutomationClient` instance.
"""
global _automation_client # pylint: disable=global-statement
global _automation_client # noqa: PLW0603
if not isinstance(_automation_client, CallAutomationClient):
_automation_client = CallAutomationClient(
# Deployment
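The last hunk swaps a pylint pragma for `# noqa: PLW0603` on the module-level singleton. A sketch of that lazy-initialization pattern in isolation, with `make_client` standing in for the real `CallAutomationClient` construction:

```python
from collections.abc import Callable

# Sketch of the pattern kept in _use_automation_client(): the module-level
# client is built on first use and reused afterwards, with Ruff's PLW0603
# (global statement) silenced rather than refactored away.
_client: object | None = None


def _use_client(make_client: Callable[[], object]) -> object:
    global _client  # noqa: PLW0603
    if _client is None:
        _client = make_client()
    return _client
```

A cached zero-argument factory (for example with `functools.lru_cache`) would avoid the global entirely; the commit keeps the existing structure and only updates the suppression.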
4 changes: 2 additions & 2 deletions helpers/__init__.py
@@ -4,10 +4,10 @@
def init_env():
path = find_dotenv()
if not path:
print("Env file not found")
print("Env file not found") # noqa: T201
return
load_dotenv(path)
print(f'Env file loaded from "{path}"')
print(f'Env file loaded from "{path}"') # noqa: T201


init_env()
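Here the prints are kept and T201 is silenced inline, which is arguably the pragmatic choice for bootstrap code that may run before logging is configured. For contrast, a sketch of the logging-based alternative (not what the commit does):

```python
import logging

from dotenv import find_dotenv, load_dotenv

logger = logging.getLogger(__name__)


def init_env() -> None:
    # Same flow as helpers/__init__.py, but reporting through logging instead
    # of print, which satisfies flake8-print without suppressions.
    path = find_dotenv()
    if not path:
        logger.warning("Env file not found")
        return
    load_dotenv(path)
    logger.info('Env file loaded from "%s"', path)
```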
35 changes: 19 additions & 16 deletions helpers/call_events.py
@@ -1,5 +1,5 @@
import asyncio
from typing import Awaitable, Callable, Optional
from collections.abc import Awaitable, Callable

from azure.communication.callautomation import DtmfTone, RecognitionChoice
from azure.communication.callautomation.aio import CallAutomationClient
@@ -154,7 +154,7 @@ async def on_speech_recognized(
async def on_recognize_timeout_error(
call: CallStateModel,
client: CallAutomationClient,
contexts: Optional[set[CallContextEnum]],
contexts: set[CallContextEnum] | None,
) -> None:
if (
contexts and CallContextEnum.IVR_LANG_SELECT in contexts
@@ -224,7 +224,8 @@ async def on_recognize_unknown_error(
) -> None:
span_attribute(CallAttributes.CALL_CHANNEL, "voice")

if error_code == 8511: # Failure while trying to play the prompt
if error_code == 8511: # noqa: PLR2004
# Failure while trying to play the prompt
logger.warning("Failed to play prompt")
else:
logger.warning(
@@ -245,7 +246,7 @@ async def on_recognize_unknown_error(
async def on_play_completed(
call: CallStateModel,
client: CallAutomationClient,
contexts: Optional[set[CallContextEnum]],
contexts: set[CallContextEnum] | None,
post_callback: Callable[[CallStateModel], Awaitable[None]],
) -> None:
logger.debug("Play completed")
@@ -279,15 +280,19 @@ async def on_play_error(error_code: int) -> None:
logger.debug("Play failed")
span_attribute(CallAttributes.CALL_CHANNEL, "voice")
# See: https://github.com/MicrosoftDocs/azure-docs/blob/main/articles/communication-services/how-tos/call-automation/play-action.md
if error_code == 8535: # Action failed, file format
if error_code == 8535: # noqa: PLR2004
# Action failed, file format
logger.warning("Error during media play, file format is invalid")
elif error_code == 8536: # Action failed, file downloaded
elif error_code == 8536: # noqa: PLR2004
# Action failed, file downloaded
logger.warning("Error during media play, file could not be downloaded")
elif error_code == 8565: # Action failed, AI services config
elif error_code == 8565: # noqa: PLR2004
# Action failed, AI services config
logger.error(
"Error during media play, impossible to connect with Azure AI services"
)
elif error_code == 9999: # Unknown
elif error_code == 9999: # noqa: PLR2004
# Unknown error code
logger.warning("Error during media play, unknown internal server error")
else:
logger.warning("Error during media play, unknown error code %s", error_code)
@@ -429,7 +434,7 @@ async def on_end_call(
Shortcut to run all post-call intelligence tasks in background.
"""
if (
len(call.messages) >= 3
len(call.messages) >= 3 # noqa: PLR2004
and call.messages[-3].action == MessageActionEnum.CALL
and call.messages[-2].persona == MessagePersonaEnum.ASSISTANT
and call.messages[-1].action == MessageActionEnum.HANGUP
@@ -451,11 +456,9 @@ async def _intelligence_sms(call: CallStateModel) -> None:
Send an SMS report to the customer.
"""

def _validate(req: Optional[str]) -> tuple[bool, Optional[str], Optional[str]]:
def _validate(req: str | None) -> tuple[bool, str | None, str | None]:
if not req:
return False, "No SMS content", None
if len(req) < 10:
return False, "SMS content too short", None
return True, None, req

content = await completion_sync(
@@ -503,8 +506,8 @@ async def _intelligence_synthesis(call: CallStateModel) -> None:
logger.debug("Synthesizing call")

def _validate(
req: Optional[str],
) -> tuple[bool, Optional[str], Optional[SynthesisModel]]:
req: str | None,
) -> tuple[bool, str | None, SynthesisModel | None]:
if not req:
return False, "Empty response", None
try:
@@ -534,8 +537,8 @@ async def _intelligence_next(call: CallStateModel) -> None:
logger.debug("Generating next action")

def _validate(
req: Optional[str],
) -> tuple[bool, Optional[str], Optional[NextModel]]:
req: str | None,
) -> tuple[bool, str | None, NextModel | None]:
if not req:
return False, "Empty response", None
try:
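The `_intelligence_*` helpers share a validator convention: a callable that receives the raw LLM output and returns an `(is_valid, error_message, parsed_value)` tuple, here modernized to `str | None` annotations. A self-contained sketch of that convention; the model fields and the use of Pydantic's JSON validation are assumptions for illustration:

```python
from pydantic import BaseModel, ValidationError


class SynthesisModel(BaseModel):
    # Field names are assumptions for illustration; the repository's own
    # SynthesisModel is not shown in this diff.
    satisfaction: str
    summary: str


def _validate(req: str | None) -> tuple[bool, str | None, SynthesisModel | None]:
    # Return (is_valid, error message, parsed value), the shape the
    # completion helpers above expect (Pydantic v2 API assumed).
    if not req:
        return False, "Empty response", None
    try:
        return True, None, SynthesisModel.model_validate_json(req)
    except ValidationError as e:
        return False, str(e), None
```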
14 changes: 7 additions & 7 deletions helpers/call_llm.py
@@ -1,6 +1,6 @@
import asyncio
import time
from typing import Awaitable, Callable
from collections.abc import Awaitable, Callable

from azure.communication.callautomation.aio import CallAutomationClient
from openai import APIError
@@ -35,8 +35,9 @@
_db = CONFIG.database.instance()


# TODO: Refactor, this function is too long (and remove PLR0912/PLR0915 ignore)
@tracer.start_as_current_span("call_load_llm_chat")
async def load_llm_chat(
async def load_llm_chat( # noqa: PLR0912, PLR0915
call: CallStateModel,
client: CallAutomationClient,
post_callback: Callable[[CallStateModel], Awaitable[None]],
@@ -171,9 +172,7 @@ def _clear_tasks() -> None:
text=await CONFIG.prompts.tts.timeout_loading(call),
)

elif (
loading_task.done()
): # Do not play timeout prompt plus loading, it can be frustrating for the user
elif loading_task.done(): # Do not play timeout prompt plus loading, it can be frustrating for the user
loading_task = _loading_task()
await handle_media(
call=call,
@@ -184,7 +183,7 @@ def _clear_tasks() -> None:
# Wait to not block the event loop for other requests
await asyncio.sleep(1)

except Exception: # pylint: disable=broad-exception-caught
except Exception:
logger.warning("Error loading intelligence", exc_info=True)

if is_error: # Error during chat
@@ -233,8 +232,9 @@ def _clear_tasks() -> None:
return call


# TODO: Refactor, this function is too long (and remove PLR0911/PLR0912/PLR0915 ignore)
@tracer.start_as_current_span("call_execute_llm_chat")
async def _execute_llm_chat(
async def _execute_llm_chat( # noqa: PLR0911, PLR0912, PLR0915
call: CallStateModel,
client: CallAutomationClient,
post_callback: Callable[[CallStateModel], Awaitable[None]],
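Besides the `noqa` markers, this file also moves `Awaitable` and `Callable` from `typing` to `collections.abc` (Ruff's UP035). A small runnable example of the callback signature style these helpers use; the callback itself is illustrative:

```python
import asyncio
from collections.abc import Awaitable, Callable


# After UP035, Awaitable/Callable come from collections.abc rather than
# typing, while the Callable[[...], Awaitable[None]] callback style stays.
async def notify(post_callback: Callable[[str], Awaitable[None]], call_id: str) -> None:
    await post_callback(call_id)


async def _print_callback(call_id: str) -> None:
    print(f"post event for {call_id}")  # noqa: T201


if __name__ == "__main__":
    asyncio.run(notify(_print_callback, "demo-call"))
```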