From 65ecc1206a77d713bbdaa890952f28a4bbbcad4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9mence=20Lesn=C3=A9?= Date: Fri, 6 Dec 2024 13:00:53 +0100 Subject: [PATCH 1/2] security: Upgrade deps --- uv.lock | 137 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 69 insertions(+), 68 deletions(-) diff --git a/uv.lock b/uv.lock index 6b49362d..e1e0c4cc 100644 --- a/uv.lock +++ b/uv.lock @@ -28,7 +28,7 @@ wheels = [ [[package]] name = "aiohttp" -version = "3.11.9" +version = "3.11.10" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohappyeyeballs" }, @@ -39,38 +39,38 @@ dependencies = [ { name = "propcache" }, { name = "yarl" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/3f/24/d5c0aed3ed90896f8505786e3a1e348fd9c61284ef21f54ee9cdf8b92e4f/aiohttp-3.11.9.tar.gz", hash = "sha256:a9266644064779840feec0e34f10a89b3ff1d2d6b751fe90017abcad1864fa7c", size = 7668012 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/fa/43/b3c28a7e8f8b5e8ef0bea9fcabe8e99787c70fa526e5bc8185fd89f46434/aiohttp-3.11.9-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:c1f2d7fd583fc79c240094b3e7237d88493814d4b300d013a42726c35a734bc9", size = 703661 }, - { url = "https://files.pythonhosted.org/packages/f3/2c/be4624671e5ed344fca9196d0823eb6a17383cbe13d051d22d3a1f6ecbf7/aiohttp-3.11.9-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d4b8a1b6c7a68c73191f2ebd3bf66f7ce02f9c374e309bdb68ba886bbbf1b938", size = 463054 }, - { url = "https://files.pythonhosted.org/packages/d6/21/8d14fa0bdae468ebe419df1764583ecc9e995a2ccd8a11ee8146a09fb5e5/aiohttp-3.11.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bd3f711f4c99da0091ced41dccdc1bcf8be0281dc314d6d9c6b6cf5df66f37a9", size = 455006 }, - { url = "https://files.pythonhosted.org/packages/42/de/3fc5e94a24bf079709e9fed3572ebb5efb32f0995baf08a985ee9f517b0b/aiohttp-3.11.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44cb1a1326a0264480a789e6100dc3e07122eb8cd1ad6b784a3d47d13ed1d89c", size = 1681364 }, - { url = "https://files.pythonhosted.org/packages/69/e0/bd9346efcdd3344284e4b4088bc2c720065176bd9180517bdc7097218903/aiohttp-3.11.9-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7a7ddf981a0b953ade1c2379052d47ccda2f58ab678fca0671c7c7ca2f67aac2", size = 1735986 }, - { url = "https://files.pythonhosted.org/packages/9b/a5/549ce29e21ebf555dcf5c81e19e6eb30eb8de26f8da304f05a28d6d66d8c/aiohttp-3.11.9-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6ffa45cc55b18d4ac1396d1ddb029f139b1d3480f1594130e62bceadf2e1a838", size = 1792263 }, - { url = "https://files.pythonhosted.org/packages/7a/2b/23124c04701e0d2e215be59bf445c33602b1ccc4d9acb7bccc2ec20c892d/aiohttp-3.11.9-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cca505829cdab58c2495ff418c96092d225a1bbd486f79017f6de915580d3c44", size = 1690838 }, - { url = "https://files.pythonhosted.org/packages/af/a6/ebb8be53787c57dd7dd8b9617357af60d603ccd2fbf7a9e306f33178894b/aiohttp-3.11.9-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44d323aa80a867cb6db6bebb4bbec677c6478e38128847f2c6b0f70eae984d72", size = 1618311 }, - { url = "https://files.pythonhosted.org/packages/9b/3c/cb8e5af30e33775539b4a6ea818eb16b0b01f68ce7a2fa77dff5df3dee80/aiohttp-3.11.9-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b2fab23003c4bb2249729a7290a76c1dda38c438300fdf97d4e42bf78b19c810", size = 1640417 }, - { url = 
"https://files.pythonhosted.org/packages/16/2d/62593ce65e5811ea46e521644e03d0c47345bf9b6c2e6efcb759915d6aa3/aiohttp-3.11.9-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:be0c7c98e38a1e3ad7a6ff64af8b6d6db34bf5a41b1478e24c3c74d9e7f8ed42", size = 1645507 }, - { url = "https://files.pythonhosted.org/packages/4f/6b/810981c99932665a225d7bdffacbda512dde6f11364ce11477662e457115/aiohttp-3.11.9-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5cc5e0d069c56645446c45a4b5010d4b33ac6c5ebfd369a791b5f097e46a3c08", size = 1701090 }, - { url = "https://files.pythonhosted.org/packages/1c/01/79c8d156534c034207ccbb94a51f1ae4a625834a31e27670175f1e1e79b2/aiohttp-3.11.9-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9bcf97b971289be69638d8b1b616f7e557e1342debc7fc86cf89d3f08960e411", size = 1733598 }, - { url = "https://files.pythonhosted.org/packages/c0/8f/873f0d3a47ec203ccd04dbd623f2428b6010ba6b11107aa9b44ad0ebfc86/aiohttp-3.11.9-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c7333e7239415076d1418dbfb7fa4df48f3a5b00f8fdf854fca549080455bc14", size = 1693573 }, - { url = "https://files.pythonhosted.org/packages/2f/8c/a4964108383eb8f0e5a85ee0fdc00f9f0bdf28bb6a751be05a63c047ccbe/aiohttp-3.11.9-cp312-cp312-win32.whl", hash = "sha256:9384b07cfd3045b37b05ed002d1c255db02fb96506ad65f0f9b776b762a7572e", size = 410354 }, - { url = "https://files.pythonhosted.org/packages/c8/9e/79aed1b3e110a02081ca47ba4a27d7e20040af241643a2e527c668634f22/aiohttp-3.11.9-cp312-cp312-win_amd64.whl", hash = "sha256:f5252ba8b43906f206048fa569debf2cd0da0316e8d5b4d25abe53307f573941", size = 436657 }, - { url = "https://files.pythonhosted.org/packages/33/ec/217d8918032703639d64360e4534a33899cc1a5eda89268d4fa621e18b67/aiohttp-3.11.9-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:282e0a7ddd36ebc411f156aeaa0491e8fe7f030e2a95da532cf0c84b0b70bc66", size = 696994 }, - { url = "https://files.pythonhosted.org/packages/48/e4/262211b96cba78614be9bae7086af0dba8e8050c43996f2912992173eb57/aiohttp-3.11.9-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ebd3e6b0c7d4954cca59d241970011f8d3327633d555051c430bd09ff49dc494", size = 459669 }, - { url = "https://files.pythonhosted.org/packages/51/f5/ef76735af2d69671aa8cb185c07da84973a2ca74bb44af9fdb980207118f/aiohttp-3.11.9-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:30f9f89ae625d412043f12ca3771b2ccec227cc93b93bb1f994db6e1af40a7d3", size = 451949 }, - { url = "https://files.pythonhosted.org/packages/ba/83/867487d4ca86327060b93f3eea70963996a7ebb0c16f61c214f801351d4a/aiohttp-3.11.9-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a3b5b2c012d70c63d9d13c57ed1603709a4d9d7d473e4a9dfece0e4ea3d5f51", size = 1664171 }, - { url = "https://files.pythonhosted.org/packages/ca/7d/b185b4b6b01bf66bcaf1b23afff3073fc85d2f0765203269ee4976be2cf8/aiohttp-3.11.9-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6ef1550bb5f55f71b97a6a395286db07f7f2c01c8890e613556df9a51da91e8d", size = 1716933 }, - { url = "https://files.pythonhosted.org/packages/a9/b3/70d7f26a874e96f932237e53017b048ecd754f06a29947bdf7ce39cade98/aiohttp-3.11.9-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:317251b9c9a2f1a9ff9cd093775b34c6861d1d7df9439ce3d32a88c275c995cd", size = 1774117 }, - { url = "https://files.pythonhosted.org/packages/a5/6e/457acf09ac5bd6db5ae8b1fa68beb3000c989a2a20dc265a507123f7a689/aiohttp-3.11.9-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:21cbe97839b009826a61b143d3ca4964c8590d7aed33d6118125e5b71691ca46", size = 1676168 }, - { url = "https://files.pythonhosted.org/packages/e8/e8/2b4719633d0a8189dfce343af800d23163b8831cb5aa175d4c400b03895b/aiohttp-3.11.9-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:618b18c3a2360ac940a5503da14fa4f880c5b9bc315ec20a830357bcc62e6bae", size = 1602187 }, - { url = "https://files.pythonhosted.org/packages/d8/0c/8938b85edaf0a8fee2ede7bbffd32e09b056475f7586b0852973749c5fff/aiohttp-3.11.9-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a0cf4d814689e58f57ecd5d8c523e6538417ca2e72ff52c007c64065cef50fb2", size = 1617286 }, - { url = "https://files.pythonhosted.org/packages/1e/5c/825714aa554c4ef331a8c1a16b3183c5e4bf27c66073955d4f51344907dc/aiohttp-3.11.9-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:15c4e489942d987d5dac0ba39e5772dcbed4cc9ae3710d1025d5ba95e4a5349c", size = 1615518 }, - { url = "https://files.pythonhosted.org/packages/c8/1c/6c821e7cf956e833a72a5284ff19484c7dedb749224e16fda297fa38bbc2/aiohttp-3.11.9-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:ec8df0ff5a911c6d21957a9182402aad7bf060eaeffd77c9ea1c16aecab5adbf", size = 1684466 }, - { url = "https://files.pythonhosted.org/packages/6b/47/3e921cbf7d7c4edfe95ddb7e8315a8f3645d824863ef2c2eab5dfa0342bc/aiohttp-3.11.9-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:ed95d66745f53e129e935ad726167d3a6cb18c5d33df3165974d54742c373868", size = 1714304 }, - { url = "https://files.pythonhosted.org/packages/25/89/e68e3efd357f233265abcf22c48c4d1e81f992f264cd4dc69b96c5a13c47/aiohttp-3.11.9-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:647ec5bee7e4ec9f1034ab48173b5fa970d9a991e565549b965e93331f1328fe", size = 1671774 }, - { url = "https://files.pythonhosted.org/packages/79/e1/4adaed8c8ef93c2ae54b001cd0e8dd6c84b40044038acb322b649150dc96/aiohttp-3.11.9-cp313-cp313-win32.whl", hash = "sha256:ef2c9499b7bd1e24e473dc1a85de55d72fd084eea3d8bdeec7ee0720decb54fa", size = 409216 }, - { url = "https://files.pythonhosted.org/packages/00/9b/bf33704ac9b438d6dad417f86f1e9439e2538180189b0e347a95ff819011/aiohttp-3.11.9-cp313-cp313-win_amd64.whl", hash = "sha256:84de955314aa5e8d469b00b14d6d714b008087a0222b0f743e7ffac34ef56aff", size = 435069 }, +sdist = { url = "https://files.pythonhosted.org/packages/94/c4/3b5a937b16f6c2a0ada842a9066aad0b7a5708427d4a202a07bf09c67cbb/aiohttp-3.11.10.tar.gz", hash = "sha256:b1fc6b45010a8d0ff9e88f9f2418c6fd408c99c211257334aff41597ebece42e", size = 7668832 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/17/1dbe2f619f77795409c1a13ab395b98ed1b215d3e938cacde9b8ffdac53d/aiohttp-3.11.10-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b78f053a7ecfc35f0451d961dacdc671f4bcbc2f58241a7c820e9d82559844cf", size = 704448 }, + { url = "https://files.pythonhosted.org/packages/e3/9b/112247ad47e9d7f6640889c6e42cc0ded8c8345dd0033c66bcede799b051/aiohttp-3.11.10-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ab7485222db0959a87fbe8125e233b5a6f01f4400785b36e8a7878170d8c3138", size = 463829 }, + { url = "https://files.pythonhosted.org/packages/8a/36/a64b583771fc673062a7a1374728a6241d49e2eda5a9041fbf248e18c804/aiohttp-3.11.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cf14627232dfa8730453752e9cdc210966490992234d77ff90bc8dc0dce361d5", size = 455774 }, + { url = 
"https://files.pythonhosted.org/packages/e5/75/ee1b8f510978b3de5f185c62535b135e4fc3f5a247ca0c2245137a02d800/aiohttp-3.11.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:076bc454a7e6fd646bc82ea7f98296be0b1219b5e3ef8a488afbdd8e81fbac50", size = 1682134 }, + { url = "https://files.pythonhosted.org/packages/87/46/65e8259432d5f73ca9ebf5edb645ef90e5303724e4e52477516cb4042240/aiohttp-3.11.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:482cafb7dc886bebeb6c9ba7925e03591a62ab34298ee70d3dd47ba966370d2c", size = 1736757 }, + { url = "https://files.pythonhosted.org/packages/03/f6/a6d1e791b7153fb2d101278f7146c0771b0e1569c547f8a8bc3035651984/aiohttp-3.11.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf3d1a519a324af764a46da4115bdbd566b3c73fb793ffb97f9111dbc684fc4d", size = 1793033 }, + { url = "https://files.pythonhosted.org/packages/a8/e9/1ac90733e36e7848693aece522936a13bf17eeb617da662f94adfafc1c25/aiohttp-3.11.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24213ba85a419103e641e55c27dc7ff03536c4873470c2478cce3311ba1eee7b", size = 1691609 }, + { url = "https://files.pythonhosted.org/packages/6d/a6/77b33da5a0bc04566c7ddcca94500f2c2a2334eecab4885387fffd1fc600/aiohttp-3.11.10-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b99acd4730ad1b196bfb03ee0803e4adac371ae8efa7e1cbc820200fc5ded109", size = 1619082 }, + { url = "https://files.pythonhosted.org/packages/48/94/5bf5f927d9a2fedd2c978adfb70a3680e16f46d178361685b56244eb52ed/aiohttp-3.11.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:14cdb5a9570be5a04eec2ace174a48ae85833c2aadc86de68f55541f66ce42ab", size = 1641186 }, + { url = "https://files.pythonhosted.org/packages/99/2d/e85103aa01d1064e51bc50cb51e7b40150a8ff5d34e5a3173a46b241860b/aiohttp-3.11.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:7e97d622cb083e86f18317282084bc9fbf261801b0192c34fe4b1febd9f7ae69", size = 1646280 }, + { url = "https://files.pythonhosted.org/packages/7b/e0/44651fda8c1d865a51b3a81f1956ea55ce16fc568fe7a3e05db7fc22f139/aiohttp-3.11.10-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:012f176945af138abc10c4a48743327a92b4ca9adc7a0e078077cdb5dbab7be0", size = 1701862 }, + { url = "https://files.pythonhosted.org/packages/4e/1e/0804459ae325a5b95f6f349778fb465f29d2b863e522b6a349db0aaad54c/aiohttp-3.11.10-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44224d815853962f48fe124748227773acd9686eba6dc102578defd6fc99e8d9", size = 1734373 }, + { url = "https://files.pythonhosted.org/packages/07/87/b8f6721668cad74bcc9c7cfe6d0230b304d1250196b221e54294a0d78dbe/aiohttp-3.11.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c87bf31b7fdab94ae3adbe4a48e711bfc5f89d21cf4c197e75561def39e223bc", size = 1694343 }, + { url = "https://files.pythonhosted.org/packages/4b/20/42813fc60d9178ba9b1b86c58a5441ddb6cf8ffdfe66387345bff173bcff/aiohttp-3.11.10-cp312-cp312-win32.whl", hash = "sha256:06a8e2ee1cbac16fe61e51e0b0c269400e781b13bcfc33f5425912391a542985", size = 411118 }, + { url = "https://files.pythonhosted.org/packages/3a/51/df9c263c861ce93998b5ad2ba3212caab2112d5b66dbe91ddbe90c41ded4/aiohttp-3.11.10-cp312-cp312-win_amd64.whl", hash = "sha256:be2b516f56ea883a3e14dda17059716593526e10fb6303189aaf5503937db408", size = 437424 }, + { url = 
"https://files.pythonhosted.org/packages/8c/1d/88bfdbe28a3d1ba5b94a235f188f27726caf8ade9a0e13574848f44fe0fe/aiohttp-3.11.10-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8cc5203b817b748adccb07f36390feb730b1bc5f56683445bfe924fc270b8816", size = 697755 }, + { url = "https://files.pythonhosted.org/packages/86/00/4c4619d6fe5c5be32f74d1422fc719b3e6cd7097af0c9e03877ca9bd4ebc/aiohttp-3.11.10-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5ef359ebc6949e3a34c65ce20230fae70920714367c63afd80ea0c2702902ccf", size = 460440 }, + { url = "https://files.pythonhosted.org/packages/aa/1c/2f927408f50593a29465d198ec3c57c835c8602330233163e8d89c1093db/aiohttp-3.11.10-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9bca390cb247dbfaec3c664326e034ef23882c3f3bfa5fbf0b56cad0320aaca5", size = 452726 }, + { url = "https://files.pythonhosted.org/packages/06/6a/ff00ed0a2ba45c34b3c366aa5b0004b1a4adcec5a9b5f67dd0648ee1c88a/aiohttp-3.11.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:811f23b3351ca532af598405db1093f018edf81368e689d1b508c57dcc6b6a32", size = 1664944 }, + { url = "https://files.pythonhosted.org/packages/02/c2/61923f2a7c2e14d7424b3a526e054f0358f57ccdf5573d4d3d033b01921a/aiohttp-3.11.10-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddf5f7d877615f6a1e75971bfa5ac88609af3b74796ff3e06879e8422729fd01", size = 1717707 }, + { url = "https://files.pythonhosted.org/packages/8a/08/0d3d074b24d377569ec89d476a95ca918443099c0401bb31b331104e35d1/aiohttp-3.11.10-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6ab29b8a0beb6f8eaf1e5049252cfe74adbaafd39ba91e10f18caeb0e99ffb34", size = 1774890 }, + { url = "https://files.pythonhosted.org/packages/e8/49/052ada2b6e90ed65f0e6a7e548614621b5f8dcd193cb9415d2e6bcecc94a/aiohttp-3.11.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c49a76c1038c2dd116fa443eba26bbb8e6c37e924e2513574856de3b6516be99", size = 1676945 }, + { url = "https://files.pythonhosted.org/packages/7c/9e/0c48e1a48e072a869b8b5e3920c9f6a8092861524a4a6f159cd7e6fda939/aiohttp-3.11.10-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f3dc0e330575f5b134918976a645e79adf333c0a1439dcf6899a80776c9ab39", size = 1602959 }, + { url = "https://files.pythonhosted.org/packages/ab/98/791f979093ff7f67f80344c182cb0ca4c2c60daed397ecaf454cc8d7a5cd/aiohttp-3.11.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:efb15a17a12497685304b2d976cb4939e55137df7b09fa53f1b6a023f01fcb4e", size = 1618058 }, + { url = "https://files.pythonhosted.org/packages/7b/5d/2d4b05feb3fd68eb7c8335f73c81079b56e582633b91002da695ccb439ef/aiohttp-3.11.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:db1d0b28fcb7f1d35600150c3e4b490775251dea70f894bf15c678fdd84eda6a", size = 1616289 }, + { url = "https://files.pythonhosted.org/packages/50/83/68cc28c00fe681dce6150614f105efe98282da19252cd6e32dfa893bb328/aiohttp-3.11.10-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:15fccaf62a4889527539ecb86834084ecf6e9ea70588efde86e8bc775e0e7542", size = 1685239 }, + { url = "https://files.pythonhosted.org/packages/16/f9/68fc5c8928f63238ce9314f04f3f59d9190a4db924998bb9be99c7aacce8/aiohttp-3.11.10-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:593c114a2221444f30749cc5e5f4012488f56bd14de2af44fe23e1e9894a9c60", size = 1715078 }, + { url = 
"https://files.pythonhosted.org/packages/3f/e0/3dd3f0451c532c77e35780bafb2b6469a046bc15a6ec2e039475a1d2f161/aiohttp-3.11.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7852bbcb4d0d2f0c4d583f40c3bc750ee033265d80598d0f9cb6f372baa6b836", size = 1672544 }, + { url = "https://files.pythonhosted.org/packages/a5/b1/3530ab040dd5d7fb016b47115016f9b3a07ea29593b0e07e53dbe06a380c/aiohttp-3.11.10-cp313-cp313-win32.whl", hash = "sha256:65e55ca7debae8faaffee0ebb4b47a51b4075f01e9b641c31e554fd376595c6c", size = 409984 }, + { url = "https://files.pythonhosted.org/packages/49/1f/deed34e9fca639a7f873d01150d46925d3e1312051eaa591c1aa1f2e6ddc/aiohttp-3.11.10-cp313-cp313-win_amd64.whl", hash = "sha256:beb39a6d60a709ae3fb3516a1581777e7e8b76933bb88c8f4420d875bb0267c6", size = 435837 }, ] [package.optional-dependencies] @@ -136,15 +136,16 @@ wheels = [ [[package]] name = "anyio" -version = "4.6.2.post1" +version = "4.7.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "idna" }, { name = "sniffio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/9f/09/45b9b7a6d4e45c6bcb5bf61d19e3ab87df68e0601fa8c5293de3542546cc/anyio-4.6.2.post1.tar.gz", hash = "sha256:4c8bc31ccdb51c7f7bd251f51c609e038d63e34219b44aa86e47576389880b4c", size = 173422 } +sdist = { url = "https://files.pythonhosted.org/packages/f6/40/318e58f669b1a9e00f5c4453910682e2d9dd594334539c7b7817dabb765f/anyio-4.7.0.tar.gz", hash = "sha256:2f834749c602966b7d456a7567cafcb309f96482b5081d14ac93ccd457f9dd48", size = 177076 } wheels = [ - { url = "https://files.pythonhosted.org/packages/e4/f5/f2b75d2fc6f1a260f340f0e7c6a060f4dd2961cc16884ed851b0d18da06a/anyio-4.6.2.post1-py3-none-any.whl", hash = "sha256:6d170c36fba3bdd840c73d3868c1e777e33676a69c3a72cf0a0d5d6d8009b61d", size = 90377 }, + { url = "https://files.pythonhosted.org/packages/a0/7a/4daaf3b6c08ad7ceffea4634ec206faeff697526421c20f07628c7372156/anyio-4.7.0-py3-none-any.whl", hash = "sha256:ea60c3723ab42ba6fff7e8ccb0488c898ec538ff4df1f1d5e642c3601d07e352", size = 93052 }, ] [[package]] @@ -1625,7 +1626,7 @@ wheels = [ [[package]] name = "openai" -version = "1.56.2" +version = "1.57.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -1637,9 +1638,9 @@ dependencies = [ { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e1/97/302669f5888d1adf8ce0e93f1e5b2337b6c9db47d9877bd344a29db314be/openai-1.56.2.tar.gz", hash = "sha256:17312af69bc7670d4048f98ab5849f8784d98c39ac64fcde19406e3774a0c1e5", size = 315404 } +sdist = { url = "https://files.pythonhosted.org/packages/fa/64/4acd9331b3c0e1069f36692d4c29d2c8deea6649a1e150f45a096f91b339/openai-1.57.0.tar.gz", hash = "sha256:76f91971c4bdbd78380c9970581075e0337b5d497c2fbf7b5255078f4b31abf9", size = 315514 } wheels = [ - { url = "https://files.pythonhosted.org/packages/23/36/c60fffa518d82952335e2ef4a6b93b0427c57eb49469879dee1cbe59d551/openai-1.56.2-py3-none-any.whl", hash = "sha256:82d0c48f9504e04c7797e9b799dcf7f49a246d99b6cbfd90f3193ea80815b69e", size = 389854 }, + { url = "https://files.pythonhosted.org/packages/ab/2d/eb8539a2d5809eb78508633a8faa8df7745960e99af0388310c43b2c0be1/openai-1.57.0-py3-none-any.whl", hash = "sha256:972e36960b821797952da3dc4532f486c28e28a2a332d7d0c5407f242e9d9c39", size = 389854 }, ] [[package]] @@ -2547,18 +2548,18 @@ dependencies = [ { name = "pysbd" }, { name = "tiktoken" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/95/20/b6e0e43897e5cf66659443aa647521d61c8045bb57930933bc63fed45c29/ragas-0.2.6.tar.gz", hash = "sha256:877e723e4bbf29eab8e1b12f7bf6f63bb2145d63ea4c3ce21620b14f9dbfb421", size = 5035876 } +sdist = { url = "https://files.pythonhosted.org/packages/18/34/412edad89a2a3be2f401c1c8724715a68a4f82dd4185f10debbae74efbb3/ragas-0.2.7.tar.gz", hash = "sha256:26137158db551ff32b90b6b225675c2f902ba12cb833a4e7adbef0bfa5c8353a", size = 5054047 } wheels = [ - { url = "https://files.pythonhosted.org/packages/8b/1a/7f4fba14367ba769cf606edd017993faf919b2bbaef3e88f0929daed8b00/ragas-0.2.6-py3-none-any.whl", hash = "sha256:2d40a6af196df7346486e2eeb203bb0a542efa0827e839812f6c66123fd3319f", size = 157481 }, + { url = "https://files.pythonhosted.org/packages/ac/18/c2427061ee910fc8f8b21e7f94cb29e8068bdaf9b94c5a28d53760a23579/ragas-0.2.7-py3-none-any.whl", hash = "sha256:1a06fa50bcf80e23dcccd36c41d0b601f1caa93155260de8c0879f0a8231e099", size = 163157 }, ] [[package]] name = "redis" -version = "5.2.0" +version = "5.2.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/53/17/2f4a87ffa4cd93714cf52edfa3ea94589e9de65f71e9f99cbcfa84347a53/redis-5.2.0.tar.gz", hash = "sha256:0b1087665a771b1ff2e003aa5bdd354f15a70c9e25d5a7dbf9c722c16528a7b0", size = 4607878 } +sdist = { url = "https://files.pythonhosted.org/packages/47/da/d283a37303a995cd36f8b92db85135153dc4f7a8e4441aa827721b442cfb/redis-5.2.1.tar.gz", hash = "sha256:16f2e22dff21d5125e8481515e386711a34cbec50f0e44413dd7d9c060a54e0f", size = 4608355 } wheels = [ - { url = "https://files.pythonhosted.org/packages/12/f5/ffa560ecc4bafbf25f7961c3d6f50d627a90186352e27e7d0ba5b1f6d87d/redis-5.2.0-py3-none-any.whl", hash = "sha256:ae174f2bb3b1bf2b09d54bf3e51fbc1469cf6c10aa03e21141f51969801a7897", size = 261428 }, + { url = "https://files.pythonhosted.org/packages/3c/5f/fa26b9b2672cbe30e07d9a5bdf39cf16e3b80b42916757c5f92bca88e4ba/redis-5.2.1-py3-none-any.whl", hash = "sha256:ee7e1056b9aea0f04c6c2ed59452947f34c4940ee025f5dd83e6a6418b6989e4", size = 261502 }, ] [[package]] @@ -2667,40 +2668,40 @@ wheels = [ [[package]] name = "ruff" -version = "0.8.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/95/d0/8ff5b189d125f4260f2255d143bf2fa413b69c2610c405ace7a0a8ec81ec/ruff-0.8.1.tar.gz", hash = "sha256:3583db9a6450364ed5ca3f3b4225958b24f78178908d5c4bc0f46251ccca898f", size = 3313222 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/a2/d6/1a6314e568db88acdbb5121ed53e2c52cebf3720d3437a76f82f923bf171/ruff-0.8.1-py3-none-linux_armv6l.whl", hash = "sha256:fae0805bd514066f20309f6742f6ee7904a773eb9e6c17c45d6b1600ca65c9b5", size = 10532605 }, - { url = "https://files.pythonhosted.org/packages/89/a8/a957a8812e31facffb6a26a30be0b5b4af000a6e30c7d43a22a5232a3398/ruff-0.8.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b8a4f7385c2285c30f34b200ca5511fcc865f17578383db154e098150ce0a087", size = 10278243 }, - { url = "https://files.pythonhosted.org/packages/a8/23/9db40fa19c453fabf94f7a35c61c58f20e8200b4734a20839515a19da790/ruff-0.8.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:cd054486da0c53e41e0086e1730eb77d1f698154f910e0cd9e0d64274979a209", size = 9917739 }, - { url = "https://files.pythonhosted.org/packages/e2/a0/6ee2d949835d5701d832fc5acd05c0bfdad5e89cfdd074a171411f5ccad5/ruff-0.8.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2029b8c22da147c50ae577e621a5bfbc5d1fed75d86af53643d7a7aee1d23871", 
size = 10779153 }, - { url = "https://files.pythonhosted.org/packages/7a/25/9c11dca9404ef1eb24833f780146236131a3c7941de394bc356912ef1041/ruff-0.8.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2666520828dee7dfc7e47ee4ea0d928f40de72056d929a7c5292d95071d881d1", size = 10304387 }, - { url = "https://files.pythonhosted.org/packages/c8/b9/84c323780db1b06feae603a707d82dbbd85955c8c917738571c65d7d5aff/ruff-0.8.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:333c57013ef8c97a53892aa56042831c372e0bb1785ab7026187b7abd0135ad5", size = 11360351 }, - { url = "https://files.pythonhosted.org/packages/6b/e1/9d4bbb2ace7aad14ded20e4674a48cda5b902aed7a1b14e6b028067060c4/ruff-0.8.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:288326162804f34088ac007139488dcb43de590a5ccfec3166396530b58fb89d", size = 12022879 }, - { url = "https://files.pythonhosted.org/packages/75/28/752ff6120c0e7f9981bc4bc275d540c7f36db1379ba9db9142f69c88db21/ruff-0.8.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b12c39b9448632284561cbf4191aa1b005882acbc81900ffa9f9f471c8ff7e26", size = 11610354 }, - { url = "https://files.pythonhosted.org/packages/ba/8c/967b61c2cc8ebd1df877607fbe462bc1e1220b4a30ae3352648aec8c24bd/ruff-0.8.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:364e6674450cbac8e998f7b30639040c99d81dfb5bbc6dfad69bc7a8f916b3d1", size = 12813976 }, - { url = "https://files.pythonhosted.org/packages/7f/29/e059f945d6bd2d90213387b8c360187f2fefc989ddcee6bbf3c241329b92/ruff-0.8.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b22346f845fec132aa39cd29acb94451d030c10874408dbf776af3aaeb53284c", size = 11154564 }, - { url = "https://files.pythonhosted.org/packages/55/47/cbd05e5a62f3fb4c072bc65c1e8fd709924cad1c7ec60a1000d1e4ee8307/ruff-0.8.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:b2f2f7a7e7648a2bfe6ead4e0a16745db956da0e3a231ad443d2a66a105c04fa", size = 10760604 }, - { url = "https://files.pythonhosted.org/packages/bb/ee/4c3981c47147c72647a198a94202633130cfda0fc95cd863a553b6f65c6a/ruff-0.8.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:adf314fc458374c25c5c4a4a9270c3e8a6a807b1bec018cfa2813d6546215540", size = 10391071 }, - { url = "https://files.pythonhosted.org/packages/6b/e6/083eb61300214590b188616a8ac6ae1ef5730a0974240fb4bec9c17de78b/ruff-0.8.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:a885d68342a231b5ba4d30b8c6e1b1ee3a65cf37e3d29b3c74069cdf1ee1e3c9", size = 10896657 }, - { url = "https://files.pythonhosted.org/packages/77/bd/aacdb8285d10f1b943dbeb818968efca35459afc29f66ae3bd4596fbf954/ruff-0.8.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:d2c16e3508c8cc73e96aa5127d0df8913d2290098f776416a4b157657bee44c5", size = 11228362 }, - { url = "https://files.pythonhosted.org/packages/39/72/fcb7ad41947f38b4eaa702aca0a361af0e9c2bf671d7fd964480670c297e/ruff-0.8.1-py3-none-win32.whl", hash = "sha256:93335cd7c0eaedb44882d75a7acb7df4b77cd7cd0d2255c93b28791716e81790", size = 8803476 }, - { url = "https://files.pythonhosted.org/packages/e4/ea/cae9aeb0f4822c44651c8407baacdb2e5b4dcd7b31a84e1c5df33aa2cc20/ruff-0.8.1-py3-none-win_amd64.whl", hash = "sha256:2954cdbe8dfd8ab359d4a30cd971b589d335a44d444b6ca2cb3d1da21b75e4b6", size = 9614463 }, - { url = "https://files.pythonhosted.org/packages/eb/76/fbb4bd23dfb48fa7758d35b744413b650a9fd2ddd93bca77e30376864414/ruff-0.8.1-py3-none-win_arm64.whl", hash = "sha256:55873cc1a473e5ac129d15eccb3c008c096b94809d693fc7053f588b67822737", size = 
8959621 }, +version = "0.8.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5e/2b/01245f4f3a727d60bebeacd7ee6d22586c7f62380a2597ddb22c2f45d018/ruff-0.8.2.tar.gz", hash = "sha256:b84f4f414dda8ac7f75075c1fa0b905ac0ff25361f42e6d5da681a465e0f78e5", size = 3349020 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/91/29/366be70216dba1731a00a41f2f030822b0c96c7c4f3b2c0cdce15cbace74/ruff-0.8.2-py3-none-linux_armv6l.whl", hash = "sha256:c49ab4da37e7c457105aadfd2725e24305ff9bc908487a9bf8d548c6dad8bb3d", size = 10530649 }, + { url = "https://files.pythonhosted.org/packages/63/82/a733956540bb388f00df5a3e6a02467b16c0e529132625fe44ce4c5fb9c7/ruff-0.8.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:ec016beb69ac16be416c435828be702ee694c0d722505f9c1f35e1b9c0cc1bf5", size = 10274069 }, + { url = "https://files.pythonhosted.org/packages/3d/12/0b3aa14d1d71546c988a28e1b412981c1b80c8a1072e977a2f30c595cc4a/ruff-0.8.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:f05cdf8d050b30e2ba55c9b09330b51f9f97d36d4673213679b965d25a785f3c", size = 9909400 }, + { url = "https://files.pythonhosted.org/packages/23/08/f9f08cefb7921784c891c4151cce6ed357ff49e84b84978440cffbc87408/ruff-0.8.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:60f578c11feb1d3d257b2fb043ddb47501ab4816e7e221fbb0077f0d5d4e7b6f", size = 10766782 }, + { url = "https://files.pythonhosted.org/packages/e4/71/bf50c321ec179aa420c8ec40adac5ae9cc408d4d37283a485b19a2331ceb/ruff-0.8.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cbd5cf9b0ae8f30eebc7b360171bd50f59ab29d39f06a670b3e4501a36ba5897", size = 10286316 }, + { url = "https://files.pythonhosted.org/packages/f2/83/c82688a2a6117539aea0ce63fdf6c08e60fe0202779361223bcd7f40bd74/ruff-0.8.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b402ddee3d777683de60ff76da801fa7e5e8a71038f57ee53e903afbcefdaa58", size = 11338270 }, + { url = "https://files.pythonhosted.org/packages/7f/d7/bc6a45e5a22e627640388e703160afb1d77c572b1d0fda8b4349f334fc66/ruff-0.8.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:705832cd7d85605cb7858d8a13d75993c8f3ef1397b0831289109e953d833d29", size = 12058579 }, + { url = "https://files.pythonhosted.org/packages/da/3b/64150c93946ec851e6f1707ff586bb460ca671581380c919698d6a9267dc/ruff-0.8.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:32096b41aaf7a5cc095fa45b4167b890e4c8d3fd217603f3634c92a541de7248", size = 11615172 }, + { url = "https://files.pythonhosted.org/packages/e4/9e/cf12b697ea83cfe92ec4509ae414dc4c9b38179cc681a497031f0d0d9a8e/ruff-0.8.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e769083da9439508833cfc7c23e351e1809e67f47c50248250ce1ac52c21fb93", size = 12882398 }, + { url = "https://files.pythonhosted.org/packages/a9/27/96d10863accf76a9c97baceac30b0a52d917eb985a8ac058bd4636aeede0/ruff-0.8.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fe716592ae8a376c2673fdfc1f5c0c193a6d0411f90a496863c99cd9e2ae25d", size = 11176094 }, + { url = "https://files.pythonhosted.org/packages/eb/10/cd2fd77d4a4e7f03c29351be0f53278a393186b540b99df68beb5304fddd/ruff-0.8.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:81c148825277e737493242b44c5388a300584d73d5774defa9245aaef55448b0", size = 10771884 }, + { url = 
"https://files.pythonhosted.org/packages/71/5d/beabb2ff18870fc4add05fa3a69a4cb1b1d2d6f83f3cf3ae5ab0d52f455d/ruff-0.8.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d261d7850c8367704874847d95febc698a950bf061c9475d4a8b7689adc4f7fa", size = 10382535 }, + { url = "https://files.pythonhosted.org/packages/ae/29/6b3fdf3ad3e35b28d87c25a9ff4c8222ad72485ab783936b2b267250d7a7/ruff-0.8.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:1ca4e3a87496dc07d2427b7dd7ffa88a1e597c28dad65ae6433ecb9f2e4f022f", size = 10886995 }, + { url = "https://files.pythonhosted.org/packages/e9/dc/859d889b4d9356a1a2cdbc1e4a0dda94052bc5b5300098647e51a58c430b/ruff-0.8.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:729850feed82ef2440aa27946ab39c18cb4a8889c1128a6d589ffa028ddcfc22", size = 11220750 }, + { url = "https://files.pythonhosted.org/packages/0b/08/e8f519f61f1d624264bfd6b8829e4c5f31c3c61193bc3cff1f19dbe7626a/ruff-0.8.2-py3-none-win32.whl", hash = "sha256:ac42caaa0411d6a7d9594363294416e0e48fc1279e1b0e948391695db2b3d5b1", size = 8729396 }, + { url = "https://files.pythonhosted.org/packages/f8/d4/ba1c7ab72aba37a2b71fe48ab95b80546dbad7a7f35ea28cf66fc5cea5f6/ruff-0.8.2-py3-none-win_amd64.whl", hash = "sha256:2aae99ec70abf43372612a838d97bfe77d45146254568d94926e8ed5bbb409ea", size = 9594729 }, + { url = "https://files.pythonhosted.org/packages/23/34/db20e12d3db11b8a2a8874258f0f6d96a9a4d631659d54575840557164c8/ruff-0.8.2-py3-none-win_arm64.whl", hash = "sha256:fb88e2a506b70cfbc2de6fae6681c4f944f7dd5f2fe87233a7233d888bad73e8", size = 9035131 }, ] [[package]] name = "sentry-sdk" -version = "2.19.0" +version = "2.19.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "certifi" }, { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a2/0e/cc0e60f0e0cfd5a9e42622ff5a227301c6475a56bcfa82e8e893bc209f20/sentry_sdk-2.19.0.tar.gz", hash = "sha256:ee4a4d2ae8bfe3cac012dcf3e4607975904c137e1738116549fc3dbbb6ff0e36", size = 298045 } +sdist = { url = "https://files.pythonhosted.org/packages/36/4a/eccdcb8c2649d53440ae1902447b86e2e2ad1bc84207c80af9696fa07614/sentry_sdk-2.19.2.tar.gz", hash = "sha256:467df6e126ba242d39952375dd816fbee0f217d119bf454a8ce74cf1e7909e8d", size = 299047 } wheels = [ - { url = "https://files.pythonhosted.org/packages/c6/6b/191ca63f05d3ecc7600b5b3abd493a4c1b8468289c9737a7735ade1fedca/sentry_sdk-2.19.0-py2.py3-none-any.whl", hash = "sha256:7b0b3b709dee051337244a09a30dbf6e95afe0d34a1f8b430d45e0982a7c125b", size = 322158 }, + { url = "https://files.pythonhosted.org/packages/31/4d/74597bb6bcc23abc774b8901277652c61331a9d4d0a8d1bdb20679b9bbcb/sentry_sdk-2.19.2-py2.py3-none-any.whl", hash = "sha256:ebdc08228b4d131128e568d696c210d846e5b9d70aa0327dec6b1272d9d40b84", size = 322942 }, ] [[package]] From ce57ca6c286d1e34de28ca55a10f1f85cfc4f9fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9mence=20Lesn=C3=A9?= Date: Fri, 6 Dec 2024 12:58:57 +0100 Subject: [PATCH 2/2] perf+fix+breaking+refacto: Interaction timeout, db perfs, remove SQLite --- README.md | 4 +- app/helpers/call_events.py | 161 ++++++++----- app/helpers/call_llm.py | 114 ++++----- app/helpers/call_utils.py | 10 +- app/helpers/config_models/cache.py | 2 +- app/helpers/config_models/database.py | 61 +---- app/helpers/config_models/root.py | 2 +- app/helpers/features.py | 6 +- app/helpers/llm_tools.py | 26 +- app/helpers/translation.py | 4 +- app/main.py | 68 +++--- app/models/call.py | 26 +- app/models/message.py | 21 +- app/persistence/ai_search.py | 12 +- app/persistence/communication_services.py | 10 
+- app/persistence/cosmos_db.py | 155 +++++++++--- app/persistence/icache.py | 16 +- app/persistence/isearch.py | 8 +- app/persistence/isms.py | 8 +- app/persistence/istore.py | 26 +- app/persistence/memory.py | 8 +- app/persistence/redis.py | 8 +- app/persistence/sqlite.py | 274 ---------------------- app/persistence/twilio.py | 6 +- cicd/bicep/app.bicep | 3 +- pyproject.toml | 2 - tests/cache.py | 6 +- tests/llm.py | 5 + tests/store.py | 32 +-- uv.lock | 32 +-- 30 files changed, 465 insertions(+), 651 deletions(-) delete mode 100644 app/persistence/sqlite.py diff --git a/README.md b/README.md index 4b13cd52..9fc52f65 100644 --- a/README.md +++ b/README.md @@ -179,7 +179,7 @@ graph LR ada["Embedding
(ADA)"]
    app["App<br>(Container App)"]
    communication_services["Call & SMS gateway<br>(Communication Services)"]
-    db[("Conversations and claims<br>(Cosmos DB / SQLite)")]
+    db[("Conversations and claims<br>(Cosmos DB)")]
    eventgrid["Broker<br>(Event Grid)"]
    gpt["LLM<br>(GPT-4o)"]
    queues[("Queues
(Azure Storage)")] @@ -627,7 +627,7 @@ Conversation options are represented as features. They can be configured from Ap | `answer_hard_timeout_sec` | The hard timeout for the bot answer in seconds. | `int` | 180 | | `answer_soft_timeout_sec` | The soft timeout for the bot answer in seconds. | `int` | 30 | | `callback_timeout_hour` | The timeout for a callback in hours. | `int` | 3 | -| `phone_silence_timeout_sec` | The timeout for phone silence in seconds. | `int` | 10 | +| `phone_silence_timeout_sec` | The timeout for phone silence in seconds. | `int` | 20 | | `recognition_retry_max` | The maximum number of retries for voice recognition. | `int` | 2 | | `recording_enabled` | Whether call recording is enabled. | `bool` | false | | `slow_llm_for_chat` | Whether to use the slow LLM for chat. | `bool` | false | diff --git a/app/helpers/call_events.py b/app/helpers/call_events.py index 505efac6..fde93bdb 100644 --- a/app/helpers/call_events.py +++ b/app/helpers/call_events.py @@ -1,5 +1,6 @@ import asyncio from collections.abc import Awaitable, Callable +from datetime import UTC, datetime from azure.communication.callautomation import ( AzureBlobContainerRecordingStorage, @@ -76,19 +77,17 @@ async def on_new_call( return True except ClientAuthenticationError: - logger.error( - "Authentication error with Communication Services, check the credentials", - exc_info=True, + logger.exception( + "Authentication error with Communication Services, check the credentials" ) except HttpResponseError as e: if "lifetime validation of the signed http request failed" in e.message.lower(): logger.debug("Old call event received, ignoring") else: - logger.error( + logger.exception( "Unknown error answering call with %s", phone_number, - exc_info=True, ) return False @@ -101,23 +100,25 @@ async def on_call_connected( server_call_id: str, ) -> None: logger.info("Call connected, asking for language") - call.recognition_retry = 0 # Reset recognition retry counter - call.in_progress = True - call.messages.append( - MessageModel( - action=MessageActionEnum.CALL, - content="", - persona=MessagePersonaEnum.HUMAN, + + # Add define the call as in progress + async with _db.call_transac(call): + call.in_progress = True + call.recognition_retry = 0 + call.messages.append( + MessageModel( + action=MessageActionEnum.CALL, + content="", + persona=MessagePersonaEnum.HUMAN, + ) ) - ) + + # Execute business logic await asyncio.gather( _handle_ivr_language( call=call, client=client, ), # First, every time a call is answered, confirm the language - _db.call_aset( - call - ), # Second, save in DB allowing SMS answers to be more "in-sync", should be quick enough to be in sync with the next message _handle_recording( call=call, client=client, @@ -172,7 +173,10 @@ async def on_recognize_error( ) -> None: # Retry IVR recognition if contexts and CallContextEnum.IVR_LANG_SELECT in contexts: + # Enrich span span_attribute(CallAttributes.CALL_CHANNEL, "ivr") + + # Retry IVR recognition if call.recognition_retry < await recognition_retry_max(): logger.info( "Timeout, retrying language selection (%s/%s)", @@ -183,13 +187,16 @@ async def on_recognize_error( call=call, client=client, ) - else: # IVR retries are exhausted, end call + + # IVR retries are exhausted, end call + else: logger.info("Timeout, ending call") await _handle_goodbye( call=call, client=client, post_callback=post_callback, ) + return # Voice retries are exhausted, end call @@ -203,7 +210,8 @@ async def on_recognize_error( return # Increment the recognition retry counter - 
call.recognition_retry += 1 + async with _db.call_transac(call): + call.recognition_retry += 1 # Play a timeout prompt await handle_play_text( @@ -236,6 +244,20 @@ async def _handle_goodbye( ) +@tracer.start_as_current_span("on_play_started") +async def on_play_started( + call: CallStateModel, +) -> None: + logger.debug("Play started") + + # Enrich span + span_attribute(CallAttributes.CALL_CHANNEL, "voice") + + # Update last interaction + async with _db.call_transac(call): + call.last_interaction_at = datetime.now(UTC) + + @tracer.start_as_current_span("on_play_completed") async def on_play_completed( call: CallStateModel, @@ -244,35 +266,52 @@ async def on_play_completed( post_callback: Callable[[CallStateModel], Awaitable[None]], ) -> None: logger.debug("Play completed") + + # Enrich span span_attribute(CallAttributes.CALL_CHANNEL, "voice") + # Update last interaction + async with _db.call_transac(call): + call.last_interaction_at = datetime.now(UTC) + + # Skip if no context data if not contexts: return + # Call ended context if ( CallContextEnum.TRANSFER_FAILED in contexts or CallContextEnum.GOODBYE in contexts - ): # Call ended + ): logger.info("Ending call") await _handle_hangup( call=call, client=client, post_callback=post_callback, ) + return - elif CallContextEnum.CONNECT_AGENT in contexts: # Call transfer + # Call transfer context + if CallContextEnum.CONNECT_AGENT in contexts: logger.info("Initiating transfer call initiated") await handle_transfer( call=call, client=client, target=call.initiate.agent_phone_number, ) + return + + logger.warning("Unknown context %s", contexts) @tracer.start_as_current_span("on_play_error") async def on_play_error(error_code: int) -> None: logger.debug("Play failed") + + # Enrich span span_attribute(CallAttributes.CALL_CHANNEL, "voice") + + # Suppress known errors # See: https://github.com/MicrosoftDocs/azure-docs/blob/main/articles/communication-services/how-tos/call-automation/play-action.md match error_code: case 8535: @@ -300,9 +339,12 @@ async def on_ivr_recognized( label: str, ) -> None: logger.info("IVR recognized: %s", label) + + # Enrich span span_attribute(CallAttributes.CALL_CHANNEL, "ivr") span_attribute(CallAttributes.CALL_MESSAGE, label) - call.recognition_retry = 0 # Reset recognition retry counter + + # Parse language from label try: lang = next( (x for x in call.initiate.lang.availables if x.short_code == label), @@ -313,8 +355,9 @@ async def on_ivr_recognized( return logger.info("Setting call language to %s", lang) - call.lang = lang.short_code - persist_coro = _db.call_aset(call) + async with _db.call_transac(call): + call.lang = lang.short_code + call.recognition_retry = 0 if len(call.messages) <= 1: # First call, or only the call action await asyncio.gather( @@ -322,12 +365,11 @@ async def on_ivr_recognized( call=call, client=client, text=await CONFIG.prompts.tts.hello(call), - ), # First, greet the user - persist_coro, # Second, persist language change for next messages, should be quick enough to be in sync with the next message + ), # First, greet the userwith the next message start_audio_streaming( call=call, client=client, - ), # Third, the conversation with the LLM should start + ), # Second, the conversation with the LLM should start ) # All in parallel to lower the response latency else: # Returning call @@ -338,11 +380,10 @@ async def on_ivr_recognized( style=MessageStyleEnum.CHEERFUL, text=await CONFIG.prompts.tts.welcome_back(call), ), # First, welcome back the user - persist_coro, # Second, persist language change for 
next messages, should be quick enough to be in sync with the next message start_audio_streaming( call=call, client=client, - ), # Third, the conversation with the LLM should start + ), # Second, the conversation with the LLM should start ) @@ -373,18 +414,26 @@ async def on_sms_received( message: str, ) -> bool: logger.info("SMS received from %s: %s", call.initiate.phone_number, message) + + # Enrich span span_attribute(CallAttributes.CALL_CHANNEL, "sms") span_attribute(CallAttributes.CALL_MESSAGE, message) - call.messages.append( - MessageModel( - action=MessageActionEnum.SMS, - content=message, - persona=MessagePersonaEnum.HUMAN, + + # Add the SMS to the call history + async with _db.call_transac(call): + call.messages.append( + MessageModel( + action=MessageActionEnum.SMS, + content=message, + persona=MessagePersonaEnum.HUMAN, + ) ) - ) - await _db.call_aset(call) # save in DB allowing SMS answers to be more "in-sync" + + # If the call is not in progress, answer with SMS if not call.in_progress: logger.info("Call not in progress, answering with SMS") + + # If the call is in progress, answer with voice else: logger.info("Call in progress, answering with voice") # TODO: Reimplement SMS answers in voice @@ -393,6 +442,7 @@ async def on_sms_received( # client=client, # post_callback=post_callback, # ) + return True @@ -402,14 +452,17 @@ async def _handle_hangup( post_callback: Callable[[CallStateModel], Awaitable[None]], ) -> None: await handle_hangup(client=client, call=call) - call.in_progress = False - call.messages.append( - MessageModel( - action=MessageActionEnum.HANGUP, - content="", - persona=MessagePersonaEnum.HUMAN, + + async with _db.call_transac(call): + call.in_progress = False + call.messages.append( + MessageModel( + action=MessageActionEnum.HANGUP, + content="", + persona=MessagePersonaEnum.HUMAN, + ) ) - ) + await post_callback(call) @@ -467,21 +520,21 @@ def _validate(req: str | None) -> tuple[bool, str | None, str | None]: ): if not number: continue - res = await _sms.asend(content, number) + res = await _sms.send(content, number) if not res: logger.warning("Failed sending SMS report to %s", number) continue success = True if success: - call.messages.append( - MessageModel( - action=MessageActionEnum.SMS, - content=content, - persona=MessagePersonaEnum.ASSISTANT, + async with _db.call_transac(call): + call.messages.append( + MessageModel( + action=MessageActionEnum.SMS, + content=content, + persona=MessagePersonaEnum.ASSISTANT, + ) ) - ) - await _db.call_aset(call) async def _intelligence_synthesis(call: CallStateModel) -> None: @@ -511,8 +564,8 @@ def _validate( return logger.info("Synthesis: %s", model) - call.synthesis = model - await _db.call_aset(call) + async with _db.call_transac(call): + call.synthesis = model async def _intelligence_next(call: CallStateModel) -> None: @@ -542,8 +595,8 @@ def _validate( return logger.info("Next action: %s", model) - call.next = model - await _db.call_aset(call) + async with _db.call_transac(call): + call.next = model async def _handle_ivr_language( diff --git a/app/helpers/call_llm.py b/app/helpers/call_llm.py index 6b8647ea..fef9c4d0 100644 --- a/app/helpers/call_llm.py +++ b/app/helpers/call_llm.py @@ -164,14 +164,16 @@ async def _response_callback() -> None: if not recognizer_text: return - # Add it to the call history + # Add it to the call history and update last interaction logger.info("Voice stored: %s", recognizer_buffer) - call.messages.append( - MessageModel( - content=recognizer_text, - persona=MessagePersonaEnum.HUMAN, 
+ async with _db.call_transac(call): + call.last_interaction_at = datetime.now(UTC) + call.messages.append( + MessageModel( + content=recognizer_text, + persona=MessagePersonaEnum.HUMAN, + ) ) - ) # Clear the recognition buffer recognizer_buffer.clear() @@ -226,7 +228,8 @@ async def _out_answer( # noqa: PLR0915 span_attribute(CallAttributes.CALL_MESSAGE, call.messages[-1].content) # Reset recognition retry counter - call.recognition_retry = 0 + async with _db.call_transac(call): + call.recognition_retry = 0 # By default, play the loading sound play_loading_sound = True @@ -287,9 +290,10 @@ def _clear_tasks() -> None: continue_chat = True try: while True: - logger.debug("Chat task status: %s", chat_task.done()) + # logger.debug("Chat task status: %s", chat_task.done()) - if chat_task.done(): # Break when chat coroutine is done + # Break when chat coroutine is done + if chat_task.done(): # Clean up _clear_tasks() # Get result @@ -297,12 +301,10 @@ def _clear_tasks() -> None: chat_task.result() ) # Store updated chat model await training_callback(call) # Trigger trainings generation - await _db.call_aset( - call - ) # Save ASAP in DB allowing (1) user to cut off the Assistant and (2) SMS answers to be in order break - if hard_timeout_task.done(): # Break when hard timeout is reached + # Break when hard timeout is reached + if hard_timeout_task.done(): logger.warning( "Hard timeout of %ss reached", await answer_hard_timeout_sec(), @@ -311,10 +313,10 @@ def _clear_tasks() -> None: _clear_tasks() break - if play_loading_sound: # Catch timeout if async loading is not started - if ( - soft_timeout_task.done() and not soft_timeout_triggered - ): # Speak when soft timeout is reached + # Catch timeout if async loading is not started + if play_loading_sound: + # Speak when soft timeout is reached + if soft_timeout_task.done() and not soft_timeout_triggered: logger.warning( "Soft timeout of %ss reached", await answer_soft_timeout_sec(), @@ -330,7 +332,8 @@ def _clear_tasks() -> None: ) ) - elif loading_task.done(): # Do not play timeout prompt plus loading, it can be frustrating for the user + # Do not play timeout prompt plus loading, it can be frustrating for the user + elif loading_task.done(): loading_task = _loading_task() await scheduler.spawn( handle_media( @@ -344,7 +347,8 @@ def _clear_tasks() -> None: await asyncio.sleep(1) except Exception: - logger.warning("Error loading intelligence", exc_info=True) + # TODO: Remove last message + logger.exception("Error loading intelligence") if is_error: # Error during chat if not continue_chat or _iterations_remaining < 1: # Maximum retries reached @@ -408,27 +412,20 @@ async def _execute_llm_chat( # noqa: PLR0913, PLR0911, PLR0912, PLR0915 logger.debug("Running LLM chat") content_full = "" - async def _buffer_callback(text: str, style: MessageStyleEnum) -> None: + async def _plugin_tts_callback(text: str) -> None: nonlocal content_full content_full += f" {text}" - await tts_callback(text, style) + await tts_callback(text, MessageStyleEnum.NONE) - async def _content_callback( - buffer: str, style: MessageStyleEnum - ) -> MessageStyleEnum: + async def _content_callback(buffer: str) -> None: # Remove tool calls from buffer content and detect style - local_style, local_content = extract_message_style( - remove_message_action(buffer) - ) - new_style = local_style or style - if local_content: - await tts_callback(local_content, new_style) - return new_style + style, local_content = extract_message_style(remove_message_action(buffer)) + await 
tts_callback(local_content, style) # Build RAG trainings = await call.trainings() logger.info("Enhancing LLM chat with %s trainings", len(trainings)) - logger.debug("Trainings: %s", trainings) + # logger.debug("Trainings: %s", trainings) # System prompts system = CONFIG.prompts.llm.chat_system( @@ -448,7 +445,7 @@ async def _content_callback( call=call, client=client, post_callback=post_callback, - tts_callback=_buffer_callback, + tts_callback=_plugin_tts_callback, ) tools = [] @@ -456,7 +453,7 @@ async def _content_callback( logger.warning("Tools disabled for this chat") else: tools = await plugins.to_openai() - logger.debug("Tools: %s", tools) + # logger.debug("Tools: %s", tools) # Execute LLM inference maximum_tokens_reached = False @@ -482,7 +479,7 @@ async def _content_callback( content_full[content_buffer_pointer:], False ): content_buffer_pointer += length - plugins.style = await _content_callback(sentence, plugins.style) + await _content_callback(sentence) except MaximumTokensReachedError: # Retry on maximum tokens reached logger.warning("Maximum tokens reached for this completion, retry asked") maximum_tokens_reached = True @@ -505,15 +502,15 @@ async def _content_callback( # Flush the remaining buffer if content_buffer_pointer < len(content_full): - plugins.style = await _content_callback( - content_full[content_buffer_pointer:], plugins.style - ) + await _content_callback(content_full[content_buffer_pointer:]) # Convert tool calls buffer tool_calls = [tool_call for _, tool_call in tool_calls_buffer.items()] # Delete action and style from the message as they are in the history and LLM hallucinates them - _, content_full = extract_message_style(remove_message_action(content_full)) + last_style, content_full = extract_message_style( + remove_message_action(content_full) + ) logger.debug("Chat response: %s", content_full) logger.debug("Tool calls: %s", tool_calls) @@ -538,17 +535,12 @@ async def _content_callback( call = plugins.call # Update call model if object reference changed # Store message - if call.messages[-1].persona == MessagePersonaEnum.ASSISTANT: - message = call.messages[-1] - message.content = content_full.strip() - message.style = plugins.style - message.tool_calls = tool_calls - else: + async with _db.call_transac(call): call.messages.append( MessageModel( - content=content_full.strip(), + content="", # Content has already been stored within the TTS callback persona=MessagePersonaEnum.ASSISTANT, - style=plugins.style, + style=last_style, tool_calls=tool_calls, ) ) @@ -586,29 +578,43 @@ async def _silence_callback() -> None: """ Flush the audio buffer if no audio is detected for a while and trigger the timeout if required. 
""" - # Wait and flush the audio buffer + # Wait before flushing nonlocal clear_tts_task timeout_ms = await vad_silence_timeout_ms() await asyncio.sleep(timeout_ms / 1000) + + # Cancel the clear TTS task if any if clear_tts_task: clear_tts_task.cancel() clear_tts_task = None + + # Flush the audio buffer logger.debug("Flushing audio buffer after %i ms", timeout_ms) await response_callback() # Wait for silence and trigger timeout timeout_sec = await phone_silence_timeout_sec() - while call.in_progress: + while True: + # Stop this time if the call played a message timeout_start = datetime.now(UTC) await asyncio.sleep(timeout_sec) + + # Stop if the call ended + if not call.in_progress: + break + + # Cancel if an interaction happened in the meantime if ( - call.messages[-1].created_at + timedelta(seconds=timeout_sec) + call.last_interaction_at + and call.last_interaction_at + timedelta(seconds=timeout_sec) > timeout_start ): logger.debug( "Message sent in the meantime, canceling this silence timeout" ) continue + + # Trigger the timeout logger.info("Silence triggered after %i sec", timeout_sec) await timeout_callback() @@ -691,7 +697,11 @@ async def wrapper( text: str, style: MessageStyleEnum, ) -> None: - # First, play the TTS to the user + # Skip if no text + if not text: + return + + # Play the TTS await scheduler.spawn( handle_play_text( call=call, @@ -700,7 +710,5 @@ async def wrapper( text=text, ) ) - # Second, save in DB allowing (1) user to cut off the Assistant and (2) SMS answers to be in order - await _db.call_aset(call) return wrapper diff --git a/app/helpers/call_utils.py b/app/helpers/call_utils.py index 5f605dc4..b4532630 100644 --- a/app/helpers/call_utils.py +++ b/app/helpers/call_utils.py @@ -37,6 +37,8 @@ r"[^\w\sÀ-ÿ'«»“”\"\"‘’''(),.!?;:\-\+_@/&€$%=]" # noqa: RUF001 ) # Sanitize text for TTS +_db = CONFIG.database.instance() + class ContextEnum(str, Enum): """ @@ -221,13 +223,7 @@ async def _chunk_before_tts( # Store text in call messages if store: - if ( - call.messages - and call.messages[-1].persona == MessagePersonaEnum.ASSISTANT - and call.messages[-1].style == style - ): # Append to last message if possible - call.messages[-1].content += f" {text}" - else: + async with _db.call_transac(call): call.messages.append( MessageModel( content=text, diff --git a/app/helpers/config_models/cache.py b/app/helpers/config_models/cache.py index 1928d093..454349bd 100644 --- a/app/helpers/config_models/cache.py +++ b/app/helpers/config_models/cache.py @@ -46,7 +46,7 @@ class CacheModel(BaseModel): @field_validator("redis") @classmethod - def _validate_sqlite( + def _validate_redis( cls, redis: RedisModel | None, info: ValidationInfo, diff --git a/app/helpers/config_models/database.py b/app/helpers/config_models/database.py index 0e5a84ba..347e1f42 100644 --- a/app/helpers/config_models/database.py +++ b/app/helpers/config_models/database.py @@ -1,16 +1,10 @@ -from enum import Enum from functools import cache -from pydantic import BaseModel, ValidationInfo, field_validator +from pydantic import BaseModel from app.persistence.istore import IStore -class ModeEnum(str, Enum): - COSMOS_DB = "cosmos_db" - SQLITE = "sqlite" - - class CosmosDbModel(BaseModel, frozen=True): container: str database: str @@ -26,60 +20,9 @@ def instance(self) -> IStore: return CosmosDbStore(CONFIG.cache.instance(), self) -class SqliteModel(BaseModel, frozen=True): - path: str = ".local" - schema_version: int = 3 - table: str = "calls" - - def full_path(self) -> str: - """ - Returns the full path to the sqlite 
database file. - - Formatted as: `{path}-v{schema_version}.sqlite`. - """ - return f"{self.path}-v{self.schema_version}.sqlite" - - @cache - def instance(self) -> IStore: - from app.helpers.config import CONFIG - from app.persistence.sqlite import ( - SqliteStore, - ) - - return SqliteStore(CONFIG.cache.instance(), self) - - class DatabaseModel(BaseModel): - cosmos_db: CosmosDbModel | None = None - mode: ModeEnum = ModeEnum.SQLITE - sqlite: SqliteModel | None = SqliteModel() # Object is fully defined by default - - @field_validator("cosmos_db") - @classmethod - def _validate_cosmos_db( - cls, - cosmos_db: CosmosDbModel | None, - info: ValidationInfo, - ) -> CosmosDbModel | None: - if not cosmos_db and info.data.get("mode", None) == ModeEnum.COSMOS_DB: - raise ValueError("Cosmos DB config required") - return cosmos_db - - @field_validator("sqlite") - @classmethod - def _validate_sqlite( - cls, - sqlite: SqliteModel | None, - info: ValidationInfo, - ) -> SqliteModel | None: - if not sqlite and info.data.get("mode", None) == ModeEnum.SQLITE: - raise ValueError("SQLite config required") - return sqlite + cosmos_db: CosmosDbModel def instance(self) -> IStore: - if self.mode == ModeEnum.SQLITE: - assert self.sqlite - return self.sqlite.instance() - assert self.cosmos_db return self.cosmos_db.instance() diff --git a/app/helpers/config_models/root.py b/app/helpers/config_models/root.py index 585017aa..66d9e8d2 100644 --- a/app/helpers/config_models/root.py +++ b/app/helpers/config_models/root.py @@ -40,7 +40,7 @@ class RootModel(BaseSettings): communication_services: CommunicationServicesModel = Field( serialization_alias="communication_service", # Compatibility with v5 ) - database: DatabaseModel = DatabaseModel() # Object is fully defined by default + database: DatabaseModel llm: LlmModel monitoring: MonitoringModel = ( MonitoringModel() diff --git a/app/helpers/features.py b/app/helpers/features.py index 6342f5de..20a63ece 100644 --- a/app/helpers/features.py +++ b/app/helpers/features.py @@ -29,7 +29,7 @@ async def callback_timeout_hour() -> int: async def phone_silence_timeout_sec() -> int: - return await _get(key="phone_silence_timeout_sec", type_res=int) or 10 + return await _get(key="phone_silence_timeout_sec", type_res=int) or 20 async def vad_silence_timeout_ms() -> int: @@ -55,7 +55,7 @@ async def recognition_retry_max() -> int: async def _get(key: str, type_res: type[T]) -> T | None: # Try cache cache_key = _cache_key(key) - cached = await _cache.aget(cache_key) + cached = await _cache.get(cache_key) if cached: return _parse(value=cached.decode(), type_res=type_res) # Try live @@ -69,7 +69,7 @@ async def _get(key: str, type_res: type[T]) -> T | None: logger.warning("Setting %s not found", key) return # Update cache - await _cache.aset( + await _cache.set( key=cache_key, ttl_sec=CONFIG.app_configuration.ttl_sec, value=setting.value, diff --git a/app/helpers/llm_tools.py b/app/helpers/llm_tools.py index 139363ea..97bd544e 100644 --- a/app/helpers/llm_tools.py +++ b/app/helpers/llm_tools.py @@ -15,7 +15,6 @@ ActionEnum as MessageActionEnum, MessageModel, PersonaEnum as MessagePersonaEnum, - StyleEnum as MessageStyleEnum, ) from app.models.reminder import ReminderModel from app.models.training import TrainingModel @@ -32,15 +31,14 @@ class UpdateClaimDict(TypedDict): class DefaultPlugin(AbstractPlugin): client: CallAutomationClient post_callback: Callable[[CallStateModel], Awaitable[None]] - style: MessageStyleEnum = MessageStyleEnum.NONE - tts_callback: Callable[[str, MessageStyleEnum], 
Awaitable[None]] + tts_callback: Callable[[str], Awaitable[None]] def __init__( self, call: CallStateModel, client: CallAutomationClient, post_callback: Callable[[CallStateModel], Awaitable[None]], - tts_callback: Callable[[str, MessageStyleEnum], Awaitable[None]], + tts_callback: Callable[[str], Awaitable[None]], ): super().__init__(call) self.client = client @@ -103,7 +101,7 @@ async def new_claim( - Customer wants explicitely to create a new claim - Talking about a totally different subject """ - await self.tts_callback(customer_response, self.style) + await self.tts_callback(customer_response) # Launch post-call intelligence for the current call await self.post_callback(self.call) # Store the last message and use it at first message of the new claim @@ -174,7 +172,7 @@ async def new_or_updated_reminder( - Call back for a follow-up - Wait for customer to send a document """ - await self.tts_callback(customer_response, self.style) + await self.tts_callback(customer_response) # Check if reminder already exists, if so update it for reminder in self.call.reminders: @@ -261,7 +259,7 @@ async def updated_claim( - Store details about the conversation - Update the claim with a new phone number """ - await self.tts_callback(customer_response, self.style) + await self.tts_callback(customer_response) # Update all claim fields res = "# Updated fields" for field in updates: @@ -343,11 +341,11 @@ async def search_document( - Know the procedure to declare a stolen luxury watch - Understand the requirements to ask for a cyber attack insurance """ - await self.tts_callback(customer_response, self.style) + await self.tts_callback(customer_response) # Execute in parallel tasks = await asyncio.gather( *[ - _search.training_asearch_all(text=query, lang="en-US") + _search.training_search_all(text=query, lang="en-US") for query in queries ] ) @@ -415,7 +413,7 @@ async def notify_emergencies( - A neighbor is having a heart attack - Someons is stuck in a car accident """ - await self.tts_callback(customer_response, self.style) + await self.tts_callback(customer_response) # TODO: Implement notification to emergency services for production usage logger.info( "Notifying %s, location %s, contact %s, reason %s", @@ -457,8 +455,8 @@ async def send_sms( - Confirm a detail like a reference number, if there is a misunderstanding - Send a confirmation, if the customer wants to have a written proof """ - await self.tts_callback(customer_response, self.style) - success = await _sms.asend( + await self.tts_callback(customer_response) + success = await _sms.send( content=message, phone_number=self.call.initiate.phone_number, ) @@ -512,7 +510,7 @@ async def speech_speed( initial_speed = self.call.initiate.prosody_rate self.call.initiate.prosody_rate = speed # Customer confirmation (with new speed) - await self.tts_callback(customer_response, self.style) + await self.tts_callback(customer_response) # LLM confirmation return f"Voice speed set to {speed} (was {initial_speed})" @@ -575,6 +573,6 @@ async def speech_lang( initial_lang = self.call.lang.short_code self.call.lang = lang # Customer confirmation (with new language) - await self.tts_callback(customer_response, self.style) + await self.tts_callback(customer_response) # LLM confirmation return f"Voice language set to {lang} (was {initial_lang})" diff --git a/app/helpers/translation.py b/app/helpers/translation.py index d502113c..84655b55 100644 --- a/app/helpers/translation.py +++ b/app/helpers/translation.py @@ -36,7 +36,7 @@ async def translate_text(text: str, 
source_lang: str, target_lang: str) -> str | # Try cache cache_key = f"{__name__}-translate_text-{text}-{source_lang}-{target_lang}" - cached = await _cache.aget(cache_key) + cached = await _cache.get(cache_key) if cached: return cached.decode() @@ -51,7 +51,7 @@ async def translate_text(text: str, source_lang: str, target_lang: str) -> str | translation = res[0].translations[0].text if res and res[0].translations else None # Update cache - await _cache.aset( + await _cache.set( key=cache_key, ttl_sec=60 * 60 * 24, # 1 day value=translation, diff --git a/app/main.py b/app/main.py index ba55c272..416237df 100644 --- a/app/main.py +++ b/app/main.py @@ -48,6 +48,7 @@ on_new_call, on_play_completed, on_play_error, + on_play_started, on_recognize_error, on_sms_received, on_transfer_completed, @@ -201,10 +202,10 @@ async def health_readiness_get() -> JSONResponse: search_check, sms_check, ) = await asyncio.gather( - _cache.areadiness(), - _db.areadiness(), - _search.areadiness(), - _sms.areadiness(), + _cache.readiness(), + _db.readiness(), + _search.readiness(), + _sms.readiness(), ) readiness = ReadinessModel( status=ReadinessEnum.OK, @@ -246,7 +247,7 @@ async def report_get(phone_number: str | None = None) -> HTMLResponse: phone_number = PhoneNumber(phone_number) if phone_number else None count = 100 calls, total = ( - await _db.call_asearch_all(count=count, phone_number=phone_number) or [] + await _db.call_search_all(count=count, phone_number=phone_number) or [] ) template = _jinja.get_template("list.html.jinja") @@ -281,7 +282,7 @@ async def report_single_get(call_id: UUID) -> HTMLResponse: Returns a single call with a web interface. """ - call = await _db.call_aget(call_id) + call = await _db.call_get(call_id) if not call: return HTMLResponse( content=f"Call {call_id} not found", @@ -319,7 +320,7 @@ async def call_list_get( """ phone_number = PhoneNumber(phone_number) if phone_number else None count = 100 - calls, _ = await _db.call_asearch_all(phone_number=phone_number, count=count) + calls, _ = await _db.call_search_all(phone_number=phone_number, count=count) if not calls: raise HTTPException( detail=f"Call {phone_number} not found", @@ -344,7 +345,7 @@ async def call_get(call_id_or_phone_number: str) -> CallGetModel: # First, try to get by call ID try: call_id = UUID(call_id_or_phone_number) - call = await _db.call_aget(call_id) + call = await _db.call_get(call_id) if call: return TypeAdapter(CallGetModel).dump_python(call) except ValueError: @@ -352,7 +353,7 @@ async def call_get(call_id_or_phone_number: str) -> CallGetModel: # Second, try to get by phone number phone_number = PhoneNumber(call_id_or_phone_number) - call = await _db.call_asearch_one(phone_number=phone_number) + call = await _db.call_search_one(phone_number=phone_number) if not call: raise HTTPException( detail=f"Call {call_id_or_phone_number} not found", @@ -482,7 +483,7 @@ async def sms_event( span_attribute(CallAttributes.CALL_PHONE_NUMBER, phone_number) # Get call - call = await _db.call_asearch_one(phone_number) + call = await _db.call_search_one(phone_number) if not call: logger.warning("Call for phone number %s not found", phone_number) return @@ -537,7 +538,7 @@ async def _communicationservices_validate_call_id( span_attribute(CallAttributes.CALL_ID, str(call_id)) # Validate call - call = await _db.call_aget(call_id) + call = await _db.call_get(call_id) if not call: raise HTTPException( detail=f"Call {call_id} not found", @@ -682,14 +683,19 @@ async def _communicationservices_event_worker( # Event parsing event = 
CloudEvent.from_dict(event_dict) assert isinstance(event.data, dict) + # Store connection ID connection_id = event.data["callConnectionId"] - call.voice_id = connection_id + async with _db.call_transac(call): + call.voice_id = connection_id + # Extract context event_type = event.type + # Extract event context operation_context = event.data.get("operationContext", None) operation_contexts = _str_to_contexts(operation_context) + # Client SDK automation_client = await _use_automation_client() @@ -738,6 +744,11 @@ async def _communicationservices_event_worker( post_callback=_trigger_post_event, ) + case "Microsoft.Communication.PlayStarted": # Media started + await on_play_started( + call=call, + ) + case "Microsoft.Communication.PlayCompleted": # Media played await on_play_completed( call=call, @@ -767,10 +778,6 @@ async def _communicationservices_event_worker( logger.warning("Event %s not supported", event_type) logger.debug("Event data %s", event.data) - await _db.call_aset( - call - ) # TODO: Do not persist on every event, this is simpler but not efficient - @tracer.start_as_current_span("training_event") async def training_event( @@ -806,7 +813,7 @@ async def post_event( Queue message is the UUID of a call. The event will load asynchroniously the `on_end_call` workflow. """ # Validate call - call = await _db.call_aget(UUID(post.content)) + call = await _db.call_get(UUID(post.content)) if not call: logger.warning("Call %s not found", post.content) return @@ -845,18 +852,22 @@ async def _communicationservices_urls( Returnes a tuple of the callback URL, the WebSocket URL, and the call object. """ - call = await _db.call_asearch_one(phone_number) - if not call or ( - initiate and call.initiate != initiate - ): # Create new call if initiate is different - call = CallStateModel( - initiate=initiate - or CallInitiateModel( - **CONFIG.conversation.initiate.model_dump(), - phone_number=phone_number, + # Get call + call = await _db.call_search_one(phone_number) + + # Create new call if initiate is different + if not call or (initiate and call.initiate != initiate): + call = await _db.call_create( + CallStateModel( + initiate=initiate + or CallInitiateModel( + **CONFIG.conversation.initiate.model_dump(), + phone_number=phone_number, + ) ) ) - await _db.call_aset(call) # Create for the first time + + # Format URLs wss_url = _COMMUNICATIONSERVICES_WSS_TPL.format( callback_secret=call.callback_secret, call_id=str(call.call_id), @@ -865,6 +876,7 @@ async def _communicationservices_urls( callback_secret=call.callback_secret, call_id=str(call.call_id), ) + return callaback_url, wss_url, call @@ -888,7 +900,7 @@ async def twilio_sms_post( span_attribute(CallAttributes.CALL_PHONE_NUMBER, From) # Get call - call = await _db.call_asearch_one(From) + call = await _db.call_search_one(From) if not call: logger.warning("Call for phone number %s not found", From) diff --git a/app/models/call.py b/app/models/call.py index b8df7dfa..aa4250b8 100644 --- a/app/models/call.py +++ b/app/models/call.py @@ -48,6 +48,9 @@ class CallGetModel(BaseModel): def _validate_claim( cls, claim: dict[str, Any] | None, info: ValidationInfo ) -> dict[str, Any]: + """ + Validate the claim field against the initiate data model. 
+ """ initiate: CallInitiateModel | None = info.data.get("initiate", None) if not initiate: return {} @@ -60,6 +63,26 @@ def _validate_claim( ) ) + @field_validator("messages") + @classmethod + def _validate_messages(cls, messages: list[MessageModel]) -> list[MessageModel]: + """ + Merge messages with the same persona. + """ + merged: list[MessageModel] = [] + for new_message in messages: + if not ( + merged + and (last := merged[-1]).persona == new_message.persona + and last.action == new_message.action + ): + merged.append(new_message) + continue + last.content = (last.content + " " + new_message.content).strip() + last.style = new_message.style + last.tool_calls += new_message.tool_calls + return merged + class CallStateModel(CallGetModel, extra="ignore"): # Immutable fields @@ -71,6 +94,7 @@ class CallStateModel(CallGetModel, extra="ignore"): ) # Editable fields lang_short_code: str | None = None + last_interaction_at: datetime | None = None recognition_retry: int = 0 voice_id: str | None = None @@ -108,7 +132,7 @@ async def trainings(self, cache_only: bool = True) -> list[TrainingModel]: search = CONFIG.ai_search.instance() tasks = await asyncio.gather( *[ - search.training_asearch_all( + search.training_search_all( cache_only=cache_only, lang=self.lang.short_code, text=message.content, diff --git a/app/models/message.py b/app/models/message.py index bb4fdcd5..c5d79c87 100644 --- a/app/models/message.py +++ b/app/models/message.py @@ -132,11 +132,10 @@ async def execute_function(self, plugin: object) -> None: res = "Wrong arguments, please fix them and try again." res_log = res except Exception as e: - logger.warning( + logger.exception( "Error executing function %s with args %s", self.function_name, args, - exc_info=True, ) res = f"Error: {e}." res_log = res @@ -224,6 +223,10 @@ def to_openai( def remove_message_action(text: str) -> str: """ Remove action from content. AI often adds it by mistake event if explicitly asked not to. + + Example: + - Input: "action=talk Hello!" + - Output: "Hello!" """ # TODO: Use JSON as LLM response instead of using a regex to parse the text res = re.match(_MESSAGE_ACTION_R, text) @@ -235,18 +238,22 @@ def remove_message_action(text: str) -> str: return text -def extract_message_style(text: str) -> tuple[StyleEnum | None, str]: +def extract_message_style(text: str) -> tuple[StyleEnum, str]: """ - Detect the style of a message. + Detect the style of a message and extract it from the text. + + Example: + - Input: "style=cheerful Hello!" + - Output: (StyleEnum.CHEERFUL, "Hello!") """ - # TODO: Use JSON as LLM response instead of using a regex to parse the text + default_style = StyleEnum.NONE res = re.match(_MESSAGE_STYLE_R, text) if not res: - return None, text + return default_style, text try: return ( StyleEnum(res.group(1)), # style (res.group(2) or ""), # content ) except ValueError: # Regex failed, return original text - return None, text + return default_style, text diff --git a/app/persistence/ai_search.py b/app/persistence/ai_search.py index 3bbacdd8..63f29124 100644 --- a/app/persistence/ai_search.py +++ b/app/persistence/ai_search.py @@ -64,7 +64,7 @@ def __init__(self, cache: ICache, config: AiSearchModel): ) self._config = config - async def areadiness(self) -> ReadinessEnum: + async def readiness(self) -> ReadinessEnum: """ Check the readiness of the AI Search service. 
""" @@ -77,9 +77,7 @@ async def areadiness(self) -> ReadinessEnum: except ServiceRequestError: logger.exception("Error connecting to AI Search") except Exception: - logger.error( - "Unknown error while checking AI Search readiness", exc_info=True - ) + logger.exception("Unknown error while checking AI Search readiness") return ReadinessEnum.FAIL @retry( @@ -88,7 +86,7 @@ async def areadiness(self) -> ReadinessEnum: stop=stop_after_attempt(3), wait=wait_random_exponential(multiplier=0.8, max=8), ) - async def training_asearch_all( + async def training_search_all( self, lang: str, text: str, @@ -100,7 +98,7 @@ async def training_asearch_all( # Try cache cache_key = f"{self.__class__.__name__}-training_asearch_all-v2-{text}" # Cache sort method has been updated in v6, thus the v2 - cached = await self._cache.aget(cache_key) + cached = await self._cache.get(cache_key) if cached: try: return TypeAdapter(list[TrainingModel]).validate_json(cached) @@ -166,7 +164,7 @@ async def training_asearch_all( # Update cache if trainings: - await self._cache.aset( + await self._cache.set( key=cache_key, ttl_sec=60 * 60 * 24, # 1 day value=TypeAdapter(list[TrainingModel]).dump_json(trainings), diff --git a/app/persistence/communication_services.py b/app/persistence/communication_services.py index 46940005..2c3bcecf 100644 --- a/app/persistence/communication_services.py +++ b/app/persistence/communication_services.py @@ -19,14 +19,14 @@ def __init__(self, config: CommunicationServicesModel): logger.info("Using Communication Services from number %s", config.phone_number) self._config = config - async def areadiness(self) -> ReadinessEnum: + async def readiness(self) -> ReadinessEnum: """ Check the readiness of the Communication Services SMS service. """ # TODO: How to check the readiness of the SMS service? We could send a SMS for each test, but that would be damm expensive. return ReadinessEnum.OK - async def asend(self, content: str, phone_number: PhoneNumber) -> bool: + async def send(self, content: str, phone_number: PhoneNumber) -> bool: logger.info("Sending SMS to %s", phone_number) success = False logger.info("SMS content: %s", content) @@ -49,11 +49,9 @@ async def asend(self, content: str, phone_number: PhoneNumber) -> bool: response.error_message, ) except ClientAuthenticationError: - logger.error( - "Authentication error for SMS, check the credentials", exc_info=True - ) + logger.exception("Authentication error for SMS, check the credentials") except HttpResponseError: - logger.error("Error sending SMS to %s", phone_number, exc_info=True) + logger.exception("Error sending SMS to %s", phone_number) return success async def _use_client(self) -> SmsClient: diff --git a/app/persistence/cosmos_db.py b/app/persistence/cosmos_db.py index e8c12b36..4c439117 100644 --- a/app/persistence/cosmos_db.py +++ b/app/persistence/cosmos_db.py @@ -1,6 +1,7 @@ import asyncio from collections.abc import AsyncGenerator from contextlib import asynccontextmanager +from typing import Any from uuid import UUID, uuid4 from azure.cosmos import ConsistencyLevel @@ -28,7 +29,7 @@ def __init__(self, cache: ICache, config: CosmosDbModel): logger.info("Using Cosmos DB %s/%s", config.database, config.container) self._config = config - async def areadiness(self) -> ReadinessEnum: + async def readiness(self) -> ReadinessEnum: """ Check the readiness of the Cosmos DB service. 
@@ -68,9 +69,7 @@ async def areadiness(self) -> ReadinessEnum: except CosmosHttpResponseError: logger.exception("Error requesting CosmosDB") except Exception: - logger.error( - "Unknown error while checking Cosmos DB readiness", exc_info=True - ) + logger.exception("Unknown error while checking Cosmos DB readiness") return ReadinessEnum.FAIL async def _item_exists(self, test_id: str, partition_key: str) -> bool: @@ -83,12 +82,12 @@ async def _item_exists(self, test_id: str, partition_key: str) -> bool: pass return exist - async def call_aget(self, call_id: UUID) -> CallStateModel | None: + async def call_get(self, call_id: UUID) -> CallStateModel | None: logger.debug("Loading call %s", call_id) # Try cache cache_key = self._cache_key_call_id(call_id) - cached = await self._cache.aget(cache_key) + cached = await self._cache.get(cache_key) if cached: try: return CallStateModel.model_validate_json(cached) @@ -115,7 +114,7 @@ async def call_aget(self, call_id: UUID) -> CallStateModel | None: # Update cache if call: - await self._cache.aset( + await self._cache.set( key=cache_key, ttl_sec=await callback_timeout_hour(), value=call.model_dump_json(), @@ -123,43 +122,133 @@ async def call_aget(self, call_id: UUID) -> CallStateModel | None: return call - async def call_aset(self, call: CallStateModel) -> bool: - logger.debug("Saving call %s", call.call_id) + @asynccontextmanager + async def call_transac(self, call: CallStateModel) -> AsyncGenerator[None, None]: + # Copy and yield the updated object + init_data = call.model_copy().model_dump(mode="json", exclude_none=True) + yield + + # Compute the diff + call_data = call.model_dump(mode="json", exclude_none=True) + update_data: dict[str, Any | list[Any]] = {} + for field, new_value in call_data.items(): + init_value = init_data.get(field) + if init_value != new_value: + if isinstance(new_value, list) and isinstance(init_value, list): + update_data[field] = [ + item for item in new_value if item not in init_value + ] + else: + update_data[field] = new_value - # Update live - data = call.model_dump(mode="json", exclude_none=True) - data["id"] = str(call.call_id) # CosmosDB requires an id field - res = False + # Skip if no diff + if not update_data: + logger.debug("No update needed for call %s", call.call_id) + return + + # Update + logger.debug( + "Updating call %s with %s", + call.call_id, + update_data, + ) + refreshed_call_raw = None try: async with self._use_client() as db: - await db.upsert_item(body=data) - res = True + # See: https://learn.microsoft.com/en-us/azure/cosmos-db/partial-document-update#supported-operations + refreshed_call_raw = await db.patch_item( + item=str(call.call_id), + partition_key=call.initiate.phone_number, + patch_operations=[ + # Replace fields + *[ + { + "op": "set", + "path": f"/{field}", + "value": value, + } + for field, value in update_data.items() + if not isinstance(value, list) + ], + # Add to arrays + *[ + { + "op": "add", + "path": f"/{field}/-", + "value": value, + } + for field, values in update_data.items() + if isinstance(values, list) + for value in values + ], + ], + ) except CosmosHttpResponseError as e: logger.error("Error accessing CosmosDB: %s", e) - # Update cache - if res: - cache_key_id = self._cache_key_call_id(call.call_id) - await self._cache.aset( - key=cache_key_id, - ttl_sec=await callback_timeout_hour(), - value=call.model_dump_json(), - ) # Update for ID - cache_key_phone_number = self._cache_key_phone_number( - call.initiate.phone_number + # Skip if no refresh + if not 
refreshed_call_raw: + return + + # Parse refreshed object + try: + refreshed_call = CallStateModel.model_validate(refreshed_call_raw) + except ValidationError: + logger.debug("Parsing error", exc_info=True) + return + + # Refresh live object + for field in call.model_fields_set: + new_value = getattr(refreshed_call, field) + if getattr(call, field) == new_value: + continue + logger.debug( + "Updating local field %s with %s from remote", + field, + new_value, ) - await self._cache.adel( - cache_key_phone_number - ) # Invalidate for phone number because we don't know if it's the same call + setattr(call, field, new_value) + + # Update cache + cache_key_id = self._cache_key_call_id(refreshed_call.call_id) + await self._cache.set( + key=cache_key_id, + ttl_sec=await callback_timeout_hour(), + value=refreshed_call.model_dump_json(), + ) # Update for ID + cache_key_phone_number = self._cache_key_phone_number( + refreshed_call.initiate.phone_number + ) + await self._cache.delete( + cache_key_phone_number + ) # Invalidate for phone number because we don't know if it's the same call + + # TODO: Catch errors + async def call_create(self, call: CallStateModel) -> CallStateModel: + logger.debug("Creating new call %s", call.call_id) + + # Serialize + data = call.model_dump(mode="json", exclude_none=True) + data["id"] = str(call.call_id) - return res + # Persist + try: + async with self._use_client() as db: + raw = await db.create_item(body=data) + return CallStateModel.model_validate(raw) + except CosmosHttpResponseError: + logger.exception("Error accessing CosmosDB") + except ValidationError: + logger.debug("Parsing error", exc_info=True) + + return call - async def call_asearch_one(self, phone_number: str) -> CallStateModel | None: + async def call_search_one(self, phone_number: str) -> CallStateModel | None: logger.debug("Loading last call for %s", phone_number) # Try cache cache_key = self._cache_key_phone_number(phone_number) - cached = await self._cache.aget(cache_key) + cached = await self._cache.get(cache_key) if cached: try: return CallStateModel.model_validate_json(cached) @@ -192,7 +281,7 @@ async def call_asearch_one(self, phone_number: str) -> CallStateModel | None: # Update cache if call: - await self._cache.aset( + await self._cache.set( key=cache_key, ttl_sec=await callback_timeout_hour(), value=call.model_dump_json(), @@ -200,7 +289,7 @@ async def call_asearch_one(self, phone_number: str) -> CallStateModel | None: return call - async def call_asearch_all( + async def call_search_all( self, count: int, phone_number: str | None = None, diff --git a/app/persistence/icache.py b/app/persistence/icache.py index fd975ea1..d347864f 100644 --- a/app/persistence/icache.py +++ b/app/persistence/icache.py @@ -6,21 +6,21 @@ class ICache(ABC): @abstractmethod - @tracer.start_as_current_span("cache_areadiness") - async def areadiness(self) -> ReadinessEnum: + @tracer.start_as_current_span("cache_readiness") + async def readiness(self) -> ReadinessEnum: pass @abstractmethod - @tracer.start_as_current_span("cache_aget") - async def aget(self, key: str) -> bytes | None: + @tracer.start_as_current_span("cache_get") + async def get(self, key: str) -> bytes | None: pass @abstractmethod - @tracer.start_as_current_span("cache_aset") - async def aset(self, key: str, value: str | bytes | None, ttl_sec: int) -> bool: + @tracer.start_as_current_span("cache_set") + async def set(self, key: str, value: str | bytes | None, ttl_sec: int) -> bool: pass @abstractmethod - @tracer.start_as_current_span("cache_adel") - 
async def adel(self, key: str) -> bool: + @tracer.start_as_current_span("cache_delete") + async def delete(self, key: str) -> bool: pass diff --git a/app/persistence/isearch.py b/app/persistence/isearch.py index f354c2d6..41c18aab 100644 --- a/app/persistence/isearch.py +++ b/app/persistence/isearch.py @@ -13,13 +13,13 @@ def __init__(self, cache: ICache): self._cache = cache @abstractmethod - @tracer.start_as_current_span("search_areadiness") - async def areadiness(self) -> ReadinessEnum: + @tracer.start_as_current_span("search_readiness") + async def readiness(self) -> ReadinessEnum: pass @abstractmethod - @tracer.start_as_current_span("search_training_asearch_all") - async def training_asearch_all( + @tracer.start_as_current_span("search_training_search_all") + async def training_search_all( self, lang: str, text: str, diff --git a/app/persistence/isms.py b/app/persistence/isms.py index deeae092..d560a93d 100644 --- a/app/persistence/isms.py +++ b/app/persistence/isms.py @@ -7,11 +7,11 @@ class ISms(ABC): @abstractmethod - @tracer.start_as_current_span("sms_areadiness") - async def areadiness(self) -> ReadinessEnum: + @tracer.start_as_current_span("sms_readiness") + async def readiness(self) -> ReadinessEnum: pass @abstractmethod - @tracer.start_as_current_span("sms_asend") - async def asend(self, content: str, phone_number: PhoneNumber) -> bool: + @tracer.start_as_current_span("sms_send") + async def send(self, content: str, phone_number: PhoneNumber) -> bool: pass diff --git a/app/persistence/istore.py b/app/persistence/istore.py index 1a42a85b..5920cfd0 100644 --- a/app/persistence/istore.py +++ b/app/persistence/istore.py @@ -1,4 +1,5 @@ from abc import ABC, abstractmethod +from contextlib import AbstractAsyncContextManager from uuid import UUID from app.helpers.monitoring import tracer @@ -14,28 +15,33 @@ def __init__(self, cache: ICache): self._cache = cache @abstractmethod - @tracer.start_as_current_span("store_areadiness") - async def areadiness(self) -> ReadinessEnum: + @tracer.start_as_current_span("store_readiness") + async def readiness(self) -> ReadinessEnum: pass @abstractmethod - @tracer.start_as_current_span("store_call_aget") - async def call_aget(self, call_id: UUID) -> CallStateModel | None: + @tracer.start_as_current_span("store_call_get") + async def call_get(self, call_id: UUID) -> CallStateModel | None: pass @abstractmethod - @tracer.start_as_current_span("store_call_aset") - async def call_aset(self, call: CallStateModel) -> bool: + @tracer.start_as_current_span("store_call_transac") + def call_transac(self, call: CallStateModel) -> AbstractAsyncContextManager[None]: pass @abstractmethod - @tracer.start_as_current_span("store_call_asearch_one") - async def call_asearch_one(self, phone_number: str) -> CallStateModel | None: + @tracer.start_as_current_span("store_call_create") + async def call_create(self, call: CallStateModel) -> CallStateModel: pass @abstractmethod - @tracer.start_as_current_span("store_call_asearch_all") - async def call_asearch_all( + @tracer.start_as_current_span("store_call_search_one") + async def call_search_one(self, phone_number: str) -> CallStateModel | None: + pass + + @abstractmethod + @tracer.start_as_current_span("store_call_search_all") + async def call_search_all( self, count: int, phone_number: str | None = None, diff --git a/app/persistence/memory.py b/app/persistence/memory.py index 636347a7..95ccc43d 100644 --- a/app/persistence/memory.py +++ b/app/persistence/memory.py @@ -28,13 +28,13 @@ def __init__(self, config: 
MemoryModel): ) self._config = config - async def areadiness(self) -> ReadinessEnum: + async def readiness(self) -> ReadinessEnum: """ Check the readiness of the memory cache. """ return ReadinessEnum.OK # Always ready, it's memory :) - async def aget(self, key: str) -> bytes | None: + async def get(self, key: str) -> bytes | None: """ Get a value from the cache. @@ -53,7 +53,7 @@ async def aget(self, key: str) -> bytes | None: self._cache.move_to_end(sha_key, last=False) return res - async def aset(self, key: str, value: str | bytes | None, ttl_sec: int) -> bool: + async def set(self, key: str, value: str | bytes | None, ttl_sec: int) -> bool: """ Set a value in the cache. """ @@ -68,7 +68,7 @@ async def aset(self, key: str, value: str | bytes | None, ttl_sec: int) -> bool: self._ttl[sha_key] = datetime.now(UTC) + timedelta(seconds=ttl_sec) return True - async def adel(self, key: str) -> bool: + async def delete(self, key: str) -> bool: """ Delete a value from the cache. """ diff --git a/app/persistence/redis.py b/app/persistence/redis.py index a64a9224..5e2ad29b 100644 --- a/app/persistence/redis.py +++ b/app/persistence/redis.py @@ -48,7 +48,7 @@ def __init__(self, config: RedisModel): password=config.password.get_secret_value(), ) # Redis manage by itself a low level connection pool with asyncio, but be warning to not use a generator while consuming the connection, it will close it - async def areadiness(self) -> ReadinessEnum: + async def readiness(self) -> ReadinessEnum: """ Check the readiness of the Redis cache. @@ -76,7 +76,7 @@ async def areadiness(self) -> ReadinessEnum: logger.exception("Unknown error while checking Redis readiness") return ReadinessEnum.FAIL - async def aget(self, key: str) -> bytes | None: + async def get(self, key: str) -> bytes | None: """ Get a value from the cache. @@ -92,7 +92,7 @@ async def aget(self, key: str) -> bytes | None: logger.exception("Error getting value") return res - async def aset(self, key: str, value: str | bytes | None, ttl_sec: int) -> bool: + async def set(self, key: str, value: str | bytes | None, ttl_sec: int) -> bool: """ Set a value in the cache. @@ -112,7 +112,7 @@ async def aset(self, key: str, value: str | bytes | None, ttl_sec: int) -> bool: return False return True - async def adel(self, key: str) -> bool: + async def delete(self, key: str) -> bool: """ Delete a value from the cache. 
diff --git a/app/persistence/sqlite.py b/app/persistence/sqlite.py deleted file mode 100644 index c76943e6..00000000 --- a/app/persistence/sqlite.py +++ /dev/null @@ -1,274 +0,0 @@ -import asyncio -import os -from collections.abc import AsyncGenerator -from contextlib import asynccontextmanager -from uuid import UUID - -from aiosqlite import Connection, connect as sqlite_connect -from opentelemetry.instrumentation.sqlite3 import SQLite3Instrumentor -from pydantic import ValidationError - -from app.helpers.config_models.database import SqliteModel -from app.helpers.features import callback_timeout_hour -from app.helpers.logging import logger -from app.models.call import CallStateModel -from app.models.readiness import ReadinessEnum -from app.persistence.icache import ICache -from app.persistence.istore import IStore - -# Instrument sqlite -SQLite3Instrumentor().instrument() - - -class SqliteStore(IStore): - _config: SqliteModel - _db_path: str - _first_run_done: bool - - def __init__(self, cache: ICache, config: SqliteModel): - super().__init__(cache) - logger.info( - "Using SQLite database at %s with table %s", config.path, config.table - ) - self._config = config - - # Create folder if does not exist - self._db_path = self._config.full_path() - - # Check if first run - self._first_run_done = False - if not os.path.isfile(self._db_path): - db_folder = self._db_path[: self._db_path.rfind("/")] - os.makedirs(name=db_folder, exist_ok=True) - self._first_run_done = True - - async def areadiness(self) -> ReadinessEnum: - """ - Check the readiness of the SQLite database. - - This checks if the database is reachable and can be queried. - """ - try: - async with self._use_db() as db: - await db.execute("SELECT 1") - return ReadinessEnum.OK - except Exception: - logger.exception("Unknown error while checking SQLite readiness") - return ReadinessEnum.FAIL - - async def call_aget(self, call_id: UUID) -> CallStateModel | None: - logger.debug("Loading call %s", call_id) - - # Try cache - cache_key = self._cache_key_call_id(call_id) - call = await self._cache.aget(cache_key) - if call: - try: - return CallStateModel.model_validate_json(call) - except ValidationError: - logger.debug("Parsing error", exc_info=True) - - # Try live - call = None - async with self._use_db() as db: - cursor = await db.execute( - f"SELECT data FROM {self._config.table} WHERE id = ?", - (str(call_id),), - ) - row = await cursor.fetchone() - if row: - try: - call = CallStateModel.model_validate_json(row[0]) - except ValidationError: - logger.debug("Parsing error", exc_info=True) - - # Update cache - if call: - await self._cache.aset( - key=cache_key, - ttl_sec=60 * 60 * 24, # 1 day - value=call.model_dump_json(), - ) - - return call - - # TODO: Catch exceptions and return False if something goes wrong - async def call_aset(self, call: CallStateModel) -> bool: - logger.debug("Saving call %s", call.call_id) - - # Update live - data = call.model_dump_json(exclude_none=True) - async with self._use_db() as db: - await db.execute( - f"INSERT OR REPLACE INTO {self._config.table} VALUES (?, ?)", - ( - str(call.call_id), # id - data, # data - ), - ) - await db.commit() - - # Update cache - cache_key_id = self._cache_key_call_id(call.call_id) - await self._cache.aset( - key=cache_key_id, - ttl_sec=60 * 60 * 24, # 1 day - value=data, - ) # Update for ID - cache_key_phone_number = self._cache_key_phone_number( - call.initiate.phone_number - ) - await self._cache.adel( - cache_key_phone_number - ) # Invalidate for phone number because we 
don't know if it's the same call - - return True - - async def call_asearch_one(self, phone_number: str) -> CallStateModel | None: - logger.debug("Loading last call for %s", phone_number) - - # Try cache - cache_key = self._cache_key_phone_number(phone_number) - call = await self._cache.aget(cache_key) - if call: - try: - return CallStateModel.model_validate_json(call) - except ValidationError: - logger.debug("Parsing error", exc_info=True) - - # Try live - call = None - async with self._use_db() as db: - cursor = await db.execute( - f"SELECT data FROM {self._config.table} WHERE (JSON_EXTRACT(data, '$.initiate.phone_number') LIKE ? OR JSON_EXTRACT(data, '$.claim.policyholder_phone') LIKE ?) AND DATETIME(JSON_EXTRACT(data, '$.created_at')) >= DATETIME('now', '-{await callback_timeout_hour()} hours') ORDER BY DATETIME(JSON_EXTRACT(data, '$.created_at')) DESC LIMIT 1", - ( - phone_number, # data.initiate.phone_number - phone_number, # data.claim.policyholder_phone - ), - ) - row = await cursor.fetchone() - if row: - try: - call = CallStateModel.model_validate_json(row[0]) - except ValidationError: - logger.debug("Parsing error", exc_info=True) - - # Update cache - if call: - await self._cache.aset( - key=cache_key, - ttl_sec=60 * 60 * 24, # 1 day - value=call.model_dump_json(), - ) - - return call - - async def call_asearch_all( - self, - count: int, - phone_number: str | None = None, - ) -> tuple[list[CallStateModel] | None, int]: - logger.debug("Searching calls, for %s and count %s", phone_number, count) - # TODO: Cache results - calls, total = await asyncio.gather( - self._call_asearch_all_calls_worker(count, phone_number), - self._call_asearch_all_total_worker(phone_number), - ) - return calls, total - - async def _call_asearch_all_calls_worker( - self, - count: int, - phone_number: str | None = None, - ) -> list[CallStateModel] | None: - calls: list[CallStateModel] = [] - async with self._use_db() as db: - where_clause = ( - "WHERE (JSON_EXTRACT(data, '$.initiate.phone_number') LIKE ? OR JSON_EXTRACT(data, '$.claim.policyholder_phone') LIKE ?)" - if phone_number - else "" - ) - cursor = await db.execute( - f"SELECT data FROM {self._config.table} {where_clause} ORDER BY DATETIME(JSON_EXTRACT(data, '$.created_at')) DESC LIMIT ?", - ( - ( - phone_number, # data.initiate.phone_number - phone_number, # data.claim.policyholder_phone - count, # limit - ) - if phone_number - else (count,) # limit - ), - ) - rows = await cursor.fetchall() - for row in rows: - if not row: - continue - try: - calls.append(CallStateModel.model_validate_json(row[0])) - except ValidationError: - logger.debug("Parsing error", exc_info=True) - return calls - - async def _call_asearch_all_total_worker( - self, - phone_number: str | None = None, - ) -> int: - async with self._use_db() as db: - where_clause = ( - "WHERE (JSON_EXTRACT(data, '$.initiate.phone_number') LIKE ? OR JSON_EXTRACT(data, '$.claim.policyholder_phone') LIKE ?)" - if phone_number - else "" - ) - cursor = await db.execute( - f"SELECT COUNT(*) FROM {self._config.table} {where_clause}", - ( - ( - phone_number, # data.initiate.phone_number - phone_number, # data.claim.policyholder_phone - ) - if phone_number - else () - ), - ) - row = await cursor.fetchone() - return int(row[0]) if row else 0 - - async def _init_db(self, db: Connection): - """ - Initialize the database. 
- - See: https://sqlite.org/cgi/src/doc/wal2/doc/wal2.md - """ - logger.info("First run, init database") - # Optimize performance for concurrent writes - await db.execute("PRAGMA journal_mode=WAL") - # Create table - await db.execute( - f"CREATE TABLE IF NOT EXISTS {self._config.table} (id VARCHAR(36) PRIMARY KEY, data TEXT)" - ) - # Create indexes - await db.execute( - f"CREATE INDEX IF NOT EXISTS {self._config.table}_data_initiate_phone_number ON {self._config.table} (JSON_EXTRACT(data, '$.initiate.phone_number'))" - ) - await db.execute( - f"CREATE INDEX IF NOT EXISTS {self._config.table}_data_created_at ON {self._config.table} (DATETIME(JSON_EXTRACT(data, '$.created_at')))" - ) - await db.execute( - f"CREATE INDEX IF NOT EXISTS {self._config.table}_data_claim_policyholder_phone ON {self._config.table} (JSON_EXTRACT(data, '$.claim.policyholder_phone'))" - ) - - # Write changes to disk - await db.commit() - - @asynccontextmanager - async def _use_db(self) -> AsyncGenerator[Connection, None]: - """ - Generate the SQLite client and close it after use. - """ - async with sqlite_connect( - database=self._db_path, - ) as client: - if self._first_run_done: - await self._init_db(client) - yield client diff --git a/app/persistence/twilio.py b/app/persistence/twilio.py index 13d8817b..e52feb8d 100644 --- a/app/persistence/twilio.py +++ b/app/persistence/twilio.py @@ -17,7 +17,7 @@ def __init__(self, config: TwilioModel): logger.info("Using Twilio from number %s", config.phone_number) self._config = config - async def areadiness(self) -> ReadinessEnum: + async def readiness(self) -> ReadinessEnum: """ Check the readiness of the Twilio SMS service. @@ -36,7 +36,7 @@ async def areadiness(self) -> ReadinessEnum: logger.exception("Unknown error while checking Twilio readiness") return ReadinessEnum.FAIL - async def asend(self, content: str, phone_number: PhoneNumber) -> bool: + async def send(self, content: str, phone_number: PhoneNumber) -> bool: logger.info("Sending SMS to %s", phone_number) success = False logger.info("SMS content: %s", content) @@ -59,7 +59,7 @@ async def asend(self, content: str, phone_number: PhoneNumber) -> bool: logger.debug("SMS sent to %s", phone_number) success = True except TwilioRestException: - logger.error("Error sending SMS to %s", phone_number, exc_info=True) + logger.exception("Error sending SMS to %s", phone_number) return success async def _use_client(self) -> Client: diff --git a/cicd/bicep/app.bicep b/cicd/bicep/app.bicep index f7b666fc..0d119048 100644 --- a/cicd/bicep/app.bicep +++ b/cicd/bicep/app.bicep @@ -33,7 +33,6 @@ var phonenumberSanitized = replace(localConfig.communication_services.phone_numb var config = { public_domain: appUrl database: { - mode: 'cosmos_db' cosmos_db: { container: container.name database: database.name @@ -902,7 +901,7 @@ resource configValues 'Microsoft.AppConfiguration/configurationStores/keyValues@ answer_hard_timeout_sec: 180 answer_soft_timeout_sec: 30 callback_timeout_hour: 3 - phone_silence_timeout_sec: 10 + phone_silence_timeout_sec: 20 recognition_retry_max: 2 recording_enabled: false slow_llm_for_chat: false diff --git a/pyproject.toml b/pyproject.toml index 199f01cf..bd1d2a57 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,7 +9,6 @@ dependencies = [ "aiohttp-retry~=2.9", # Retry middleware for aiohttp, used with Twilio SDK "aiohttp[speedups]~=3.10", # Async HTTP client for Azure and Twilio SDKs, plus async DNS resolver and async Brotli compression "aiojobs~=1.3", # Async job scheduler - "aiosqlite~=0.20", # Async 
SQLite3 driver "azure-ai-translation-text~=1.0", # Azure Cognitive Services Text Translation "azure-appconfiguration~=1.7", # Outsourced configuration for live updates "azure-cognitiveservices-speech~=1.41", # Azure AI Speech @@ -32,7 +31,6 @@ dependencies = [ "opentelemetry-instrumentation-httpx~=0.0a0", # OpenTelemetry instrumentation for HTTPX "opentelemetry-instrumentation-openai~=0.0a0", # OpenTelemetry instrumentation for OpenAI "opentelemetry-instrumentation-redis~=0.0a0", # OpenTelemetry instrumentation for Redis - "opentelemetry-instrumentation-sqlite3~=0.0a0", # OpenTelemetry instrumentation for SQLite3 "opentelemetry-semantic-conventions~=0.0a0", # OpenTelemetry conventions, to standardize telemetry data "phonenumbers~=8.13", # Phone number parsing and formatting, used with Pydantic "pydantic-extra-types~=2.9", # Extra types for Pydantic diff --git a/tests/cache.py b/tests/cache.py index a1acc686..41e82eca 100644 --- a/tests/cache.py +++ b/tests/cache.py @@ -41,14 +41,14 @@ async def test_acid(random_text: str, cache_mode: CacheModeEnum) -> None: test_value = "lorem ipsum" # Check not exists - assume(not await cache.aget(test_key)) + assume(not await cache.get(test_key)) # Insert test call - await cache.aset( + await cache.set( key=test_key, ttl_sec=60, value=test_value, ) # Check point read - assume(await cache.aget(test_key) == test_value.encode()) + assume(await cache.get(test_key) == test_value.encode()) diff --git a/tests/llm.py b/tests/llm.py index 337bcf0c..544cf1f1 100644 --- a/tests/llm.py +++ b/tests/llm.py @@ -24,6 +24,7 @@ on_end_call, on_ivr_recognized, on_play_completed, + on_play_started, ) from app.helpers.call_llm import _out_answer from app.helpers.logging import logger @@ -305,6 +306,10 @@ async def _training_callback(_call: CallStateModel) -> None: scheduler=scheduler, training_callback=_training_callback, ) + # Play + await on_play_started( + call=call, + ) # Receip await on_play_completed( call=call, diff --git a/tests/store.py b/tests/store.py index c76f04db..9714d8d7 100644 --- a/tests/store.py +++ b/tests/store.py @@ -2,26 +2,12 @@ from pytest_assume.plugin import assume from app.helpers.config import CONFIG -from app.helpers.config_models.database import ModeEnum as DatabaseModeEnum from app.models.call import CallStateModel -@pytest.mark.parametrize( - "database_mode", - [ - pytest.param( - DatabaseModeEnum.SQLITE, - id="sqlite", - ), - pytest.param( - DatabaseModeEnum.COSMOS_DB, - id="cosmos_db", - ), - ], -) @pytest.mark.asyncio(scope="session") @pytest.mark.repeat(10) # Catch multi-threading and concurrency issues -async def test_acid(call: CallStateModel, database_mode: DatabaseModeEnum) -> None: +async def test_acid(call: CallStateModel) -> None: """ Test ACID properties of the database backend. @@ -33,18 +19,16 @@ async def test_acid(call: CallStateModel, database_mode: DatabaseModeEnum) -> No Test is repeated 10 times to catch multi-threading and concurrency issues. 
""" - # Set database mode - CONFIG.database.mode = database_mode db = CONFIG.database.instance() # Check not exists - assume(not await db.call_aget(call.call_id)) - assume(await db.call_asearch_one(call.initiate.phone_number) != call) + assume(not await db.call_get(call.call_id)) + assume(await db.call_search_one(call.initiate.phone_number) != call) assume( call not in ( ( - await db.call_asearch_all( + await db.call_search_all( phone_number=call.initiate.phone_number, count=1 ) )[0] @@ -53,18 +37,18 @@ async def test_acid(call: CallStateModel, database_mode: DatabaseModeEnum) -> No ) # Insert test call - await db.call_aset(call) + await db.call_create(call) # Check point read - assume(await db.call_aget(call.call_id) == call) + assume(await db.call_get(call.call_id) == call) # Check search one - assume(await db.call_asearch_one(call.initiate.phone_number) == call) + assume(await db.call_search_one(call.initiate.phone_number) == call) # Check search all assume( call in ( ( - await db.call_asearch_all( + await db.call_search_all( phone_number=call.initiate.phone_number, count=1 ) )[0] diff --git a/uv.lock b/uv.lock index e1e0c4cc..f26868e3 100644 --- a/uv.lock +++ b/uv.lock @@ -113,18 +113,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/76/ac/a7305707cb852b7e16ff80eaf5692309bde30e2b1100a1fcacdc8f731d97/aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17", size = 7617 }, ] -[[package]] -name = "aiosqlite" -version = "0.20.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "typing-extensions" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/0d/3a/22ff5415bf4d296c1e92b07fd746ad42c96781f13295a074d58e77747848/aiosqlite-0.20.0.tar.gz", hash = "sha256:6d35c8c256637f4672f843c31021464090805bf925385ac39473fb16eaaca3d7", size = 21691 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/00/c4/c93eb22025a2de6b83263dfe3d7df2e19138e345bca6f18dba7394120930/aiosqlite-0.20.0-py3-none-any.whl", hash = "sha256:36a1deaca0cac40ebe32aac9977a6e2bbc7f5189f23f4a54d5908986729e5bd6", size = 15564 }, -] - [[package]] name = "annotated-types" version = "0.7.0" @@ -466,7 +454,6 @@ dependencies = [ { name = "aiohttp", extra = ["speedups"] }, { name = "aiohttp-retry" }, { name = "aiojobs" }, - { name = "aiosqlite" }, { name = "azure-ai-translation-text" }, { name = "azure-appconfiguration" }, { name = "azure-cognitiveservices-speech" }, @@ -489,7 +476,6 @@ dependencies = [ { name = "opentelemetry-instrumentation-httpx" }, { name = "opentelemetry-instrumentation-openai" }, { name = "opentelemetry-instrumentation-redis" }, - { name = "opentelemetry-instrumentation-sqlite3" }, { name = "opentelemetry-semantic-conventions" }, { name = "phonenumbers" }, { name = "pydantic", extra = ["email"] }, @@ -529,7 +515,6 @@ requires-dist = [ { name = "aiohttp", extras = ["speedups"], specifier = "~=3.10" }, { name = "aiohttp-retry", specifier = "~=2.9" }, { name = "aiojobs", specifier = "~=1.3" }, - { name = "aiosqlite", specifier = "~=0.20" }, { name = "azure-ai-translation-text", specifier = "~=1.0" }, { name = "azure-appconfiguration", specifier = "~=1.7" }, { name = "azure-cognitiveservices-speech", specifier = "~=1.41" }, @@ -554,7 +539,6 @@ requires-dist = [ { name = "opentelemetry-instrumentation-httpx", specifier = "~=0.0a0" }, { name = "opentelemetry-instrumentation-openai", specifier = "~=0.0a0" }, { name = "opentelemetry-instrumentation-redis", specifier = "~=0.0a0" }, - { name = 
"opentelemetry-instrumentation-sqlite3", specifier = "~=0.0a0" }, { name = "opentelemetry-semantic-conventions", specifier = "~=0.0a0" }, { name = "phonenumbers", specifier = "~=8.13" }, { name = "pydantic", extras = ["email"], specifier = "~=2.9" }, @@ -1872,20 +1856,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/43/df/0df9226d1b14f29d23c07e6194b9fd5ad50e7d987b7fd13df7dcf718aeb1/opentelemetry_instrumentation_requests-0.48b0-py3-none-any.whl", hash = "sha256:d4f01852121d0bd4c22f14f429654a735611d4f7bf3cf93f244bdf1489b2233d", size = 12366 }, ] -[[package]] -name = "opentelemetry-instrumentation-sqlite3" -version = "0.48b0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "opentelemetry-api" }, - { name = "opentelemetry-instrumentation" }, - { name = "opentelemetry-instrumentation-dbapi" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/36/fa/ef80b55f8b2a5814fc4a868159f2b5b3c8316d20d449ba2f9f314faed9f1/opentelemetry_instrumentation_sqlite3-0.48b0.tar.gz", hash = "sha256:483b973a197890d69a25d17956d6fa66c540fc0f9f73190c93c98d2dabb3188b", size = 7530 } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9c/04/6d1b1789e1c1da35d839a0075a7cfb1ca14de4f06e75a390f8ce4402e101/opentelemetry_instrumentation_sqlite3-0.48b0-py3-none-any.whl", hash = "sha256:558ff8e7b78d0647cdffb1496c5e92f72d1f459e9ae9c6d3ae9eab3517d481e5", size = 8716 }, -] - [[package]] name = "opentelemetry-instrumentation-urllib" version = "0.48b0" @@ -2532,7 +2502,7 @@ wheels = [ [[package]] name = "ragas" -version = "0.2.6" +version = "0.2.7" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "appdirs" },