From 7e02ab164c25feae81f5c3a21ee2aff7902b9abb Mon Sep 17 00:00:00 2001
From: Javier Torres
Date: Wed, 11 Dec 2024 23:30:47 +0100
Subject: [PATCH] Fix test models

---
 .../api/routes/test_api_workflows.py        | 22 ++++++-------------
 .../sdk/tests/integration/test_scenarios.py |  2 +-
 2 files changed, 8 insertions(+), 16 deletions(-)

diff --git a/lumigator/python/mzai/backend/backend/tests/integration/api/routes/test_api_workflows.py b/lumigator/python/mzai/backend/backend/tests/integration/api/routes/test_api_workflows.py
index bff14d77..c044c811 100644
--- a/lumigator/python/mzai/backend/backend/tests/integration/api/routes/test_api_workflows.py
+++ b/lumigator/python/mzai/backend/backend/tests/integration/api/routes/test_api_workflows.py
@@ -46,16 +46,10 @@ def test_upload_data_launch_job(
     eval_payload = {
         "name": "test_run_hugging_face",
         "description": "Test run for Huggingface model",
-        # "model": "hf-internal-testing/tiny-random-BartForCausalLM",
-        # "model": "mlabonne/dummy-CodeLlama-7b-hf",
         "model": "hf://hf-internal-testing/tiny-random-LlamaForCausalLM",
         "dataset": str(created_dataset.id),
         "config_template": simple_eval_template,
         "max_samples": 10,
-        # Investigate!
-        # "model_url": "string",
-        # "system_prompt": "string",
-        # "config_template": "string",
     }
 
     create_evaluation_job_response = local_client.post(
@@ -75,24 +69,22 @@
         print(f"--> try {i}: {get_job_response_model}")
         if get_job_response_model.status == JobStatus.SUCCEEDED.value:
             succeeded = True
+            print("Job succeeded!!!")
             break
         if get_job_response_model.status == JobStatus.FAILED.value:
             succeeded = False
+            print("Job failed...")
             break
-        time.sleep(1)
+        time.sleep(30)
+    print("Check if job succeeded")
     assert succeeded
 
     infer_payload = {
         "name": "test_run_hugging_face",
         "description": "Test run for Huggingface model",
-        "model": "hf://hf-internal-testing/tiny-random-LlamaForCausalLM",
+        "model": "hf://hf-internal-testing/tiny-random-t5",
         "dataset": str(created_dataset.id),
-        # "config_template": simple_infer_template,
         "max_samples": 10,
-        # Investigate!
-        # "model_url": "string",
-        # "system_prompt": "string",
-        # "config_template": "string",
     }
     create_inference_job_response = local_client.post(
         "/jobs/inference/", headers=headers, json=infer_payload
@@ -115,7 +107,7 @@
         if get_job_response_model.status == JobStatus.FAILED.value:
             succeeded = False
             break
-        time.sleep(20)
+        time.sleep(30)
 
     assert succeeded
 
@@ -169,7 +161,7 @@ def test_full_experiment_launch(
         if get_job_response_model.status == JobStatus.FAILED.value:
             succeeded = False
             break
-        time.sleep(1)
+        time.sleep(30)
 
     assert succeeded
 
diff --git a/lumigator/python/mzai/sdk/tests/integration/test_scenarios.py b/lumigator/python/mzai/sdk/tests/integration/test_scenarios.py
index 7bbf3ef5..09a8cefb 100644
--- a/lumigator/python/mzai/sdk/tests/integration/test_scenarios.py
+++ b/lumigator/python/mzai/sdk/tests/integration/test_scenarios.py
@@ -82,7 +82,7 @@ def test_job_lifecycle_remote_ok(lumi_client, dialog_data, simple_eval_template)
     assert job_creation_result is not None
     assert lumi_client.jobs.get_jobs() is not None
 
-    job_status = lumi_client.jobs.wait_for_job(job_creation_result.id)
+    job_status = lumi_client.jobs.wait_for_job(id=job_creation_result.id, retries=30, poll_wait=30)
     logger.info(job_status)
 
     download_info = lumi_client.jobs.get_job_download(job_creation_result.id)
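
Note, not part of the patch: the three backend polling loops retuned above all repeat the
same poll-until-terminal pattern (check status, break on SUCCEEDED/FAILED, sleep, retry).
A minimal sketch of a shared helper follows; the name wait_for_terminal_status, the
JobTimeoutError class, and the literal "SUCCEEDED"/"FAILED" strings are hypothetical
stand-ins (the real tests compare against JobStatus.SUCCEEDED.value / JobStatus.FAILED.value).

import time


class JobTimeoutError(Exception):
    """Raised when a job does not reach a terminal state in time."""


def wait_for_terminal_status(get_status, max_tries=30, poll_wait=30):
    """Poll get_status() until it returns a terminal state.

    get_status is any zero-argument callable returning a status string,
    e.g. one wrapping the local_client job-status request in the tests above.
    Returns True for "SUCCEEDED", False for "FAILED" (assumed status values).
    """
    for i in range(max_tries):
        status = get_status()
        print(f"--> try {i}: {status}")
        if status == "SUCCEEDED":
            return True
        if status == "FAILED":
            return False
        time.sleep(poll_wait)  # same 30s cadence the patch settles on
    raise JobTimeoutError(f"no terminal status after {max_tries * poll_wait}s")

With a helper like this, each test's loop collapses to a single call, and the retry count
and poll interval are tuned in one place instead of four.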