
Commit

Fix test models
javiermtorres committed Dec 11, 2024
1 parent 1d25f6b commit 7e02ab1
Showing 2 changed files with 8 additions and 16 deletions.
@@ -46,16 +46,10 @@ def test_upload_data_launch_job(
eval_payload = {
"name": "test_run_hugging_face",
"description": "Test run for Huggingface model",
# "model": "hf-internal-testing/tiny-random-BartForCausalLM",
# "model": "mlabonne/dummy-CodeLlama-7b-hf",
"model": "hf://hf-internal-testing/tiny-random-LlamaForCausalLM",
"dataset": str(created_dataset.id),
"config_template": simple_eval_template,
"max_samples": 10,
# Investigate!
# "model_url": "string",
# "system_prompt": "string",
# "config_template": "string",
}

create_evaluation_job_response = local_client.post(
@@ -75,24 +69,22 @@ def test_upload_data_launch_job(
print(f"--> try {i}: {get_job_response_model}")
if get_job_response_model.status == JobStatus.SUCCEEDED.value:
succeeded = True
print("Job succeeded!!!")
break
if get_job_response_model.status == JobStatus.FAILED.value:
succeeded = False
print("Job failed...")
break
time.sleep(1)
time.sleep(30)
print("Check if job succeeded")
assert succeeded

infer_payload = {
"name": "test_run_hugging_face",
"description": "Test run for Huggingface model",
"model": "hf://hf-internal-testing/tiny-random-LlamaForCausalLM",
"model": "hf://hf-internal-testing/tiny-random-t5",
"dataset": str(created_dataset.id),
# "config_template": simple_infer_template,
"max_samples": 10,
# Investigate!
# "model_url": "string",
# "system_prompt": "string",
# "config_template": "string",
}
create_inference_job_response = local_client.post(
"/jobs/inference/", headers=headers, json=infer_payload
@@ -115,7 +107,7 @@ def test_upload_data_launch_job(
if get_job_response_model.status == JobStatus.FAILED.value:
succeeded = False
break
time.sleep(20)
time.sleep(30)
assert succeeded


@@ -169,7 +161,7 @@ def test_full_experiment_launch(
if get_job_response_model.status == JobStatus.FAILED.value:
succeeded = False
break
time.sleep(1)
time.sleep(30)
assert succeeded


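A note on the pattern these tests use: each test submits a job, then polls its status in a loop until it reports SUCCEEDED or FAILED, sleeping between attempts; this commit standardizes the sleep to 30 seconds. Below is a minimal sketch of that loop. The helper name, the GET route, the response shape, and the plain status strings are assumptions for illustration; only the test client, the headers argument, and the 30-second interval come from the diff above.

import time

def wait_until_job_finishes(client, headers, job_id, max_tries=30, poll_wait=30):
    # Hypothetical helper illustrating the polling pattern in the tests above.
    # The GET route and response fields are assumptions, not taken from the diff.
    for i in range(max_tries):
        response = client.get(f"/jobs/{job_id}", headers=headers)
        status = response.json()["status"]
        print(f"--> try {i}: {status}")
        if status == "SUCCEEDED":
            return True
        if status == "FAILED":
            return False
        time.sleep(poll_wait)  # the commit sets this interval to 30 s everywhere
    return False

Raising the per-poll sleep from 1-20 seconds to a uniform 30 seconds trades a little latency for fewer requests against the jobs API while the tiny test models run, presumably to reduce flakiness in CI.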
@@ -82,7 +82,7 @@ def test_job_lifecycle_remote_ok(lumi_client, dialog_data, simple_eval_template)
assert job_creation_result is not None
assert lumi_client.jobs.get_jobs() is not None

job_status = lumi_client.jobs.wait_for_job(job_creation_result.id)
job_status = lumi_client.jobs.wait_for_job(id=job_creation_result.id, retries=30, poll_wait=30)
logger.info(job_status)

download_info = lumi_client.jobs.get_job_download(job_creation_result.id)
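On the client-library side, the test now passes explicit polling arguments to wait_for_job. Assuming retries is the number of status checks and poll_wait is the sleep in seconds between them (the signature is not shown in the diff), the call allows roughly 30 x 30 s, about 15 minutes, before the test gives up. A small usage sketch under those assumptions:

# Sketch only: the keyword semantics are assumed; wait_for_job and get_job_download
# appear in the diff, and the surrounding names mirror the test above.
job_status = lumi_client.jobs.wait_for_job(
    id=job_creation_result.id,
    retries=30,    # up to 30 status checks
    poll_wait=30,  # wait 30 s between checks (~15 min total budget)
)
logger.info(job_status)
download_info = lumi_client.jobs.get_job_download(job_creation_result.id)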
