Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Some more fixes to getting ARS children #22

Merged
merged 3 commits into from
Jun 22, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@

setup(
name="sri-test-harness",
version="0.2.2",
version="0.2.3",
author="Max Wang",
author_email="max@covar.com",
url="https://github.com/TranslatorSRI/TestHarness",
Expand Down
18 changes: 14 additions & 4 deletions test_harness/result_collector.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
"""The Collector of Results."""

import logging
from typing import Union
from translator_testing_model.datamodel.pydanticmodel import TestAsset, TestCase

Expand All @@ -9,8 +10,9 @@
class ResultCollector:
"""Collect results for easy dissemination."""

def __init__(self):
def __init__(self, logger: logging.Logger):
"""Initialize the Collector."""
self.logger = logger
self.agents = [
"ars",
"aragorn",
Expand All @@ -20,7 +22,7 @@ def __init__(self):
"unsecret-agent",
"cqs",
]
query_types = ["TopAnswer", "Acceptable", "BadButForgivable", "NeverShow"]
self.query_types = ["TopAnswer", "Acceptable", "BadButForgivable", "NeverShow"]
self.result_types = {
"PASSED": "PASSED",
"FAILED": "FAILED",
Expand All @@ -30,7 +32,7 @@ def __init__(self):
self.stats = {}
for agent in self.agents:
self.stats[agent] = {}
for query_type in query_types:
for query_type in self.query_types:
self.stats[agent][query_type] = {}
for result_type in self.result_types.values():
self.stats[agent][query_type][result_type] = 0
Expand All @@ -55,7 +57,15 @@ def collect_result(
result_type = self.result_types.get(
get_tag(report[agent]), "Test Error"
)
self.stats[agent][query_type][result_type] += 1
if (
query_type in self.stats[agent]
and result_type in self.stats[agent][query_type]
):
self.stats[agent][query_type][result_type] += 1
else:
self.logger.error(
f"Got {query_type} and {result_type} and can't put into stats!"
)

# add result to csv
agent_results = ",".join(
Expand Down
8 changes: 7 additions & 1 deletion test_harness/run.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ async def run_tests(
query_runner = QueryRunner(logger)
logger.info("Runner is getting service registry")
await query_runner.retrieve_registry(trapi_version="1.5.0")
collector = ResultCollector()
collector = ResultCollector(logger)
# loop over all tests
for test in tqdm(tests.values()):
status = "PASSED"
Expand All @@ -59,6 +59,12 @@ async def run_tests(
test_ids = []

for asset in test.test_assets:
# throw out any assets with unsupported expected outputs, i.e. OverlyGeneric
if asset.expected_output not in collector.query_types:
logger.warning(
f"Asset id {asset.id} has unsupported expected output."
)
continue
# create test in Test Dashboard
test_id = ""
try:
Expand Down
37 changes: 27 additions & 10 deletions test_harness/runner/query_runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
from test_harness.utils import hash_test_asset, normalize_curies

MAX_QUERY_TIME = 600
MAX_ARA_TIME = 360

env_map = {
"dev": "development",
Expand Down Expand Up @@ -162,8 +163,9 @@ async def get_ars_responses(
current_time = time.time()

response = None
status = 500
# while we stay within the query max time
while current_time - start_time <= MAX_QUERY_TIME:
while current_time - start_time <= MAX_ARA_TIME:
# get query status of child query
async with httpx.AsyncClient(timeout=30) as client:
res = await client.get(f"{base_url}/ars/api/messages/{child_pk}")
Expand All @@ -172,23 +174,38 @@ async def get_ars_responses(
status = response.get("fields", {}).get("status")
if status == "Done":
break
if status == "Error":
elif status == "Error" or status == "Unknown":
# query errored, need to capture
break
self.logger.info(f"{infores} is not Done, waiting...")
current_time = time.time()
await asyncio.sleep(5)
elif status == "Running":
self.logger.info(f"{infores} is still Running...")
current_time = time.time()
await asyncio.sleep(10)
else:
self.logger.info(f"Got unhandled status: {status}")
break
else:
self.logger.warning(
f"Timed out getting ARS child messages after {MAX_QUERY_TIME / 60} minutes."
f"Timed out getting ARS child messages after {MAX_ARA_TIME / 60} minutes."
)

# add response to output
if response is not None:
self.logger.info(f"Got reponse for {infores}!")
status_code = response.get("fields", {}).get("code", 410)
self.logger.info(
f"Got reponse for {infores} with status code {status_code}."
)
responses[infores] = {
"response": response.get("fields", {}).get(
"data", {"message": {"results": []}}
),
"status_code": status_code,
}
else:
self.logger.warning(f"Got error from {infores}")
responses[infores] = {
"response": response.get("fields", {}).get("data", {}),
"status_code": response.get("fields", {}).get("code", 410),
"response": {"message": {"results": []}},
"status_code": status,
}

# After getting all individual ARA responses, get and save the merged version
Expand Down Expand Up @@ -223,7 +240,7 @@ async def get_ars_responses(
merged_message = res.json()
responses["ars"] = {
"response": merged_message.get("fields", {}).get(
"data", {}
"data", {"message": {"results": []}}
),
"status_code": merged_message.get("fields", {}).get(
"code", 410
Expand Down
Loading