Model Inferences
Inferences let you request model-predicted annotations on your source files.
All model inference requests are asynchronous, meaning you submit the request and then poll for its status until the job completes.
Request Inference
POST https://api.annolab.ai/v1/infer/batch

Request Inference Status
GET https://api.annolab.ai/v1/infer/batch/{job_id}
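Both calls are shown end-to-end in the example below. For orientation, that example reads an inferenceJobId field from the POST response and a status field from the status payload. A minimal sketch of those shapes, assuming everything else (including the in-progress status value) is illustrative only:

# Sketch of the response fields the example below relies on; only
# 'inferenceJobId' and 'status' are confirmed by that example.
post_response = {
    'inferenceJobId': 12345     # hypothetical value; used to build the status URL
}
status_response = {
    'status': 'Running'         # hypothetical in-progress value; the job
                                # ends as 'Finished' or 'Errored'
}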
The following code calls a specific model on two sources, polls the job status until the inference is complete, and then retrieves the results.
Python
import requests
import time

ANNO_LAB_API_KEY = 'XXXXXXX-XXXXXXX-XXXXXXX-XXXXXXX'

inference_body = {
    'groupName': 'Company Name',
    'projectIdentifier': 'My Project',
    'sourceIds': [4024, 5853],
    'modelIdentifier': 'Staple + Classify Documents',
    'outputLayerIdentifier': 'Gold Set'
}
headers = {
    'Authorization': 'Api-Key ' + ANNO_LAB_API_KEY,
}

# Kick off the asynchronous inference job.
url = 'https://api.annolab.ai/v1/infer/batch'
response = requests.post(url, headers=headers, json=inference_body)
print(response.json())

# Poll the status endpoint until the job finishes, errors, or times out.
get_url = 'https://api.annolab.ai/v1/infer/batch/' + str(response.json()['inferenceJobId'])
maximum_timeout_seconds = 1800
time_taken = 0
inference_is_finished = False
start_time = time.time()

while not inference_is_finished and time_taken < maximum_timeout_seconds:
    # The status check is a plain GET; no request body is needed.
    status_response = requests.get(get_url, headers=headers).json()
    if status_response['status'] in ['Finished', 'Errored']:
        print('Inference ' + status_response['status'])
        print(status_response)
        inference_is_finished = True
    else:
        time.sleep(5)  # wait between polls instead of hammering the API
    time_taken = time.time() - start_time
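For repeated use, the same request-and-poll flow can be wrapped in a helper that also surfaces HTTP errors. A minimal sketch, assuming the endpoints and fields shown above; the run_inference name, the poll interval, and the TimeoutError behavior are illustrative choices, not part of the AnnoLab API:

import time
import requests

API_BASE = 'https://api.annolab.ai/v1/infer/batch'

def run_inference(body, headers, timeout_seconds=1800, poll_interval=5):
    """Submit an inference job and block until it finishes, errors, or times out."""
    job = requests.post(API_BASE, headers=headers, json=body)
    job.raise_for_status()  # surface HTTP-level failures early
    job_id = job.json()['inferenceJobId']

    deadline = time.time() + timeout_seconds
    while time.time() < deadline:
        status = requests.get(f'{API_BASE}/{job_id}', headers=headers)
        status.raise_for_status()
        payload = status.json()
        if payload['status'] in ('Finished', 'Errored'):
            return payload
        time.sleep(poll_interval)
    raise TimeoutError(f'Inference job {job_id} did not finish within {timeout_seconds} seconds')

A call like run_inference(inference_body, headers) then returns the final status payload for the 'Finished' or 'Errored' job.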