
Kiri Model API v1.0.0

Scroll down for code samples, example requests, and responses. Code samples are provided in shell (curl), Python, and JavaScript.

Welcome to our API documentation! Things to note:

If anything is unclear or you find that something is not working as intended, please get in touch!

Base URLs:

https://api.kiri.ai

Email: Support
License: Apache 2.0

Authentication
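
As the code samples show, each request passes an API key in the x-api-key request header. A minimal sketch of an authenticated call, assuming the Python requests library and an API key substituted for the API_KEY placeholder:

import requests

API_KEY = "API_KEY"  # placeholder: substitute your own key

# Every endpoint is a JSON POST with the key in the x-api-key header.
response = requests.post(
    "https://api.kiri.ai/vectorisation",
    headers={"x-api-key": API_KEY},
    json={"text": "iPhone 12 128GB", "model": "english"},
)
print(response.json())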

Tasks

Endpoints to call tasks that implement various models for your use cases.

vectorisation

Code samples

curl --request POST \
  --url https://api.kiri.ai/vectorisation \
  --header 'Accept: application/json' \
  --header 'Content-Type: application/json' \
  --header 'x-api-key: API_KEY' \
  --data '{"text":"iPhone 12 128GB","model":"english"}'
import http.client

conn = http.client.HTTPSConnection("api.kiri.ai")

payload = "{\"text\":\"iPhone 12 128GB\",\"model\":\"english\"}"

headers = {
    'Content-Type': "application/json",
    'Accept': "application/json",
    'x-api-key': "API_KEY"
    }

conn.request("POST", "/vectorisation", payload, headers)

res = conn.getresponse()
data = res.read()

print(data.decode("utf-8"))
const data = JSON.stringify({
  "text": "iPhone 12 128GB",
  "model": "english"
});

const xhr = new XMLHttpRequest();
xhr.withCredentials = true;

xhr.addEventListener("readystatechange", function () {
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

xhr.open("POST", "https://api.kiri.ai/vectorisation");
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader("Accept", "application/json");
xhr.setRequestHeader("x-api-key", "API_KEY");

xhr.send(data);

POST /vectorisation

Vectorises a string or a list of strings.

Body parameter

{
  "text": "iPhone 12 128GB",
  "model": "english"
}

Parameters

Name In Type Required Description
body body VectorisationBody true text or list of text to vectorise

Example responses

200 Response

{
  "vector": [
    0.92949192,
    0.2312301
  ]
}

Responses

Status Meaning Description Schema
200 OK successfully vectorised VectorisationResponse
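
A common use of the returned vectors is semantic similarity. The sketch below is a hedged example assuming the Python requests library; the cosine_similarity helper is defined here for illustration and is not part of the API. It sends a batch request (see VectorisationBatch below) and compares the two returned vectors:

import math
import requests

def cosine_similarity(a, b):
    # Cosine of the angle between two vectors returned by /vectorisation.
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = math.sqrt(sum(x * x for x in a))
    norm_b = math.sqrt(sum(y * y for y in b))
    return dot / (norm_a * norm_b)

resp = requests.post(
    "https://api.kiri.ai/vectorisation",
    headers={"x-api-key": "API_KEY"},
    json={"text": ["iPhone 12 128GB", "RTX 3090"], "model": "english"},
)
vectors = resp.json()["vectorList"]  # batch requests return vectorList
print(cosine_similarity(vectors[0], vectors[1]))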

question answering

Code samples

curl --request POST \
  --url https://api.kiri.ai/qa \
  --header 'Accept: application/json' \
  --header 'Content-Type: application/json' \
  --header 'x-api-key: API_KEY' \
  --data '{"question":"What is the meaning of life?","context":"The meaning of life is 42.","prev_q":["What is not the meaning of life?"],"prev_a":["unknown"],"model":"english"}'
import http.client

conn = http.client.HTTPSConnection("api.kiri.ai")

payload = "{\"question\":\"What is the meaning of life?\",\"context\":\"The meaning of life is 42.\",\"prev_q\":[\"What is not the meaning of life?\"],\"prev_a\":[\"unknown\"],\"model\":\"english\"}"

headers = {
    'Content-Type': "application/json",
    'Accept': "application/json",
    'x-api-key': "API_KEY"
    }

conn.request("POST", "/qa", payload, headers)

res = conn.getresponse()
data = res.read()

print(data.decode("utf-8"))
const data = JSON.stringify({
  "question": "What is the meaning of life?",
  "context": "The meaning of life is 42.",
  "prev_q": [
    "What is not the meaning of life?"
  ],
  "prev_a": [
    "unknown"
  ],
  "model": "english"
});

const xhr = new XMLHttpRequest();
xhr.withCredentials = true;

xhr.addEventListener("readystatechange", function () {
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

xhr.open("POST", "https://api.kiri.ai/qa");
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader("Accept", "application/json");
xhr.setRequestHeader("x-api-key", "API_KEY");

xhr.send(data);

POST /qa

Performs QA on question, context, previous questions and previous answers. Answers "unknown" if the question cannot be answered.

Body parameter

{
  "question": "What is the meaning of life?",
  "context": "The meaning of life is 42.",
  "prev_q": [
    "What is not the meaning of life?"
  ],
  "prev_a": [
    "unknown"
  ],
  "model": "english"
}

Parameters

Name In Type Required Description
body body QABody true none

Example responses

200 Response

{
  "answer": "42"
}

Responses

Status Meaning Description Schema
200 OK successful QA QAResponse
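
For multi-turn conversations, prev_q and prev_a carry the previous questions and answers, as in the example body above. A minimal sketch of a follow-up question, assuming the Python requests library:

import requests

headers = {"x-api-key": "API_KEY"}
context = "The meaning of life is 42."
prev_q, prev_a = [], []

for question in ["What is not the meaning of life?", "What is the meaning of life?"]:
    resp = requests.post(
        "https://api.kiri.ai/qa",
        headers=headers,
        json={
            "question": question,
            "context": context,
            "prev_q": prev_q,
            "prev_a": prev_a,
            "model": "english",
        },
    )
    answer = resp.json()["answer"]
    # Keep the history so the next question can refer back to it.
    prev_q.append(question)
    prev_a.append(answer)
    print(question, "->", answer)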

zero shot classification

Code samples

curl --request POST \
  --url https://api.kiri.ai/classification \
  --header 'Accept: application/json' \
  --header 'Content-Type: application/json' \
  --header 'x-api-key: API_KEY' \
  --data '{"text":"I am really mad because my product broke.","labels":["product issue","furniture","space"],"model":"english"}'
import http.client

conn = http.client.HTTPSConnection("api.kiri.ai")

payload = "{\"text\":\"I am really mad because my product broke.\",\"labels\":[\"product issue\",\"furniture\",\"space\"],\"model\":\"english\"}"

headers = {
    'Content-Type': "application/json",
    'Accept': "application/json",
    'x-api-key': "API_KEY"
    }

conn.request("POST", "/classification", payload, headers)

res = conn.getresponse()
data = res.read()

print(data.decode("utf-8"))
const data = JSON.stringify({
  "text": "I am really mad because my product broke.",
  "labels": [
    "product issue",
    "furniture",
    "space"
  ],
  "model": "english"
});

const xhr = new XMLHttpRequest();
xhr.withCredentials = true;

xhr.addEventListener("readystatechange", function () {
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

xhr.open("POST", "https://api.kiri.ai/classification");
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader("Accept", "application/json");
xhr.setRequestHeader("x-api-key", "API_KEY");

xhr.send(data);

POST /classification

Performs zero shot classification on the provided text and labels.

Body parameter

{
  "text": "I am really mad because my product broke.",
  "labels": [
    "product issue",
    "furniture",
    "space"
  ],
  "model": "english"
}

Parameters

Name In Type Required Description
body body ClassificationBody true none

Example responses

200 Response

{
  "probabilities": {
    "product issue": 0.98,
    "furniture": 0.1,
    "space": 0.05
  }
}

Responses

Status Meaning Description Schema
200 OK successful classification ClassificationResponse
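
The response maps each supplied label to a probability, so picking the most likely label is straightforward. A hedged sketch assuming the Python requests library:

import requests

resp = requests.post(
    "https://api.kiri.ai/classification",
    headers={"x-api-key": "API_KEY"},
    json={
        "text": "I am really mad because my product broke.",
        "labels": ["product issue", "furniture", "space"],
        "model": "english",
    },
)
probabilities = resp.json()["probabilities"]
# Pick the label with the highest predicted probability.
best_label = max(probabilities, key=probabilities.get)
print(best_label, probabilities[best_label])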

zero shot image classification

Code samples

curl --request POST \
  --url https://api.kiri.ai/image-classification \
  --header 'Accept: application/json' \
  --header 'Content-Type: application/json' \
  --header 'x-api-key: API_KEY' \
  --data '{"image":"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABpUAAATOCAYAAAA","labels":["healthy brain","brain with tumor"],"model":"english"}'
import http.client

conn = http.client.HTTPSConnection("api.kiri.ai")

payload = "{\"image\":\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABpUAAATOCAYAAAA\",\"labels\":[\"healthy brain\",\"brain with tumor\"],\"model\":\"english\"}"

headers = {
    'Content-Type': "application/json",
    'Accept': "application/json",
    'x-api-key': "API_KEY"
    }

conn.request("POST", "/image-classification", payload, headers)

res = conn.getresponse()
data = res.read()

print(data.decode("utf-8"))
const data = JSON.stringify({
  "image": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABpUAAATOCAYAAAA",
  "labels": [
    "healthy brain",
    "brain with tumor"
  ],
  "model": "english"
});

const xhr = new XMLHttpRequest();
xhr.withCredentials = true;

xhr.addEventListener("readystatechange", function () {
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

xhr.open("POST", "https://api.kiri.ai/image-classification");
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader("Accept", "application/json");
xhr.setRequestHeader("x-api-key", "API_KEY");

xhr.send(data);

POST /image-classification

Performs zero shot image classification on the provided base64 encoded image and labels. The probabilities predicted for the labels always sum to 100%.

Body parameter

{
  "image": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABpUAAATOCAYAAAA",
  "labels": [
    "healthy brain",
    "brain with tumor"
  ],
  "model": "english"
}

Parameters

Name In Type Required Description
body body ImageClassificationBody true none

Example responses

200 Response

{
  "probabilities": {
    "brain with tumor": 0.98,
    "healthy brain": 0.02
  }
}

Responses

Status Meaning Description Schema
200 OK successful classification ImageClassificationResponse
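
The image field takes a base64 encoded image; the examples use a data URL prefix. A minimal sketch that encodes a local file and sends it, assuming the Python requests library and a hypothetical image named brain_scan.png:

import base64
import requests

# Read a local image and base64 encode it with a data URL prefix,
# matching the format used in the example request body.
with open("brain_scan.png", "rb") as f:
    encoded = base64.b64encode(f.read()).decode("utf-8")

resp = requests.post(
    "https://api.kiri.ai/image-classification",
    headers={"x-api-key": "API_KEY"},
    json={
        "image": f"data:image/png;base64,{encoded}",
        "labels": ["healthy brain", "brain with tumor"],
        "model": "english",
    },
)
print(resp.json()["probabilities"])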

text generation

Code samples

curl --request POST \
  --url https://api.kiri.ai/generation \
  --header 'Accept: application/json' \
  --header 'Content-Type: application/json' \
  --header 'x-api-key: API_KEY' \
  --data '{"text":"Geralt knew the signs, the monster was a","min_length":10,"max_length":20,"temperature":1,"top_k":0,"top_p":1,"repetition_penalty":1,"length_penalty":1,"num_beams":1,"num_generations":1,"model":"gpt2-large"}'
import http.client

conn = http.client.HTTPSConnection("api.kiri.ai")

payload = "{\"text\":\"Geralt knew the signs, the monster was a\",\"min_length\":10,\"max_length\":20,\"temperature\":1,\"top_k\":0,\"top_p\":1,\"repetition_penalty\":1,\"length_penalty\":1,\"num_beams\":1,\"num_generations\":1,\"model\":\"gpt2-large\"}"

headers = {
    'Content-Type': "application/json",
    'Accept': "application/json",
    'x-api-key': "API_KEY"
    }

conn.request("POST", "/generation", payload, headers)

res = conn.getresponse()
data = res.read()

print(data.decode("utf-8"))
const data = JSON.stringify({
  "text": "Geralt knew the signs, the monster was a",
  "min_length": 10,
  "max_length": 20,
  "temperature": 1,
  "top_k": 0,
  "top_p": 1,
  "repetition_penalty": 1,
  "length_penalty": 1,
  "num_beams": 1,
  "num_generations": 1,
  "model": "gpt2-large"
});

const xhr = new XMLHttpRequest();
xhr.withCredentials = true;

xhr.addEventListener("readystatechange", function () {
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

xhr.open("POST", "https://api.kiri.ai/generation");
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader("Accept", "application/json");
xhr.setRequestHeader("x-api-key", "API_KEY");

xhr.send(data);

POST /generation

Performs generation on the provided text with the specified parameters.

Body parameter

{
  "text": "Geralt knew the signs, the monster was a",
  "min_length": 10,
  "max_length": 20,
  "temperature": 1,
  "top_k": 0,
  "top_p": 1,
  "repetition_penalty": 1,
  "length_penalty": 1,
  "num_beams": 1,
  "num_generations": 1,
  "model": "gpt2-large"
}

Parameters

Name In Type Required Description
body body GenerationBody true none

Example responses

200 Response

{
  "output": "Geralt knew the signs, the monster was a vampire that day; after the siege her companions"
}

Responses

Status Meaning Description Schema
200 OK successful generation GenerationResponse
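
All sampling parameters are optional and documented in GenerationBody below. The sketch that follows is a hedged example assuming the Python requests library; it raises the temperature, restricts sampling to the 50 most likely tokens, and penalises repetition:

import requests

resp = requests.post(
    "https://api.kiri.ai/generation",
    headers={"x-api-key": "API_KEY"},
    json={
        "text": "Geralt knew the signs, the monster was a",
        "min_length": 10,
        "max_length": 40,
        "temperature": 0.9,        # higher values make the continuation more random
        "top_k": 50,               # sample only from the 50 most likely tokens
        "repetition_penalty": 1.2, # values above 1.0 discourage repetition
        "model": "gpt2-large",
    },
)
print(resp.json()["output"])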

text summarisation

Code samples

curl --request POST \
  --url https://api.kiri.ai/summarisation \
  --header 'Accept: application/json' \
  --header 'Content-Type: application/json' \
  --header 'x-api-key: API_KEY' \
  --data '{"text":"Some long text to summarise","model":"english"}'
import http.client

conn = http.client.HTTPSConnection("api.kiri.ai")

payload = "{\"text\":\"Some long text to summarise\",\"model\":\"english\"}"

headers = {
    'Content-Type': "application/json",
    'Accept': "application/json",
    'x-api-key': "API_KEY"
    }

conn.request("POST", "/summarisation", payload, headers)

res = conn.getresponse()
data = res.read()

print(data.decode("utf-8"))
const data = JSON.stringify({
  "text": "Some long text to summarise",
  "model": "english"
});

const xhr = new XMLHttpRequest();
xhr.withCredentials = true;

xhr.addEventListener("readystatechange", function () {
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

xhr.open("POST", "https://api.kiri.ai/summarisation");
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader("Accept", "application/json");
xhr.setRequestHeader("x-api-key", "API_KEY");

xhr.send(data);

POST /summarisation

Performs summarisation on the provided text.

Body parameter

{
  "text": "Some long text to summarise",
  "model": "english"
}

Parameters

Name In Type Required Description
body body SummarisationBody true none

Example responses

200 Response

{
  "output": "Summary of long text"
}

Responses

Status Meaning Description Schema
200 OK successful summarisation SummarisationResponse
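
A minimal sketch assuming the Python requests library and a hypothetical local file article.txt containing the text to condense:

import requests

# Hypothetical input file; any long passage of English text will do.
with open("article.txt", encoding="utf-8") as f:
    long_text = f.read()

resp = requests.post(
    "https://api.kiri.ai/summarisation",
    headers={"x-api-key": "API_KEY"},
    json={"text": long_text, "model": "english"},
)
print(resp.json()["output"])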

emotion detection

Code samples

curl --request POST \
  --url https://api.kiri.ai/emotion \
  --header 'Accept: application/json' \
  --header 'Content-Type: application/json' \
  --header 'x-api-key: API_KEY' \
  --data '{"text":"I hope this works","model":"english"}'
import http.client

conn = http.client.HTTPSConnection("api.kiri.ai")

payload = "{\"text\":\"I hope this works\",\"model\":\"english\"}"

headers = {
    'Content-Type': "application/json",
    'Accept': "application/json",
    'x-api-key': "API_KEY"
    }

conn.request("POST", "/emotion", payload, headers)

res = conn.getresponse()
data = res.read()

print(data.decode("utf-8"))
const data = JSON.stringify({
  "text": "I hope this works",
  "model": "english"
});

const xhr = new XMLHttpRequest();
xhr.withCredentials = true;

xhr.addEventListener("readystatechange", function () {
  if (this.readyState === this.DONE) {
    console.log(this.responseText);
  }
});

xhr.open("POST", "https://api.kiri.ai/emotion");
xhr.setRequestHeader("Content-Type", "application/json");
xhr.setRequestHeader("Accept", "application/json");
xhr.setRequestHeader("x-api-key", "API_KEY");

xhr.send(data);

POST /emotion

Performs emotion detection on the provided text. The response is a string of comma-separated emotions. The possible emotions are: neutral, admiration, approval, annoyance, gratitude, disapproval, amusement, curiosity, love, optimism, disappointment, joy, realization, anger, sadness, confusion, caring, excitement, surprise, disgust, desire, fear, remorse, embarrassment, nervousness, pride, relief, grief.

Body parameter

{
  "text": "I hope this works",
  "model": "english"
}

Parameters

Name In Type Required Description
body body EmotionBody true none

Example responses

200 Response

{
  "output": "optimism"
}

Responses

Status Meaning Description Schema
200 OK successful emotion detection EmotionResponse
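
Because the output is a comma-separated string, several detected emotions can be split into a list. A minimal sketch assuming the Python requests library:

import requests

resp = requests.post(
    "https://api.kiri.ai/emotion",
    headers={"x-api-key": "API_KEY"},
    json={"text": "I hope this works", "model": "english"},
)
# The API returns a single string; split it in case several emotions
# are detected, e.g. "disappointment, sadness".
emotions = [e.strip() for e in resp.json()["output"].split(",")]
print(emotions)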

Schemas

VectorisationBody

{
  "text": "iPhone 12 128GB",
  "model": "english"
}

Vectorisation body

Properties

Name Type Required Restrictions Description
Vectorisation body any false none Vectorisation body variants for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous VectorisationSingle false none Single item vectorisation

xor

Name Type Required Restrictions Description
anonymous VectorisationBatch false none Batch vectorisation

VectorisationSingle

{
  "text": "iPhone 12 128GB",
  "model": "english"
}

Single item vectorisation

Properties

Name Type Required Restrictions Description
text string true none none
model string false none Model to use:
* english - English optimised vectorisation
* multilingual - Multilingual vectorisation in 50+ languages: ar, bg, ca, cs, da, de, el, es, et, fa, fi, fr, fr-ca, gl, gu, he, hi, hr, hu, hy, id, it, ja, ka, ko, ku, lt, lv, mk, mn, mr, ms, my, nb, nl, pl, pt, pt-br, ro, ru, sk, sl, sq, sr, sv, th, tr, uk, ur, vi, zh-cn, zh-tw.

Enumerated Values

Property Value
model english
model multilingual

VectorisationBatch

{
  "text": [
    "iPhone 12 128GB",
    "RTX 3090"
  ],
  "model": "english"
}

Batch vectorisation

Properties

Name Type Required Restrictions Description
text [string] true none none
model string false none Model to use:
* english - English optimised vectorisation
* multilingual - Multilingual vectorisation in 50+ languages: ar, bg, ca, cs, da, de, el, es, et, fa, fi, fr, fr-ca, gl, gu, he, hi, hr, hu, hy, id, it, ja, ka, ko, ku, lt, lv, mk, mn, mr, ms, my, nb, nl, pl, pt, pt-br, ro, ru, sk, sl, sq, sr, sv, th, tr, uk, ur, vi, zh-cn, zh-tw.

Enumerated Values

Property Value
model english
model multilingual

VectorisationResponse

{
  "vector": [
    0.92949192,
    0.2312301
  ]
}

Vectorisation response

Properties

Name Type Required Restrictions Description
Vectorisation response any false none Vectorisation responses for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous VectorisationSingleResponse false none none

xor

Name Type Required Restrictions Description
anonymous VectorisationBatchResponse false none none

VectorisationSingleResponse

{
  "vector": [
    0.92949192,
    0.2312301
  ]
}

Single item vectorisation response

Properties

Name Type Required Restrictions Description
vector [number] false none none

VectorisationBatchResponse

{
  "vectorList": [
    [
      0.92949192,
      0.2312301
    ],
    [
      0.82939192,
      0.5312701
    ]
  ]
}

Batch vectorisation response

Properties

Name Type Required Restrictions Description
vectorList [array] false none none

QABody

{
  "question": "What is the meaning of life?",
  "context": "The meaning of life is 42.",
  "prev_q": [
    "What is not the meaning of life?"
  ],
  "prev_a": [
    "unknown"
  ],
  "model": "english"
}

QA body

Properties

Name Type Required Restrictions Description
QA body any false none QA body variants for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous QASingle false none Single item QA

xor

Name Type Required Restrictions Description
anonymous QABatch false none Batch QA

QASingle

{
  "question": "What is the meaning of life?",
  "context": "The meaning of life is 42.",
  "prev_q": [
    "What is not the meaning of life?"
  ],
  "prev_a": [
    "unknown"
  ],
  "model": "english"
}

Single item QA

Properties

Name Type Required Restrictions Description
question string true none question to answer
context string true none context to answer based on
prev_q [string] false none none
prev_a [string] false none none
model string false none Model to use:
* english - English only QA

Enumerated Values

Property Value
model english

QABatch

{
  "question": [
    "What is the meaning of life?",
    "Where does Sally live?"
  ],
  "context": [
    "The meaning of life is 42.",
    "Sally lives in London"
  ],
  "prev_q": [
    [
      "What is not the meaning of life?"
    ],
    [
      "Where did Sally go to school?"
    ]
  ],
  "prev_a": [
    [
      "unknown"
    ],
    [
      "unknown"
    ]
  ],
  "model": "english"
}

Batch QA

Properties

Name Type Required Restrictions Description
question [string] true none list of questions to answer
context [string] true none list of contexts to answer based on
prev_q [array] false none none
prev_a [array] false none none
model string false none Model to use:
* english - English only QA

Enumerated Values

Property Value
model english

QAResponse

{
  "answer": "42"
}

QA response

Properties

Name Type Required Restrictions Description
QA response any false none QA responses for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous QASingleResponse false none none

xor

Name Type Required Restrictions Description
anonymous QABatchResponse false none none

QASingleResponse

{
  "answer": "42"
}

Single item QA response

Properties

Name Type Required Restrictions Description
answer string false none none

QABatchResponse

{
  "answer": [
    "42",
    "London"
  ]
}

Batch QA response

Properties

Name Type Required Restrictions Description
answer [string] false none none

ClassificationBody

{
  "text": "I am really mad because my product broke.",
  "labels": [
    "product issue",
    "furniture",
    "space"
  ],
  "model": "english"
}

Classification body

Properties

Name Type Required Restrictions Description
Classification body any false none Classification body variants for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous ClassificationSingle false none Single item Classification

xor

Name Type Required Restrictions Description
anonymous ClassificationBatch false none Batch Classification

ClassificationSingle

{
  "text": "I am really mad because my product broke.",
  "labels": [
    "product issue",
    "furniture",
    "space"
  ],
  "model": "english"
}

Single item Classification

Properties

Name Type Required Restrictions Description
text string true none text to classify
labels [string] true none labels to predict probabilities for
model string false none Model to use:
* english - English only classification
* multilingual - Multilingual classification in 100+ languages: Afrikaans, Albanian, Amharic, Arabic, Armenian, Assamese, Azerbaijani, Basque, Belarusian, Bengali, Bengali Romanized, Bosnian, Breton, Bulgarian, Burmese, Catalan, Chinese (Simplified), Chinese (Traditional), Croatian, Czech, Danish, Dutch, English, Esperanto, Estonian, Filipino, Finnish, French, Galician, Georgian, German, Greek, Gujarati, Hausa, Hebrew, Hindi, Hindi Romanized, Hungarian, Icelandic, Indonesian, Irish, Italian, Japanese, Javanese, Kannada, Kazakh, Khmer, Korean, Kurdish (Kurmanji), Kyrgyz, Lao, Latin, Latvian, Lithuanian, Macedonian, Malagasy, Malay, Malayalam, Marathi, Mongolian, Nepali, Norwegian, Oriya, Oromo, Pashto, Persian, Polish, Portuguese, Punjabi, Romanian, Russian, Sanskrit, Scottish Gaelic, Serbian, Sindhi, Sinhala, Slovak, Slovenian, Somali, Spanish, Sundanese, Swahili, Swedish, Tamil, Tamil Romanized, Telugu, Telugu Romanized, Thai, Turkish, Ukrainian, Urdu, Urdu Romanized, Uyghur, Uzbek, Vietnamese, Welsh, Western Frisian, Xhosa, Yiddish.

Enumerated Values

Property Value
model english
model multilingual

ClassificationBatch

{
  "text": [
    "I am really mad because my product broke.",
    "I would like to collaborate with you on social media"
  ],
  "labels": [
    [
      "product issue",
      "furniture",
      "space"
    ],
    [
      "product issue",
      "furniture",
      "sales"
    ]
  ],
  "model": "english"
}

Batch Classification

Properties

Name Type Required Restrictions Description
text [string] true none list of text to classify
labels [array] true none list of lists of labels to predict probabilities for
model string false none Model to use:
* english - English only classification
* multilingual - Multilingual classification in 100+ languages: Afrikaans, Albanian, Amharic, Arabic, Armenian, Assamese, Azerbaijani, Basque, Belarusian, Bengali, Bengali Romanized, Bosnian, Breton, Bulgarian, Burmese, Catalan, Chinese (Simplified), Chinese (Traditional), Croatian, Czech, Danish, Dutch, English, Esperanto, Estonian, Filipino, Finnish, French, Galician, Georgian, German, Greek, Gujarati, Hausa, Hebrew, Hindi, Hindi Romanized, Hungarian, Icelandic, Indonesian, Irish, Italian, Japanese, Javanese, Kannada, Kazakh, Khmer, Korean, Kurdish (Kurmanji), Kyrgyz, Lao, Latin, Latvian, Lithuanian, Macedonian, Malagasy, Malay, Malayalam, Marathi, Mongolian, Nepali, Norwegian, Oriya, Oromo, Pashto, Persian, Polish, Portuguese, Punjabi, Romanian, Russian, Sanskrit, Scottish Gaelic, Serbian, Sindhi, Sinhala, Slovak, Slovenian, Somali, Spanish, Sundanese, Swahili, Swedish, Tamil, Tamil Romanized, Telugu, Telugu Romanized, Thai, Turkish, Ukrainian, Urdu, Urdu Romanized, Uyghur, Uzbek, Vietnamese, Welsh, Western Frisian, Xhosa, Yiddish.

Enumerated Values

Property Value
model english
model multilingual

ClassificationResponse

{
  "probabilities": {
    "product issue": 0.98,
    "furniture": 0.1,
    "space": 0.05
  }
}

Classification response

Properties

Name Type Required Restrictions Description
Classification response any false none Classification responses for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous ClassificationSingleResponse false none none

xor

Name Type Required Restrictions Description
anonymous ClassificationBatchResponse false none none

ClassificationSingleResponse

{
  "probabilities": {
    "product issue": 0.98,
    "furniture": 0.1,
    "space": 0.05
  }
}

Single item classification response

Properties

Name Type Required Restrictions Description
probabilities object false none dictionary where the keys are your labels and values are probabilities

ClassificationBatchResponse

{
  "probabilities": [
    {
      "product issue": 0.98,
      "furniture": 0.1,
      "space": 0.05
    },
    {
      "product issue": 0.2,
      "furniture": 0.03,
      "sales": 0.87
    }
  ]
}

Batch classification response

Properties

Name Type Required Restrictions Description
probabilities [object] false none list of dictionaries where the keys are your labels and values are probabilities

ImageClassificationBody

{
  "image": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABpUAAATOCAYAAAA",
  "labels": [
    "healthy brain",
    "brain with tumor"
  ],
  "model": "english"
}

Image classification body

Properties

Name Type Required Restrictions Description
Image classification body ImageClassificationSingle false none Image classification body

ImageClassificationSingle

{
  "image": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABpUAAATOCAYAAAA",
  "labels": [
    "healthy brain",
    "brain with tumor"
  ],
  "model": "english"
}

Single item image classification

Properties

Name Type Required Restrictions Description
image string true none base64 encoded image
labels [string] true none labels to predict probabilities for
model string false none Model to use:
* english - Classification with English labels

Enumerated Values

Property Value
model english

ImageClassificationResponse

{
  "probabilities": {
    "brain with tumor": 0.98,
    "healthy brain": 0.02
  }
}

Image classification response

Properties

Name Type Required Restrictions Description
Image classification response ImageClassificationSingleResponse false none Image classification response

ImageClassificationSingleResponse

{
  "probabilities": {
    "brain with tumor": 0.98,
    "healthy brain": 0.02
  }
}

Single item image classification response

Properties

Name Type Required Restrictions Description
probabilities object false none dictionary where the keys are your labels and values are probabilities. Probabilities always sum to 100%.

GenerationBody

{
  "text": "Geralt knew the signs, the monster was a",
  "min_length": 10,
  "max_length": 20,
  "temperature": 1,
  "top_k": 0,
  "top_p": 1,
  "repetition_penalty": 1,
  "length_penalty": 1,
  "num_beams": 1,
  "num_generations": 1,
  "model": "gpt2-large"
}

Generation body

Properties

Name Type Required Restrictions Description
Generation body any false none Generation body variants for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous GenerationSingle false none Single item generation

xor

Name Type Required Restrictions Description
anonymous GenerationBatch false none Batch generation

GenerationSingle

{
  "text": "Geralt knew the signs, the monster was a",
  "min_length": 10,
  "max_length": 20,
  "temperature": 1,
  "top_k": 0,
  "top_p": 1,
  "repetition_penalty": 1,
  "length_penalty": 1,
  "num_beams": 1,
  "num_generations": 1,
  "model": "gpt2-large"
}

Single item generation

Properties

Name Type Required Restrictions Description
text string true none text to generate from
min_length integer false none minimum number of tokens to generate
max_length integer false none maximum number of tokens to generate
temperature number false none value that alters softmax probabilities. 0.0 is deterministic. As the temperature gets higher, the generated tokens get more random.
top_k integer false none sampling strategy in which probabilities are redistributed among top k most-likely tokens. 0 is a special value where all tokens are considered.
top_p number false none Sampling strategy in which probabilities are distributed among the set of words with combined probability greater than p.
repetition_penalty number false none Penalty to be applied to tokens present in the text and tokens already generated in the sequence. Values higher than 1.0 penalise repetition, while lower than 1.0 encourage it.
length_penalty number false none Penalty applied to overall sequence length. Set to greater than 1.0 for longer sequences or smaller than 1.0 for shorter ones.
num_beams integer false none Number of beams to be used in beam search. (1 is no beam search)
num_generations integer false none Number of times to do generation for input.
model string false none Model to use:
* gpt2-large - An optimised large version of gpt2.
* t5-base-qa-summary-emotion - The T5 base model trained for question answering, summarisation and emotion detection.

Enumerated Values

Property Value
model gpt2-large
model t5-base-qa-summary-emotion

GenerationBatch

{
  "text": [
    "Geralt knew the signs, the monster was a",
    "c: Elon Musk is an entrepreneur born in 1971. q: Who is Elon Musk? a: an entrepreneur q: When was he born? a: "
  ],
  "min_length": 10,
  "max_length": 20,
  "temperature": 1,
  "top_k": 0,
  "top_p": 1,
  "repetition_penalty": 1,
  "length_penalty": 1,
  "num_beams": 1,
  "num_generations": 1,
  "model": "gpt2-large"
}

Batch generation

Properties

Name Type Required Restrictions Description
text [string] true none text to generate from
min_length integer false none minimum number of tokens to generate
max_length integer false none maximum number of tokens to generate
temperature number false none value that alters softmax probabilities. 0.0 is deterministic. As the temperature gets higher, the generated tokens get more random.
top_k integer false none sampling strategy in which probabilities are redistributed among top k most-likely tokens. 0 is a special value where all tokens are considered.
top_p number false none Sampling strategy in which probabilities are distributed among the set of words with combined probability greater than p.
repetition_penalty number false none Penalty to be applied to tokens present in the text and tokens already generated in the sequence. Values higher than 1.0 penalise repetition, while lower than 1.0 encourage it.
length_penalty number false none Penalty applied to overall sequence length. Set to greater than 1.0 for longer sequences or smaller than 1.0 for shorter ones.
num_beams integer false none Number of beams to be used in beam search. (1 is no beam search)
num_generations integer false none Number of times to do generation for input.
model string false none Model to use:
* gpt2-large - An optimised large version of gpt2.
* t5-base-qa-summary-emotion - The T5 base model trained for question answering, summarisation and emotion detection.

Enumerated Values

Property Value
model gpt2-large
model t5-base-qa-summary-emotion
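
The second item in the example body above suggests the prompt format that the t5-base-qa-summary-emotion model appears to expect for question answering: the context prefixed with "c:", each question with "q:", and each answer with "a:". A hedged sketch (the build_qa_prompt helper is illustrative, not part of the API) that assembles such a prompt from a context and a question history:

def build_qa_prompt(context, prev_q, prev_a, question):
    # Assemble the "c: ... q: ... a: ..." prompt shown in the batch example.
    parts = [f"c: {context}"]
    for q, a in zip(prev_q, prev_a):
        parts.append(f"q: {q} a: {a}")
    parts.append(f"q: {question} a: ")
    return " ".join(parts)

prompt = build_qa_prompt(
    "Elon Musk is an entrepreneur born in 1971.",
    ["Who is Elon Musk?"],
    ["an entrepreneur"],
    "When was he born?",
)
print(prompt)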

GenerationResponse

{
  "output": "Geralt knew the signs, the monster was a vampire that day; after the siege her companions"
}

Generation response

Properties

Name Type Required Restrictions Description
Generation response any false none Generation responses for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous GenerationSingleResponse false none none

xor

Name Type Required Restrictions Description
anonymous GenerationBatchResponse false none none

GenerationSingleResponse

{
  "output": "Geralt knew the signs, the monster was a vampire that day; after the siege her companions"
}

Single item generation response

Properties

Name Type Required Restrictions Description
output string false none none

GenerationBatchResponse

{
  "output": [
    "Geralt knew the signs, the monster was a vampire that day; after the siege her companions",
    "1971"
  ]
}

Batch generation response

Properties

Name Type Required Restrictions Description
output [string] false none none

SummarisationBody

{
  "text": "Some long text to summarise",
  "model": "english"
}

Summarisation body

Properties

Name Type Required Restrictions Description
Summarisation body any false none Summarisation body variants for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous SummarisationSingle false none Single item summarisation

xor

Name Type Required Restrictions Description
anonymous SummarisationBatch false none Batch summarisation

SummarisationSingle

{
  "text": "Some long text to summarise",
  "model": "english"
}

Single item summarisation

Properties

Name Type Required Restrictions Description
text string true none none
model string false none Model to use:
* english - English text summarisation

Enumerated Values

Property Value
model english

SummarisationBatch

{
  "text": [
    "Some long text to summarise",
    "Some more long text that needs summarising"
  ],
  "model": "english"
}

Batch summarisation

Properties

Name Type Required Restrictions Description
text [string] true none none
model string false none Model to use:
* english - English text summarisation

Enumerated Values

Property Value
model english

SummarisationResponse

{
  "output": "Summary of long text"
}

Summarisation response

Properties

Name Type Required Restrictions Description
Summarisation response any false none Summarisation responses for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous SummarisationSingleResponse false none none

xor

Name Type Required Restrictions Description
anonymous SummarisationBatchResponse false none none

SummarisationSingleResponse

{
  "output": "Summary of long text"
}

Single item summarisation response

Properties

Name Type Required Restrictions Description
output string false none none

SummarisationBatchResponse

{
  "output": [
    "Summary of long text",
    "Summary of some more long text"
  ]
}

Batch summarisation response

Properties

Name Type Required Restrictions Description
output [string] false none none

EmotionBody

{
  "text": "I hope this works",
  "model": "english"
}

Emotion body

Properties

Name Type Required Restrictions Description
Emotion body any false none Emotion body variants for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous EmotionSingle false none Single item emotion detection

xor

Name Type Required Restrictions Description
anonymous EmotionBatch false none Batch emotion detection

EmotionSingle

{
  "text": "I hope this works",
  "model": "english"
}

Single item emotion detection

Properties

Name Type Required Restrictions Description
text string true none none
model string false none Model to use:
* english - English text emotion detection

Enumerated Values

Property Value
model english

EmotionBatch

{
  "text": [
    "I hope this works",
    "I'll be most upset if things go wrong"
  ],
  "model": "english"
}

Batch emotion detection

Properties

Name Type Required Restrictions Description
text [string] true none none
model string false none Model to use:
* english - English text emotion detection

Enumerated Values

Property Value
model english

EmotionResponse

{
  "output": "optimism"
}

Emotion detection response

Properties

Name Type Required Restrictions Description
Emotion detection response any false none Emotion detection responses for single and batch requests

oneOf

Name Type Required Restrictions Description
anonymous EmotionSingleResponse false none none

xor

Name Type Required Restrictions Description
anonymous EmotionBatchResponse false none none

EmotionSingleResponse

{
  "output": "optimism"
}

Single item emotion detection response

Properties

Name Type Required Restrictions Description
output string false none none

EmotionBatchResponse

{
  "output": [
    "optimism",
    "disappointment, sadness"
  ]
}

Batch emotion detection response

Properties

Name Type Required Restrictions Description
output [string] false none none