Try Jurassic-2 Instruct API here

Jurassic-2 Instruct models were developed to handle instruction-only prompts ("zero-shot") without requiring examples ("few-shot"). This approach provides a more intuitive way to interact with large language models, allowing users to obtain the best possible output for their task without any examples. With their specialized training, Jurassic-2 Instruct models excel at generating coherent and precise text based solely on instructions provided by the user.

Jurassic-2 Instruct Models

Jurassic-2 Instruct models come in two variations: Mid-Instruct and Ultra-Instruct.

Mid-Instruct: optimized for generating precise text based on minimal context, which makes it ideal for use cases such as chatbots and other conversational interfaces.

Ultra-Instruct: offers superior language understanding and response generation capabilities, making it ideal for advanced conversational interface needs.

Example API Request

// Zero-shot completion request to the Jurassic-2 Mid Instruct endpoint.
// NOTE(review): no response handling is shown here — callers still need to
// read the returned Response (e.g. res.json()) to get the completion.
fetch("https://api.ai21.com/studio/v1/j2-grande-instruct/complete", {
  headers: {
    // Replace YOUR_API_KEY with an AI21 Studio API key.
    "Authorization": "Bearer YOUR_API_KEY",
    "Content-Type": "application/json"
  },
  body: JSON.stringify({
      // Instruction-only prompt ("zero-shot") — no few-shot examples needed.
      "prompt": "Write a tweet about the future of NLP\n",
      "numResults": 1,  // number of completions to generate
      "maxTokens": 50,  // upper bound on generated tokens
      "temperature": 0.8,  // sampling temperature; higher = more varied output
      "topKReturn": 0,  // presumably how many top alternative tokens to report; 0 disables — confirm in API docs
      "topP":1,  // nucleus-sampling cutoff; 1 considers the full distribution
      // All three penalties are disabled here (scale: 0). The applyTo* flags
      // select which token classes a non-zero penalty would affect.
      "countPenalty": {
        "scale": 0,
        "applyToNumbers": false,
        "applyToPunctuations": false,
        "applyToStopwords": false,
        "applyToWhitespaces": false,
        "applyToEmojis": false
      },
      "frequencyPenalty": {
        "scale": 0,
        "applyToNumbers": false,
        "applyToPunctuations": false,
        "applyToStopwords": false,
        "applyToWhitespaces": false,
        "applyToEmojis": false
      },
      "presencePenalty": {
        "scale": 0,
        "applyToNumbers": false,
        "applyToPunctuations": false,
        "applyToStopwords": false,
        "applyToWhitespaces": false,
        "applyToEmojis": false
      },
      // Generation stops if "##" is produced.
      "stopSequences":["##"]
    }),
  method: "POST"
});

Example Response

{
  "id": "029cd08c-f65b-06de-49d9-60b47c15e23a",
  "prompt": {
    "text": "Write a tweet about the future of NLP\\n",
    "tokens": [
      {
        "generatedToken": {
          "token": "▁Write",
          "logprob": -9.832908630371094,
          "raw_logprob": -9.832908630371094
        },
        "topTokens": null,
        "textRange": {
          "start": 0,
          "end": 5
        }
      },
      {
        "generatedToken": {
          "token": "▁a▁tweet",
          "logprob": -9.605907440185547,
          "raw_logprob": -9.605907440185547
        },
        "topTokens": null,
        "textRange": {
          "start": 5,
          "end": 13
        }
      },
      {
        "generatedToken": {
          "token": "▁about▁the▁future▁of",
          "logprob": -11.053253173828125,
          "raw_logprob": -11.053253173828125
        },
        "topTokens": null,
        "textRange": {
          "start": 13,
          "end": 33
        }
      },
      {
        "generatedToken": {
          "token": "▁NLP",
          "logprob": -9.101167678833008,
          "raw_logprob": -9.101167678833008
        },
        "topTokens": null,
        "textRange": {
          "start": 33,
          "end": 37
        }
      },
      {
        "generatedToken": {
          "token": "\\n",
          "logprob": -12.08486557006836,
          "raw_logprob": -12.08486557006836
        },
        "topTokens": null,
        "textRange": {
          "start": 37,
          "end": 39
        }
      }
    ]
  },
  "completions": [
    {
      "data": {
        "text": " I think the future of NLP is in building systems that understand human language as well as humans do.",
        "tokens": [
          {
            "generatedToken": {
              "token": "▁",
              "logprob": -0.009251699782907963,
              "raw_logprob": -0.025562729686498642
            },
            "topTokens": null,
            "textRange": {
              "start": 0,
              "end": 1
            }
          },
          {
            "generatedToken": {
              "token": "▁I▁think",
              "logprob": -2.7593750953674316,
              "raw_logprob": -2.778052806854248
            },
            "topTokens": null,
            "textRange": {
              "start": 1,
              "end": 9
            }
          },
          {
            "generatedToken": {
              "token": "▁the▁future▁of",
              "logprob": -0.01882636919617653,
              "raw_logprob": -0.0687691792845726
            },
            "topTokens": null,
            "textRange": {
              "start": 9,
              "end": 23
            }
          },
          {
            "generatedToken": {
              "token": "▁NLP",
              "logprob": -0.0019030333496630192,
              "raw_logprob": -0.007231252733618021
            },
            "topTokens": null,
            "textRange": {
              "start": 23,
              "end": 27
            }
          },
          {
            "generatedToken": {
              "token": "▁is",
              "logprob": -0.05679452046751976,
              "raw_logprob": -0.12373676151037216
            },
            "topTokens": null,
            "textRange": {
              "start": 27,
              "end": 30
            }
          },
          {
            "generatedToken": {
              "token": "▁in▁building",
              "logprob": -2.596593141555786,
              "raw_logprob": -3.1734254360198975
            },
            "topTokens": null,
            "textRange": {
              "start": 30,
              "end": 42
            }
          },
          {
            "generatedToken": {
              "token": "▁systems▁that",
              "logprob": -1.0248959064483643,
              "raw_logprob": -1.5406049489974976
            },
            "topTokens": null,
            "textRange": {
              "start": 42,
              "end": 55
            }
          },
          {
            "generatedToken": {
              "token": "▁understand",
              "logprob": -0.4593545198440552,
              "raw_logprob": -0.8558947443962097
            },
            "topTokens": null,
            "textRange": {
              "start": 55,
              "end": 66
            }
          },
          {
            "generatedToken": {
              "token": "▁human",
              "logprob": -1.0440826416015625,
              "raw_logprob": -1.5726295709609985
            },
            "topTokens": null,
            "textRange": {
              "start": 66,
              "end": 72
            }
          },
          {
            "generatedToken": {
              "token": "▁language",
              "logprob": -0.20479987561702728,
              "raw_logprob": -0.4235517382621765
            },
            "topTokens": null,
            "textRange": {
              "start": 72,
              "end": 81
            }
          },
          {
            "generatedToken": {
              "token": "▁as▁well▁as",
              "logprob": -0.2055591642856598,
              "raw_logprob": -0.457123339176178
            },
            "topTokens": null,
            "textRange": {
              "start": 81,
              "end": 92
            }
          },
          {
            "generatedToken": {
              "token": "▁humans",
              "logprob": -0.3632097840309143,
              "raw_logprob": -0.5173041224479675
            },
            "topTokens": null,
            "textRange": {
              "start": 92,
              "end": 99
            }
          },
          {
            "generatedToken": {
              "token": "▁do",
              "logprob": -0.0010070496937260032,
              "raw_logprob": -0.0054183765314519405
            },
            "topTokens": null,
            "textRange": {
              "start": 99,
              "end": 102
            }
          },
          {
            "generatedToken": {
              "token": ".",
              "logprob": -0.003985914401710033,
              "raw_logprob": -0.014302950352430344
            },
            "topTokens": null,
            "textRange": {
              "start": 102,
              "end": 103
            }
          },
          {
            "generatedToken": {
              "token": "<|endoftext|>",
              "logprob": -0.5593628287315369,
              "raw_logprob": -0.5923886299133301
            },
            "topTokens": null,
            "textRange": {
              "start": 103,
              "end": 103
            }
          }
        ]
      },
      "finishReason": {
        "reason": "endoftext"
      }
    }
  ]
}
// Zero-shot completion request to the Jurassic-2 Mid Instruct endpoint.
// NOTE(review): no response handling is shown here — callers still need to
// read the returned Response (e.g. res.json()) to get the completion.
fetch("https://api.ai21.com/studio/v1/j2-grande-instruct/complete", {
  headers: {
    // Replace YOUR_API_KEY with an AI21 Studio API key.
    "Authorization": "Bearer YOUR_API_KEY",
    "Content-Type": "application/json"
  },
  body: JSON.stringify({
      // Instruction-only prompt ("zero-shot") — no few-shot examples needed.
      "prompt": "Write a tweet about the future of NLP\n",
      "numResults": 1,  // number of completions to generate
      "maxTokens": 50,  // upper bound on generated tokens
      "temperature": 0.8,  // sampling temperature; higher = more varied output
      "topKReturn": 0,  // presumably how many top alternative tokens to report; 0 disables — confirm in API docs
      "topP":1,  // nucleus-sampling cutoff; 1 considers the full distribution
      // All three penalties are disabled here (scale: 0). The applyTo* flags
      // select which token classes a non-zero penalty would affect.
      "countPenalty": {
        "scale": 0,
        "applyToNumbers": false,
        "applyToPunctuations": false,
        "applyToStopwords": false,
        "applyToWhitespaces": false,
        "applyToEmojis": false
      },
      "frequencyPenalty": {
        "scale": 0,
        "applyToNumbers": false,
        "applyToPunctuations": false,
        "applyToStopwords": false,
        "applyToWhitespaces": false,
        "applyToEmojis": false
      },
      "presencePenalty": {
        "scale": 0,
        "applyToNumbers": false,
        "applyToPunctuations": false,
        "applyToStopwords": false,
        "applyToWhitespaces": false,
        "applyToEmojis": false
      },
      // Generation stops if "##" is produced.
      "stopSequences":["##"]
    }),
  method: "POST"
});

Example Response

{
  "id": "029cd08c-f65b-06de-49d9-60b47c15e23a",
  "prompt": {
    "text": "Write a tweet about the future of NLP\\n",
    "tokens": [
      {
        "generatedToken": {
          "token": "▁Write",
          "logprob": -9.832908630371094,
          "raw_logprob": -9.832908630371094
        },
        "topTokens": null,
        "textRange": {
          "start": 0,
          "end": 5
        }
      },
      {
        "generatedToken": {
          "token": "▁a▁tweet",
          "logprob": -9.605907440185547,
          "raw_logprob": -9.605907440185547
        },
        "topTokens": null,
        "textRange": {
          "start": 5,
          "end": 13
        }
      },
      {
        "generatedToken": {
          "token": "▁about▁the▁future▁of",
          "logprob": -11.053253173828125,
          "raw_logprob": -11.053253173828125
        },
        "topTokens": null,
        "textRange": {
          "start": 13,
          "end": 33
        }
      },
      {
        "generatedToken": {
          "token": "▁NLP",
          "logprob": -9.101167678833008,
          "raw_logprob": -9.101167678833008
        },
        "topTokens": null,
        "textRange": {
          "start": 33,
          "end": 37
        }
      },
      {
        "generatedToken": {
          "token": "\\n",
          "logprob": -12.08486557006836,
          "raw_logprob": -12.08486557006836
        },
        "topTokens": null,
        "textRange": {
          "start": 37,
          "end": 39
        }
      }
    ]
  },
  "completions": [
    {
      "data": {
        "text": " I think the future of NLP is in building systems that understand human language as well as humans do.",
        "tokens": [
          {
            "generatedToken": {
              "token": "▁",
              "logprob": -0.009251699782907963,
              "raw_logprob": -0.025562729686498642
            },
            "topTokens": null,
            "textRange": {
              "start": 0,
              "end": 1
            }
          },
          {
            "generatedToken": {
              "token": "▁I▁think",
              "logprob": -2.7593750953674316,
              "raw_logprob": -2.778052806854248
            },
            "topTokens": null,
            "textRange": {
              "start": 1,
              "end": 9
            }
          },
          {
            "generatedToken": {
              "token": "▁the▁future▁of",
              "logprob": -0.01882636919617653,
              "raw_logprob": -0.0687691792845726
            },
            "topTokens": null,
            "textRange": {
              "start": 9,
              "end": 23
            }
          },
          {
            "generatedToken": {
              "token": "▁NLP",
              "logprob": -0.0019030333496630192,
              "raw_logprob": -0.007231252733618021
            },
            "topTokens": null,
            "textRange": {
              "start": 23,
              "end": 27
            }
          },
          {
            "generatedToken": {
              "token": "▁is",
              "logprob": -0.05679452046751976,
              "raw_logprob": -0.12373676151037216
            },
            "topTokens": null,
            "textRange": {
              "start": 27,
              "end": 30
            }
          },
          {
            "generatedToken": {
              "token": "▁in▁building",
              "logprob": -2.596593141555786,
              "raw_logprob": -3.1734254360198975
            },
            "topTokens": null,
            "textRange": {
              "start": 30,
              "end": 42
            }
          },
          {
            "generatedToken": {
              "token": "▁systems▁that",
              "logprob": -1.0248959064483643,
              "raw_logprob": -1.5406049489974976
            },
            "topTokens": null,
            "textRange": {
              "start": 42,
              "end": 55
            }
          },
          {
            "generatedToken": {
              "token": "▁understand",
              "logprob": -0.4593545198440552,
              "raw_logprob": -0.8558947443962097
            },
            "topTokens": null,
            "textRange": {
              "start": 55,
              "end": 66
            }
          },
          {
            "generatedToken": {
              "token": "▁human",
              "logprob": -1.0440826416015625,
              "raw_logprob": -1.5726295709609985
            },
            "topTokens": null,
            "textRange": {
              "start": 66,
              "end": 72
            }
          },
          {
            "generatedToken": {
              "token": "▁language",
              "logprob": -0.20479987561702728,
              "raw_logprob": -0.4235517382621765
            },
            "topTokens": null,
            "textRange": {
              "start": 72,
              "end": 81
            }
          },
          {
            "generatedToken": {
              "token": "▁as▁well▁as",
              "logprob": -0.2055591642856598,
              "raw_logprob": -0.457123339176178
            },
            "topTokens": null,
            "textRange": {
              "start": 81,
              "end": 92
            }
          },
          {
            "generatedToken": {
              "token": "▁humans",
              "logprob": -0.3632097840309143,
              "raw_logprob": -0.5173041224479675
            },
            "topTokens": null,
            "textRange": {
              "start": 92,
              "end": 99
            }
          },
          {
            "generatedToken": {
              "token": "▁do",
              "logprob": -0.0010070496937260032,
              "raw_logprob": -0.0054183765314519405
            },
            "topTokens": null,
            "textRange": {
              "start": 99,
              "end": 102
            }
          },
          {
            "generatedToken": {
              "token": ".",
              "logprob": -0.003985914401710033,
              "raw_logprob": -0.014302950352430344
            },
            "topTokens": null,
            "textRange": {
              "start": 102,
              "end": 103
            }
          },
          {
            "generatedToken": {
              "token": "<|endoftext|>",
              "logprob": -0.5593628287315369,
              "raw_logprob": -0.5923886299133301
            },
            "topTokens": null,
            "textRange": {
              "start": 103,
              "end": 103
            }
          }
        ]
      },
      "finishReason": {
        "reason": "endoftext"
      }
    }
  ]
}