Skip to content

Commit

Permalink
Merge pull request #111 from intelligentnode/110-add-anthropic-model
Browse files Browse the repository at this point in the history
110 add anthropic model
  • Loading branch information
intelligentnode authored Mar 9, 2024
2 parents c763747 + 41f7837 commit 7cece5a
Show file tree
Hide file tree
Showing 11 changed files with 176 additions and 24 deletions.
6 changes: 3 additions & 3 deletions IntelliNode/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,12 +15,12 @@

</p>

IntelliNode is the ultimate tool to integrate your data with the latest language models and deep learning frameworks using **javascript**. The library provides intuitive functions for sending input to models like ChatGPT, WaveNet and Stable diffusion, and receiving generated text, speech, or images. With just a few lines of code, you can easily access the power of cutting-edge AI models to enhance your projects.
Integrate your data with the latest language models and deep learning frameworks using the intellinode **javascript** library. The library provides intuitive functions for sending input to models like ChatGPT, WaveNet and Stable diffusion, and receiving generated text, speech, or images. With just a few lines of code, you can easily access the power of cutting-edge AI models to enhance your projects.

# Latest Updates
- Add Anthropic Claude 3 chat.
- Add Google Gemini chat and vision.
- Add Mistral SMoE model as a chatbot provider (open source mixture of experts).
- Update Openai with DALL·E 3 vision, speech, and ChatGPT functions (automation).
- Improve Llama v2 chat speed and support llama code models. 🦙
- Update stable diffusion to use the XL model engine. 🎨
- Add support for hugging face inference. 🤗
Expand Down Expand Up @@ -65,7 +65,7 @@ const geminiBot = new Chatbot(geminiApiKey, SupportedChatModels.GEMINI);
const responses = await geminiBot.chat(geminiInput);
```

The documentation on how to switch the chatbot between ChatGPT, Mistral and LLama can be found in the [IntelliNode Wiki](https://github.com/Barqawiz/IntelliNode/wiki/ChatBot).
The documentation on how to switch between ChatGPT, Mistral, Anthropic and LLama can be found in the [IntelliNode Wiki](https://docs.intellinode.ai/docs/npm/chatbot/get-started).

### Semantic Search
1. imports:
Expand Down
5 changes: 5 additions & 0 deletions IntelliNode/config.json
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,11 @@
"visionEndpoint": "gemini-pro-vision:generateContent",
"embeddingEndpoint": "embedding-001:embedContent",
"batchEmbeddingEndpoint": "embedding-001:batchEmbedContents"
},
"anthropic": {
"base": "https://api.anthropic.com",
"messages": "/v1/messages",
"version": "2023-06-01"
}
},
"models": {
Expand Down
33 changes: 23 additions & 10 deletions IntelliNode/function/Chatbot.js
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ const CohereAIWrapper = require('../wrappers/CohereAIWrapper');
const IntellicloudWrapper = require("../wrappers/IntellicloudWrapper");
const MistralAIWrapper = require('../wrappers/MistralAIWrapper');
const GeminiAIWrapper = require('../wrappers/GeminiAIWrapper');
const AnthropicWrapper = require('../wrappers/AnthropicWrapper');
const SystemHelper = require("../utils/SystemHelper");

const {
Expand All @@ -25,7 +26,8 @@ const {
CohereInput,
LLamaSageInput,
MistralInput,
GeminiInput
GeminiInput,
AnthropicInput
} = require("../model/input/ChatModelInput");

const SupportedChatModels = {
Expand All @@ -35,6 +37,7 @@ const SupportedChatModels = {
COHERE: "cohere",
MISTRAL: "mistral",
GEMINI: "gemini",
ANTHROPIC: "anthropic",
};

class Chatbot {
Expand Down Expand Up @@ -68,6 +71,8 @@ class Chatbot {
this.mistralWrapper = new MistralAIWrapper(keyValue);
} else if (provider === SupportedChatModels.GEMINI) {
this.geminiWrapper = new GeminiAIWrapper(keyValue);
} else if (provider === SupportedChatModels.ANTHROPIC) {
this.anthropicWrapper = new AnthropicWrapper(keyValue);
} else {
throw new Error("Invalid provider name");
}
Expand Down Expand Up @@ -113,6 +118,9 @@ class Chatbot {
} else if (this.provider === SupportedChatModels.GEMINI) {
const result = await this._chatGemini(modelInput);
return modelInput.attachReference ? { result, references } : result;
} else if (this.provider === SupportedChatModels.ANTHROPIC) {
const result = await this._chatAnthropic(modelInput);
return modelInput.attachReference ? { result, references } : result;
} else {
throw new Error("The provider is not supported");
}
Expand Down Expand Up @@ -166,8 +174,6 @@ class Chatbot {
if (lastMessage && lastMessage.role === "user") {

const semanticResult = await this.extendedController.semanticSearch(lastMessage.content, modelInput.searchK);

// console.log('semanticResult: ', semanticResult);

if (semanticResult && semanticResult.length > 0) {

Expand All @@ -178,8 +184,6 @@ class Chatbot {
}
return acc;
}, {});

// console.log('references: ', references);

let contextData = semanticResult.map(doc => doc.data.map(dataItem => dataItem.text).join('\n')).join('\n').trim();
const templateWrapper = new SystemHelper().loadStaticPrompt("augmented_chatbot");
Expand All @@ -190,19 +194,14 @@ class Chatbot {
promptLines.pop();
promptLines.push(`User: ${augmentedMessage}`);
modelInput.prompt = promptLines.join('\n');

// console.log('----> prompt after update: ', modelInput.prompt);
} else if (modelInput instanceof ChatModelInput) {
modelInput.deleteLastMessage(lastMessage);
modelInput.addUserMessage(augmentedMessage);

// console.log('----> modelInput after update: ', modelInput);
} else if (typeof modelInput === "object" && Array.isArray(modelInput.messages) && messages.length > 0) {
// replace the user message directly in the array
if (lastMessage.content) {
lastMessage.content = augmentedMessage;
}
// console.log('----> messages after update: ', messages[messages.length - 1]);
}
}
}
Expand Down Expand Up @@ -416,6 +415,20 @@ class Chatbot {
return responses;
}

async _chatAnthropic(modelInput) {
let params;

if (modelInput instanceof AnthropicInput) {
params = modelInput.getChatInput();
} else {
throw new Error("Invalid input: Must be an instance of AnthropicInput");
}

const results = await this.anthropicWrapper.generateText(params);

return results.content.map(choice => choice.text);
}

} /*chatbot class*/

module.exports = {
Expand Down
8 changes: 6 additions & 2 deletions IntelliNode/index.js
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,8 @@ const {
LLamaSageInput,
CohereInput,
MistralInput,
GeminiInput
GeminiInput,
AnthropicInput
} = require('./model/input/ChatModelInput');
const FunctionModelInput = require('./model/input/FunctionModelInput');
const EmbedInput = require('./model/input/EmbedInput');
Expand All @@ -59,6 +60,7 @@ const AWSEndpointWrapper = require('./wrappers/AWSEndpointWrapper');
const IntellicloudWrapper = require('./wrappers/IntellicloudWrapper');
const MistralAIWrapper = require('./wrappers/MistralAIWrapper');
const GeminiAIWrapper = require('./wrappers/GeminiAIWrapper');
const AnthropicWrapper = require('./wrappers/AnthropicWrapper');
// utils
const { LLMEvaluation } = require('./utils/LLMEvaluation');
const AudioHelper = require('./utils/AudioHelper');
Expand Down Expand Up @@ -120,5 +122,7 @@ module.exports = {
SupportedFineTuneModels,
FineTuneInput,
GeminiInput,
GeminiAIWrapper
GeminiAIWrapper,
AnthropicInput,
AnthropicWrapper
};
41 changes: 39 additions & 2 deletions IntelliNode/model/input/ChatModelInput.js
Original file line number Diff line number Diff line change
Expand Up @@ -156,7 +156,7 @@ class MistralInput extends ChatGPTInput {
constructor(systemMessage, options = {}) {
super(systemMessage, options);

this.model = options.model || 'mistral-tiny';
this.model = options.model || 'mistral-medium';

}

Expand Down Expand Up @@ -232,6 +232,42 @@ class GeminiInput extends ChatModelInput {

}

/**
 * Chat input builder for Anthropic's /v1/messages API.
 *
 * @param {string} system - system prompt sent in the `system` field.
 * @param {object} [options] - optional settings:
 *   model {string} - defaults to 'claude-3-sonnet-20240229'.
 *   maxTokens {number} - defaults to 800.
 *   temperature {number} - defaults to 1.0.
 */
class AnthropicInput extends ChatModelInput {

  constructor(system, options = {}) {
    super(options);
    this.system = system;
    this.model = options.model || 'claude-3-sonnet-20240229';
    this.maxTokens = options.maxTokens || 800;
    // use ?? so an explicit temperature of 0 is honored;
    // `||` would silently replace 0 with the 1.0 default.
    this.temperature = options.temperature ?? 1.0;
    this.messages = [];
  }

  // Appends a user-role turn to the conversation history.
  addUserMessage(text) {
    this.messages.push({
      role: "user",
      content: text
    });
  }

  // Appends an assistant-role turn to the conversation history.
  addAssistantMessage(text) {
    this.messages.push({
      role: "assistant",
      content: text
    });
  }

  // Builds the request body expected by the Anthropic messages endpoint.
  getChatInput() {
    return {
      system: this.system,
      model: this.model,
      messages: this.messages,
      max_tokens: this.maxTokens,
      temperature: this.temperature,
    };
  }
}

class ChatLLamaInput extends ChatModelInput {
constructor(systemMessage, options = {}) {
super(options);
Expand Down Expand Up @@ -426,5 +462,6 @@ module.exports = {
LLamaReplicateInput,
CohereInput,
MistralInput,
GeminiInput
GeminiInput,
AnthropicInput
};
4 changes: 2 additions & 2 deletions IntelliNode/package.json
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
{
"name": "intellinode",
"version": "1.8.4",
"description": "Connect your data, evaluate accuracy, and integrate with AI models including ChatGPT, Llama, Diffusion, Cohere, Gemini, and Hugging Face.",
"version": "1.8.5",
"description": "Evaluate and integrate with latest AI models including ChatGPT, Llama, Diffusion, Cohere, Gemini, and Hugging Face.",
"main": "index.js",
"keywords": [
"ai",
Expand Down
30 changes: 30 additions & 0 deletions IntelliNode/test/integration/AnthropicWrapper.test.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
require('dotenv').config();
const assert = require('assert');
const AnthropicWrapper = require('../../wrappers/AnthropicWrapper');

// initiate anthropic object
const anthropic = new AnthropicWrapper(process.env.ANTHROPIC_API_KEY);

/**
 * Integration check: sends a single user message to the Anthropic
 * messages API and logs the first content block of the reply.
 * Errors are logged rather than thrown (best-effort integration test).
 */
async function testAnthropicGenerate() {
  const requestBody = {
    model: "claude-3-sonnet-20240229",
    messages: [
      {
        role: "user",
        content: "Who is the most renowned French painter? Provide a single direct short answer."
      }
    ],
    max_tokens: 256
  };

  try {
    const result = await anthropic.generateText(requestBody);
    console.log('Anthropic Language Model Result:', result.content[0].text);
  } catch (error) {
    console.error('Anthropic Language Model Error:', error);
  }
}

// entry point: run the Anthropic integration test
(async () => {
await testAnthropicGenerate();
})();
21 changes: 19 additions & 2 deletions IntelliNode/test/integration/Chatbot.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,8 @@ const { ChatGPTInput,
LLamaReplicateInput,
LLamaSageInput,
MistralInput,
GeminiInput } = require("../../model/input/ChatModelInput");
GeminiInput,
AnthropicInput } = require("../../model/input/ChatModelInput");

// env key
const apiKey = process.env.OPENAI_API_KEY;
Expand Down Expand Up @@ -292,6 +293,19 @@ async function testGeminiChatCase2() {
}
}

/**
 * Integration check: routes a chat turn through the Chatbot facade with
 * the Anthropic provider and asserts a non-empty response list.
 */
async function testAnthropicChatCase() {
  console.log('\nAnthropic chat test case: \n');
  const bot = new Chatbot(process.env.ANTHROPIC_API_KEY, SupportedChatModels.ANTHROPIC);

  // fix: corrected "asssitant" typo in the system prompt
  const input = new AnthropicInput("You are a helpful assistant.", {model: "claude-3-sonnet-20240229"});
  input.addUserMessage("Tell me about the history of artificial intelligence.");
  const responses = await bot.chat(input);

  responses.forEach((response) => console.log("- " + response));

  assert(responses.length > 0, "Anthropic chat response length should be greater than 0");
}

(async () => {

console.log('### Openai model ###')
Expand All @@ -312,11 +326,14 @@ async function testGeminiChatCase2() {
console.log('### Mistral model ###')
await testMistralChatCase();

console.log('### Anthropic model ###')
await testAnthropicChatCase();

console.log('### Gemini model ###')
await testGeminiChatCase1();
await testGeminiChatCase2();

console.log('### SageMaker llama model ###')
//await testSageMakerLLamaCase();

})();
7 changes: 6 additions & 1 deletion IntelliNode/test/integration/ModelEvaluation.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,11 @@ const mistralChat = {
type: 'chat', model: 'mistral-medium', maxTokens: 50
};

// Anthropic provider settings for the LLMEvaluation comparison run.
const anthropicChat = {
apiKey: process.env.ANTHROPIC_API_KEY, provider: SupportedChatModels.ANTHROPIC,
type: 'chat', model: 'claude-3-sonnet-20240229', maxTokens: 50
};

// create the evaluation object
const llmEvaluation = new LLMEvaluation(process.env.OPENAI_API_KEY, 'openai');

Expand All @@ -34,7 +39,7 @@ async function testLLMEvaluation() {
const targetAnswers = ["Photosynthesis is the process where green plants use sunlight to turn carbon dioxide and water into glucose and oxygen. The glucose provides food for the plant, and the oxygen gets released back into the air.",
"Photosynthesis is how plants make their own food. They take in water and carbon dioxide, use the energy from sunlight to transform them into glucose (their food) and oxygen, which they release into the air.",
"In simple terms, photosynthesis is like cooking for plants but instead of a stove, they use sunlight. They mix water and carbon dioxide with the sunlight to create glucose, which is their food, and also produce oxygen."];
const providerSets = [llamaChat, openaiChat, cohereCompletion, geminiChat, mistralChat];
const providerSets = [llamaChat, openaiChat, cohereCompletion, geminiChat, mistralChat, anthropicChat];

const results = await llmEvaluation.compareModels(inputString, targetAnswers, providerSets);

Expand Down
6 changes: 4 additions & 2 deletions IntelliNode/utils/LLMEvaluation.js
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ const { RemoteEmbedModel, SupportedEmbedModels } = require('../controller/Remote
const LanguageModelInput = require('../model/input/LanguageModelInput');
const { Chatbot, SupportedChatModels } = require("../function/Chatbot");
const { RemoteLanguageModel, SupportedLangModels } = require("../controller/RemoteLanguageModel");
const { ChatGPTInput, LLamaReplicateInput, LLamaSageInput, GeminiInput, CohereInput, MistralInput } = require("../model/input/ChatModelInput");
const { ChatGPTInput, LLamaReplicateInput, LLamaSageInput, GeminiInput, CohereInput, MistralInput, AnthropicInput } = require("../model/input/ChatModelInput");
const MatchHelpers = require('../utils/MatchHelpers');
const EmbedInput = require('../model/input/EmbedInput');
const { ModelEvaluation } = require('./ModelEvaluation');
Expand Down Expand Up @@ -45,7 +45,9 @@ class LLMEvaluation extends ModelEvaluation {
} else if (SupportedChatModels.COHERE == provider.toLowerCase()) {
input = new CohereInput("provide direct answer", { maxTokens: maxTokens });
} else if (SupportedChatModels.MISTRAL == provider.toLowerCase()) {
input = new MistralInput("provide direct answer", { maxTokens: maxTokens });
input = new MistralInput("provide direct answer", { model: modelName, maxTokens: maxTokens });
} else if (SupportedChatModels.ANTHROPIC == provider.toLowerCase()) {
input = new AnthropicInput("provide direct answer", { model: modelName, maxTokens: maxTokens });
} else {
input = new ChatGPTInput("provide direct answer", { model: modelName, maxTokens: maxTokens });
}
Expand Down
39 changes: 39 additions & 0 deletions IntelliNode/wrappers/AnthropicWrapper.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
/*
Apache License
Copyright 2023 Github.com/Barqawiz/IntelliNode
Licensed under the Apache License, Version 2.0 (the "License");
*/
const axios = require('axios');
const config = require('../config.json');
const connHelper = require('../utils/ConnHelper');

/**
 * Thin HTTP client for Anthropic's messages endpoint.
 * Base URL, endpoint path, and API version come from config.json.
 */
class AnthropicWrapper {
  /**
   * @param {string} apiKey - Anthropic API key sent via the x-api-key header.
   */
  constructor(apiKey) {
    this.API_BASE_URL = config.url.anthropic.base;
    this.API_VERSION = config.url.anthropic.version;

    // default headers applied to every request issued by this client
    const defaultHeaders = {
      'Content-Type': 'application/json',
      'Accept': 'application/json',
      'x-api-key': apiKey,
      'anthropic-version': this.API_VERSION
    };

    this.httpClient = axios.create({
      baseURL: this.API_BASE_URL,
      headers: defaultHeaders
    });
  }

  /**
   * Posts the given request body to the messages endpoint.
   * @param {object} params - request payload (model, messages, max_tokens, ...).
   * @returns {Promise<object>} the raw response body from Anthropic.
   * @throws {Error} with a normalized message on any transport/API failure.
   */
  async generateText(params) {
    try {
      const response = await this.httpClient.post(config.url.anthropic.messages, params);
      return response.data;
    } catch (error) {
      throw new Error(connHelper.getErrorMessage(error));
    }
  }

}

module.exports = AnthropicWrapper;

0 comments on commit 7cece5a

Please sign in to comment.