diff --git a/Package.swift b/Package.swift
index e3df799..a9fde4c 100644
--- a/Package.swift
+++ b/Package.swift
@@ -5,7 +5,7 @@ import PackageDescription
 let package = Package(
     name: "OpenAISwift",
-    platforms: [.iOS(.v15), .macOS(.v10_15)],
+    platforms: [.iOS(.v13), .macOS(.v10_15)],
     products: [
         // Products define the executables and libraries a package produces, and make them visible to other packages.
         .library(
diff --git a/README.md b/README.md
index e15341a..da3f5c5 100644
--- a/README.md
+++ b/README.md
@@ -40,7 +40,39 @@ Import the framework in your project:
 [Create an OpenAI API key](https://platform.openai.com/account/api-keys) and add it to your configuration:
-`let openAI = OpenAISwift(authToken: "TOKEN")`
+```swift
+let TOKEN = "sk-......"
+
+let openAI = OpenAISwift(config: OpenAISwift.Config.makeDefaultOpenAI(apiKey: TOKEN))
+```
+
+To follow [OpenAI requirements](https://platform.openai.com/docs/api-reference/authentication)
+
+> Remember that your API key is a secret! Do not share it with others or expose it in any client-side code (browsers, apps). Production requests must be routed through your own backend server where your API key can be securely loaded from an environment variable or key management service.
+
+and basic security hygiene, you should not call the OpenAI API directly from client code. Instead, point the client at your own proxy. In the sketch below, `session` and `Encryptor` stand in for your app's own `URLSession` and key-storage helper:
+
+```swift
+private lazy var proxyOpenAIBackend: OpenAISwift = .init(
+    config: OpenAISwift.Config(
+        baseURL: "http://localhost",
+        endpointProvider: OpenAIEndpointProvider(source: .proxy(path: { _ -> String in
+            "/chat/completions"
+        }, method: { _ -> String in
+            "POST"
+        })),
+        session: session,
+        authorizeRequest: { [weak self] request in
+            self?.authorizeRequest(&request)
+        }
+    ))
+
+private func authorizeRequest(_ request: inout URLRequest) {
+    if let apiKey = try? Encryptor.getApiToken() {
+        request.setValue(apiKey, forHTTPHeaderField: "X-API-KEY")
+    }
+}
+```
+
 This framework supports Swift concurrency; each example below has both an async/await and completion handler variant.
diff --git a/Sources/OpenAISwift/Extensions/Extensions.swift b/Sources/OpenAISwift/Extensions/Extensions.swift
new file mode 100644
index 0000000..0bd3452
--- /dev/null
+++ b/Sources/OpenAISwift/Extensions/Extensions.swift
@@ -0,0 +1,15 @@
+//
+//  Extensions.swift
+//
+//
+//  Created by Mark Hoath on 15/11/2023.
+//
+
+import Foundation
+
+extension Encodable {
+    /// Encodes the value to JSON and reads it back as a `[String: Any]` dictionary.
+    func toDictionary() -> [String: Any]? {
+        guard let data = try? JSONEncoder().encode(self) else { return nil }
+        return (try? JSONSerialization.jsonObject(with: data, options: .allowFragments)).flatMap { $0 as? [String: Any] }
+    }
+}
diff --git a/Sources/OpenAISwift/Models/AssistantObject.swift b/Sources/OpenAISwift/Models/AssistantObject.swift
new file mode 100644
index 0000000..be9713a
--- /dev/null
+++ b/Sources/OpenAISwift/Models/AssistantObject.swift
@@ -0,0 +1,69 @@
+//
+//  AssistantObject.swift
+//
+//
+//  Created by Mark Hoath on 15/11/2023.
+//
+
+import Foundation
+
+public struct CodeInterpretorTool: Codable {
+    public let type: String
+}
+
+public struct RetrievalTool: Codable {
+    public let type: String
+}
+
+public struct ParamJSONObject: Codable {
+    public let properties: String
+}
+
+public struct FunctionObject: Codable {
+    public let description: String
+    public let name: String
+    public let parameters: ParamJSONObject
+}
+
+public struct FunctionTool: Codable {
+    public let type: String
+    public let function: FunctionObject
+}
+
+public struct Tools: Codable {
+    public let codeInterpretorTool: CodeInterpretorTool?
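+    // NOTE: The Assistants API encodes each tool as a single tagged object
+    // (e.g. {"type": "code_interpreter"}), so at most one of the three tool
+    // properties in this struct is expected to be non-nil per entry.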
+    public let retrievalTool: RetrievalTool?
+    public let functionTool: FunctionTool?
+}
+
+public struct AssistantObject: Codable {
+    public let id: String
+    public let object: String
+    public let created_at: Int
+    public let name: String?
+    public let description: String?
+    public let model: String
+    public let instructions: String?
+    public let tools: [Tools]
+    public let file_ids: [String]
+    public let metadata: [String:String]
+}
+
+public struct AssistantBody: Codable {
+    public let model: String
+    public let name: String?
+    public let description: String?
+    public let instructions: String?
+    public let tools: [Tools]?
+    public let file_ids: [String]?
+    public let metadata: [String:String]?
+}
+
+public struct ListAssistantParams: Codable {
+    public let limit: Int?
+    public let order: String?
+    public let after: String?
+    public let before: String?
+}
diff --git a/Sources/OpenAISwift/Models/AudioObject.swift b/Sources/OpenAISwift/Models/AudioObject.swift
new file mode 100644
index 0000000..7e2d2f9
--- /dev/null
+++ b/Sources/OpenAISwift/Models/AudioObject.swift
@@ -0,0 +1,47 @@
+//
+//  AudioObject.swift
+//
+//
+//  Created by Mark Hoath on 14/11/2023.
+//
+
+import Foundation
+
+public enum Voice: String, Codable {
+    case alloy, echo, fable, onyx, nova, shimmer
+}
+
+public enum AudioResponseFormat: String, Codable {
+    case mp3, opus, aac, flac
+}
+
+public enum TranscriptionResponseFormat: String, Codable {
+    case json, text, srt, verbose_json, vtt
+}
+
+public struct Audio: Encodable {
+    public let model: String
+    public let input: String
+    public let voice: Voice
+    public let response_format: AudioResponseFormat?
+    public let speed: Double?
+}
+
+public struct Transcription: Encodable {
+    public let file: String
+    public let model: String
+    public let language: String?
+    public let prompt: String?
+    public let response_format: TranscriptionResponseFormat?
+    public let temperature: Double?
+}
+
+public struct Translation: Encodable {
+    public let file: String
+    public let model: String
+    public let prompt: String?
+    public let response_format: TranscriptionResponseFormat?
+    public let temperature: Double?
+}
diff --git a/Sources/OpenAISwift/Models/ChatMessage.swift b/Sources/OpenAISwift/Models/ChatMessage.swift
index c987cf9..cdfae49 100644
--- a/Sources/OpenAISwift/Models/ChatMessage.swift
+++ b/Sources/OpenAISwift/Models/ChatMessage.swift
@@ -18,9 +18,12 @@ public enum ChatRole: String, Codable {
 }
 
 /// A structure that represents a single message in a chat conversation.
 public struct ChatMessage: Codable, Identifiable {
-    // uuid to conform to Identifiable protocol
+    /// UUID to conform to the Identifiable protocol
+    /// - Note: This property is neither encoded nor decoded. A DTO or other logic may be required if the `ChatMessage` instance is stored locally.
    public var id = UUID()
+
    /// The role of the sender of the message.
    public let role: ChatRole?
    /// The content of the message.
@@ -34,6 +37,39 @@ public struct ChatMessage: Codable, Identifiable {
         self.role = role
         self.content = content
     }
+
+    enum CodingKeys: CodingKey {
+        case id
+        case role
+        case content
+    }
+
+    public init(from decoder: Decoder) throws {
+        let container: KeyedDecodingContainer<ChatMessage.CodingKeys> = try decoder.container(keyedBy: ChatMessage.CodingKeys.self)
+
+        self.id = UUID()
+        self.role = try container.decodeIfPresent(ChatRole.self, forKey: ChatMessage.CodingKeys.role)
+        self.content = try container.decodeIfPresent(String.self, forKey: ChatMessage.CodingKeys.content)
+    }
+
+    public func encode(to encoder: Encoder) throws {
+        var container: KeyedEncodingContainer<ChatMessage.CodingKeys> = encoder.container(keyedBy: ChatMessage.CodingKeys.self)
+
+        try container.encodeIfPresent(self.role, forKey: ChatMessage.CodingKeys.role)
+        try container.encodeIfPresent(self.content, forKey: ChatMessage.CodingKeys.content)
+    }
+}
+
+extension ChatMessage: Hashable {
+    public func hash(into hasher: inout Hasher) {
+        hasher.combine(id)
+    }
+    public static func == (lhs: ChatMessage, rhs: ChatMessage) -> Bool {
+        return lhs.id == rhs.id
+    }
 }
 
 /// A structure that represents a chat conversation.
@@ -70,7 +106,7 @@ public struct ChatConversation: Encodable {
     /// Modify the likelihood of specified tokens appearing in the completion. Maps tokens (specified by their token ID in the OpenAI Tokenizer—not English words) to an associated bias value from -100 to 100. Values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
     let logitBias: [Int: Double]?
-    
+
     /// If you're generating long completions, waiting for the response can take many seconds. To get responses sooner, you can 'stream' the completion as it's being generated. This allows you to start printing or processing the beginning of the completion before the full completion is finished.
     /// https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb
     let stream: Bool?
@@ -96,6 +132,6 @@ public struct ChatError: Codable {
         public let message, type: String
         public let param, code: String?
     }
-    
+
     public let error: Payload
 }
diff --git a/Sources/OpenAISwift/Models/FilesObject.swift b/Sources/OpenAISwift/Models/FilesObject.swift
new file mode 100644
index 0000000..92a7ce4
--- /dev/null
+++ b/Sources/OpenAISwift/Models/FilesObject.swift
@@ -0,0 +1,31 @@
+//
+//  FilesObject.swift
+//
+//
+//  Created by Mark Hoath on 14/11/2023.
+//
+
+import Foundation
+
+public enum FilePurpose: String, Codable {
+    case fine_tune = "fine-tune", fine_tune_results = "fine-tune-results", assistants, assistants_output = "assistants-output"
+}
+
+public struct FilesRequest: Codable {
+    public let purpose: String?
+}
+
+public struct FileUploadRequest: Codable {
+    public let file: Data
+    public let purpose: String
+}
+
+public struct FilesModel: Codable {
+    public var id: String
+    public var bytes: Int
+    public var created_at: Int
+    public var filename: String
+    public var object: String = "file"
+    public var purpose: FilePurpose?
+}
diff --git a/Sources/OpenAISwift/Models/FineTuningObject.swift b/Sources/OpenAISwift/Models/FineTuningObject.swift
new file mode 100644
index 0000000..ad50723
--- /dev/null
+++ b/Sources/OpenAISwift/Models/FineTuningObject.swift
@@ -0,0 +1,61 @@
+//
+//  FineTuningObject.swift
+//
+//
+//  Created by Mark Hoath on 15/11/2023.
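+//  Request/response models for the fine-tuning jobs endpoints. Property
+//  names follow the API's snake_case JSON keys so that Codable synthesis
+//  works without a custom key-decoding strategy.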
+//
+
+import Foundation
+
+public struct FineTuningError: Codable {
+    public let code: String
+    public let message: String
+    public let param: String?
+}
+
+public struct FineTuningHyperParams: Codable {
+    public let batch_size: String?
+    public let learning_rate_multiplier: String?
+    public let n_epochs: String?
+}
+
+public struct FineTuning: Codable {
+
+    public let id: String
+    public let created_at: Int
+    public let error: FineTuningError?
+    public let fine_tuned_model: String?
+    public let finished_at: Int?
+    public let hyperparameters: FineTuningHyperParams?
+    public let model: String
+    public let object: String
+    public let organization_id: String
+    public let result_files: [String]
+    public let status: String
+    public let trained_tokens: Int?
+    public let training_file: String
+    public let validation_file: String?
+}
+
+public struct FineTuningRequest: Codable {
+    public let model: String
+    public let training_file: String
+    public let hyperparameters: FineTuningHyperParams?
+    public let suffix: String?
+    public let validation_file: String?
+}
+
+public struct FineTuningListRequest: Codable {
+    public let after: String?
+    public let limit: Int?
+}
+
+public struct FineTuningEvent: Codable {
+    public let id: String
+    public let created_at: Int
+    public let level: String
+    public let message: String
+    public let object: String
+}
diff --git a/Sources/OpenAISwift/Models/ImageGeneration.swift b/Sources/OpenAISwift/Models/ImageGeneration.swift
index 139c617..ea30011 100644
--- a/Sources/OpenAISwift/Models/ImageGeneration.swift
+++ b/Sources/OpenAISwift/Models/ImageGeneration.swift
@@ -14,6 +14,22 @@ struct ImageGeneration: Encodable {
     let user: String?
 }
 
+struct ImageEdit: Encodable {
+    let image: Data
+    let mask: Data?
+    let prompt: String
+    let n: Int
+    let size: ImageSize
+    let user: String?
+}
+
+struct ImageVariations: Encodable {
+    let image: Data
+    let n: Int
+    let size: ImageSize
+    let user: String?
+}
+
 public enum ImageSize: String, Codable {
     case size1024 = "1024x1024"
     case size512 = "512x512"
diff --git a/Sources/OpenAISwift/Models/MessageFileObject.swift b/Sources/OpenAISwift/Models/MessageFileObject.swift
new file mode 100644
index 0000000..80c293b
--- /dev/null
+++ b/Sources/OpenAISwift/Models/MessageFileObject.swift
@@ -0,0 +1,15 @@
+//
+//  MessageFileObject.swift
+//
+//
+//  Created by Mark Hoath on 16/11/2023.
+//
+
+import Foundation
+
+public struct MessageFileObject: Codable {
+    public let id: String
+    public let object: String
+    public let created_at: Int
+    public let message_id: String
+}
diff --git a/Sources/OpenAISwift/Models/MessageObject.swift b/Sources/OpenAISwift/Models/MessageObject.swift
new file mode 100644
index 0000000..ad31bce
--- /dev/null
+++ b/Sources/OpenAISwift/Models/MessageObject.swift
@@ -0,0 +1,90 @@
+//
+//  MessageObject.swift
+//
+//
+//  Created by Mark Hoath on 16/11/2023.
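+//  Models for the Assistants "messages" endpoints. Per the OpenAI docs a
+//  content entry is either a text block or an image-file block, never both,
+//  which is why the corresponding properties below are optional.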
+//
+
+import Foundation
+
+public struct ImageFileID: Codable {
+    public let file_id: String
+}
+
+public struct ImageFile: Codable {
+    public let type: String // always = "image_file"
+    public let image_file: ImageFileID
+}
+
+public struct FileCitation: Codable {
+    public let file_id: String
+    public let quote: String
+}
+
+public struct TextFileCitation: Codable {
+    public let type: String // always = "file_citation"
+    public let text: String
+    public let file_citation: FileCitation
+    public let start_index: Int
+    public let end_index: Int
+}
+
+public struct FilePathID: Codable {
+    public let file_id: String
+}
+
+public struct TextFilePath: Codable {
+    public let type: String // always = "file_path"
+    public let text: String
+    public let file_path: FilePathID
+    public let start_index: Int
+    public let end_index: Int
+}
+
+public struct TextFileAnnotations: Codable {
+    // Only one of the two annotation kinds is present per entry.
+    public let file_citation: FileCitation?
+    public let file_path: TextFilePath?
+}
+
+public struct TextFileObject: Codable {
+    public let value: String
+    public let annotations: [TextFileAnnotations]
+}
+
+public struct TextFile: Codable {
+    public let type: String // always = "text"
+    public let text: TextFileObject
+}
+
+public struct MessageContent: Codable {
+    public let image_file: ImageFile?
+    public let text: TextFile?
+}
+
+public struct MessageObject: Codable {
+    public let id: String
+    public let object: String // always = "thread.message"
+    public let created_at: Int
+    public let thread_id: String
+    public let role: String
+    public let content: [MessageContent]
+    public let assistant_id: String?
+    public let run_id: String?
+    public let file_ids: [String] // max 10
+    public let metadata: [String:String] // 16 key-value pairs. Key max 64 chars. Value max 512 chars
+}
+
+public struct Message: Codable {
+    public let role: String
+    public let content: String // the text content of the message
+    public let file_ids: [String]? // max 10 file IDs
+    public let metadata: [String:String]? // 16 key-value pairs. Value max 512 chars
+}
+
+public struct MessageListRequest: Codable {
+    public let limit: Int?
+    public let order: String?
+    public let after: String?
+    public let before: String?
+}
diff --git a/Sources/OpenAISwift/Models/ModelObject.swift b/Sources/OpenAISwift/Models/ModelObject.swift
new file mode 100644
index 0000000..1cc6286
--- /dev/null
+++ b/Sources/OpenAISwift/Models/ModelObject.swift
@@ -0,0 +1,17 @@
+//
+//  ModelObject.swift
+//
+//
+//  Created by Mark Hoath on 15/11/2023.
+//
+
+import Foundation
+
+public struct ModelObject: Codable {
+
+    public let id: String
+    public let created: Int
+    public let object: String
+    public let owned_by: String
+
+}
diff --git a/Sources/OpenAISwift/Models/OpenAIEndpointModelType.swift b/Sources/OpenAISwift/Models/OpenAIEndpointModelType.swift
new file mode 100644
index 0000000..5bb4814
--- /dev/null
+++ b/Sources/OpenAISwift/Models/OpenAIEndpointModelType.swift
@@ -0,0 +1,129 @@
+//
+//  OpenAIEndpointModelType.swift
+//
+//
+//  Created by Marco Boerner on 01.09.23.
+//
+
+import Foundation
+
+/// Currently available and recommended models including some, but not all, legacy models
+/// https://platform.openai.com/docs/models/model-endpoint-compatibility
+public struct OpenAIEndpointModelType {
+
+    public enum AudioTranscriptions: String, Codable {
+        /// Whisper v2-large model (under the name whisper1) is a general-purpose speech recognition model. It is trained on a large dataset of diverse audio and is also a multi-task model that can perform multilingual speech recognition as well as speech translation and language identification.
+        case whisper1 = "whisper-1"
+    }
+
+    public enum AudioTranslations: String, Codable {
+        /// Whisper v2-large model (under the name whisper1) is a general-purpose speech recognition model. It is trained on a large dataset of diverse audio and is also a multi-task model that can perform multilingual speech recognition as well as speech translation and language identification.
+        case whisper1 = "whisper-1"
+    }
+
+    public enum ChatCompletions: String, Codable {
+        /// More capable than any GPT-3.5 model, able to do more complex tasks, and optimized for chat. Will be updated with our latest model iteration 2 weeks after it is released. - 8,192 tokens
+        case gpt4 = "gpt-4"
+
+        /// Snapshot of gpt-4 from June 13th 2023 with function calling data. Unlike gpt-4, this model will not receive updates, and will be deprecated 3 months after a new version is released. - 8,192 tokens
+        case gpt40613 = "gpt-4-0613"
+
+        /// Same capabilities as the standard gpt-4 model but with 4x the context length. Will be updated with our latest model iteration. - 32,768 tokens
+        case gpt432k = "gpt-4-32k"
+
+        /// Snapshot of gpt-4-32k from June 13th 2023. Unlike gpt-4-32k, this model will not receive updates, and will be deprecated 3 months after a new version is released. - 32,768 tokens
+        case gpt432k0613 = "gpt-4-32k-0613"
+
+        /// A faster version of GPT-3.5 with the same capabilities. Will be updated with our latest model iteration. - 4,096 tokens
+        case gpt35Turbo = "gpt-3.5-turbo"
+
+        /// Snapshot of gpt-3.5-turbo from June 13th 2023. Unlike gpt-3.5-turbo, this model will not receive updates, and will be deprecated 3 months after a new version is released. - 4,096 tokens
+        case gpt35Turbo0613 = "gpt-3.5-turbo-0613"
+
+        /// A faster version of GPT-3.5 with the same capabilities and 4x the context length. Will be updated with our latest model iteration. - 16,384 tokens
+        case gpt35Turbo16k = "gpt-3.5-turbo-16k"
+
+        /// Snapshot of gpt-3.5-turbo-16k from June 13th 2023. Unlike gpt-3.5-turbo-16k, this model will not receive updates, and will be deprecated 3 months after a new version is released. - 16,384 tokens
+        case gpt35Turbo16k0613 = "gpt-3.5-turbo-16k-0613"
+    }
+
+    public enum LegacyCompletions: String, Codable {
+        /// Can do any language task with better quality, longer output, and more consistent instruction-following than the curie, babbage, or ada models. Also supports some additional features such as inserting text. - 4,097 tokens
+        case textDavinci003 = "text-davinci-003"
+
+        /// Similar capabilities to text-davinci-003 but trained with supervised fine-tuning instead of reinforcement learning - 4,097 tokens
+        case textDavinci002 = "text-davinci-002"
+
+        /// An older InstructGPT model; superseded by text-davinci-002. - 2,049 tokens
+        case textDavinci001 = "text-davinci-001"
+
+        /// Very capable, faster and lower cost than Davinci. - 2,049 tokens
+        case textCurie001 = "text-curie-001"
+
+        /// Capable of straightforward tasks, very fast, and lower cost. - 2,049 tokens
+        case textBabbage001 = "text-babbage-001"
+
+        /// Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost. - 2,049 tokens
+        case textAda001 = "text-ada-001"
+
+        /// Most capable GPT-3 model. Can do any task the other models can do, often with higher quality. - 2,049 tokens
+        case davinci = "davinci"
+
+        /// Very capable, but faster and lower cost than Davinci. - 2,049 tokens
+        case curie = "curie"
+
+        /// Capable of straightforward tasks, very fast, and lower cost. - 2,049 tokens
+        case babbage = "babbage"
+
+        /// Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost. - 2,049 tokens
+        case ada = "ada"
+    }
+
+    public enum Embeddings: String, Codable {
+
+        /// The new model, text-embedding-ada-002, replaces five separate models for text search, text similarity, and code search, and outperforms our previous most capable model, Davinci, at most tasks, while being priced 99.8% lower.
+        case textEmbeddingAda002 = "text-embedding-ada-002"
+    }
+
+    public enum FineTuningJobs: String, Codable {
+
+        /// Experimental — eligible users will be presented with an option to request access in the fine-tuning UI.
+        case gpt4_0613 = "gpt-4-0613"
+
+        /// Most capable GPT-3.5 model (recommended).
+        case gpt35Turbo1106 = "gpt-3.5-turbo-1106"
+
+        /// Older snapshot; prefer gpt-3.5-turbo-1106 for the more up-to-date model.
+        case gpt35Turbo0613 = "gpt-3.5-turbo-0613"
+
+        /// Replacement for the GPT-3 ada and babbage base models. - 16,384 tokens
+        case babbage002 = "babbage-002"
+
+        /// Replacement for the GPT-3 curie and davinci base models. - 16,384 tokens
+        case davinci002 = "davinci-002"
+    }
+
+    public enum FineTunes: String, Codable {
+
+        /// Most capable GPT-3 model. Can do any task the other models can do, often with higher quality. - 2,049 tokens
+        case davinci = "davinci"
+
+        /// Very capable, but faster and lower cost than Davinci. - 2,049 tokens
+        case curie = "curie"
+
+        /// Capable of straightforward tasks, very fast, and lower cost. - 2,049 tokens
+        case babbage = "babbage"
+
+        /// Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost. - 2,049 tokens
+        case ada = "ada"
+    }
+
+    public enum Moderations: String, Codable {
+
+        /// Almost as capable as the latest model, but slightly older.
+        case textModerationStable = "text-moderation-stable"
+
+        /// Most capable moderation model. Accuracy will be slightly higher than the stable model.
+        case textModerationLatest = "text-moderation-latest"
+    }
+}
diff --git a/Sources/OpenAISwift/Models/OpenAIModelType.swift b/Sources/OpenAISwift/Models/OpenAIModelType.swift
index cdedbba..18a5fca 100644
--- a/Sources/OpenAISwift/Models/OpenAIModelType.swift
+++ b/Sources/OpenAISwift/Models/OpenAIModelType.swift
@@ -9,6 +9,10 @@ import Foundation
 /// The type of model used to generate the output
 public enum OpenAIModelType {
+
+    /// ``GPT4`` Family of Models
+    case gpt4(GPT4)
+
     /// ``GPT3`` Family of Models
     case gpt3(GPT3)
 
@@ -21,15 +25,19 @@ public enum OpenAIModelType {
     /// ``Chat`` Family of Models
     case chat(Chat)
 
-    /// ``GPT4`` Family of Models
-    case gpt4(GPT4)
-
     /// ``Embedding`` Family of Models
     case embedding(Embedding)
 
     /// ``Moderation`` Family of Models
     case moderation(Moderation)
 
+    /// ``DALLE`` Family of Models
+    case dalle(DALLE)
+
+    /// ``TTS`` Family of Models
+    case tts(TTS)
+
     /// Other Custom Models
     case other(String)
 
@@ -42,13 +50,83 @@ public enum OpenAIModelType {
         case .chat(let model): return model.rawValue
         case .embedding(let model): return model.rawValue
         case .moderation(let model): return model.rawValue
+        case .dalle(let model): return model.rawValue
+        case .tts(let model): return model.rawValue
         case .other(let modelName): return modelName
         }
     }
+
+    /// Custom initializer that allows the enum to be constructed from a raw string value.
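+    /// Falls back to `.other(rawValue)` when the string matches no known
+    /// model family, so converting to and from `modelName` is lossless.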
+    public init(rawValue: String) {
+        if let gpt4 = GPT4(rawValue: rawValue) {
+            self = .gpt4(gpt4)
+        } else if let gpt3 = GPT3(rawValue: rawValue) {
+            self = .gpt3(gpt3)
+        } else if let codex = Codex(rawValue: rawValue) {
+            self = .codex(codex)
+        } else if let feature = Feature(rawValue: rawValue) {
+            self = .feature(feature)
+        } else if let chat = Chat(rawValue: rawValue) {
+            self = .chat(chat)
+        } else if let embedding = Embedding(rawValue: rawValue) {
+            self = .embedding(embedding)
+        } else if let moderation = Moderation(rawValue: rawValue) {
+            self = .moderation(moderation)
+        } else if let dalle = DALLE(rawValue: rawValue) {
+            self = .dalle(dalle)
+        } else if let tts = TTS(rawValue: rawValue) {
+            self = .tts(tts)
+        } else {
+            self = .other(rawValue)
+        }
+    }
+
+    /// A set of models for the new GPT4 completions
+    /// Please note that you need to request access first - waitlist: https://openai.com/waitlist/gpt-4-api
+    /// You can read the [API Docs](https://platform.openai.com/docs/api-reference/chat/create)
+    public enum GPT4: String {
+
+        case gpt4_0125_preview = "gpt-4-0125-preview"
+        case gpt4_turbo_preview = "gpt-4-turbo-preview"
+
+        /// The latest GPT-4 model with improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more. Returns a maximum of 4,096 output tokens. This preview model is not yet suited for production traffic.
+        case gpt4_1106_preview = "gpt-4-1106-preview"
+
+        /// Ability to understand images, in addition to all other GPT-4 Turbo capabilities. Returns a maximum of 4,096 output tokens. This is a preview model version and not yet suited for production traffic.
+        case gpt4_vision_preview = "gpt-4-vision-preview"
+
+        /// More capable than any GPT-3.5 model, able to do more complex tasks, and optimized for chat. Will be updated with our latest model iteration.
+        /// > Model Name: gpt-4
+        case gpt4 = "gpt-4"
+        case gpt4_0613 = "gpt-4-0613"
+
+        /// Same capabilities as the base gpt-4 model but with 4x the context length. Will be updated with our latest model iteration.
+        /// > Model Name: gpt-4-32k
+        case gpt4_32k = "gpt-4-32k"
+
+        case gpt4_32k_0613 = "gpt-4-32k-0613"
+
+        /// Snapshot of gpt-4 from March 14th 2023. Unlike gpt-4, this model will not receive updates, and will be deprecated 3 months after a new version is released.
+        /// > Model Name: gpt-4-0314
+        @available(*, deprecated, message: "Model: gpt-4-0314 will be DEPRECATED on 13 June 2024")
+        case gpt4_0314 = "gpt-4-0314"
+
+        /// Snapshot of gpt-4-32k from March 14th 2023. Unlike gpt-4-32k, this model will not receive updates, and will be deprecated 3 months after a new version is released.
+        /// > Model Name: gpt-4-32k-0314
+        @available(*, deprecated, message: "Model: gpt-4-32k-0314 will be DEPRECATED on 13 June 2024")
+        case gpt4_32k_0314 = "gpt-4-32k-0314"
+    }
+
     /// A set of models that can understand and generate natural language
     ///
     /// [GPT-3 Models OpenAI API Docs](https://beta.openai.com/docs/models/gpt-3)
     public enum GPT3: String {
 
         /// Most capable GPT-3 model. Can do any task the other models can do, often with higher quality, longer output and better instruction-following. Also supports inserting completions within text.
@@ -105,6 +183,13 @@ public enum OpenAIModelType {
     /// You can read the [API Docs](https://platform.openai.com/docs/api-reference/chat/create)
     public enum Chat: String {
 
+        /// A newer GPT-3.5 Turbo snapshot, released in January 2024.
+        case chatgptturbo0125 = "gpt-3.5-turbo-0125"
+
+        /// The latest GPT-3.5 Turbo model with improved instruction following, JSON mode, reproducible outputs, parallel function calling, and more. Returns a maximum of 4,096 output tokens.
+        case chatgptturbo1106 = "gpt-3.5-turbo-1106"
+
         /// Most capable GPT-3.5 model and optimized for chat at 1/10th the cost of text-davinci-003. Will be updated with our latest model iteration.
         /// > Model Name: gpt-3.5-turbo
         case chatgpt = "gpt-3.5-turbo"
@@ -114,27 +199,6 @@ public enum OpenAIModelType {
         case chatgpt0301 = "gpt-3.5-turbo-0301"
     }
 
-    /// A set of models for the new GPT4 completions
-    /// Please note that you need to request access first - waitlist: https://openai.com/waitlist/gpt-4-api
-    /// You can read the [API Docs](https://platform.openai.com/docs/api-reference/chat/create)
-    public enum GPT4: String {
-
-        /// More capable than any GPT-3.5 model, able to do more complex tasks, and optimized for chat. Will be updated with our latest model iteration.
-        /// > Model Name: gpt-4
-        case gpt4 = "gpt-4"
-
-        /// Snapshot of gpt-4 from March 14th 2023. Unlike gpt-4, this model will not receive updates, and will be deprecated 3 months after a new version is released.
-        /// > Model Name: gpt-4-0314
-        case gpt4_0314 = "gpt-4-0314"
-
-        /// Same capabilities as the base gpt-4 mode but with 4x the context length. Will be updated with our latest model iteration.
-        /// > Model Name: gpt-4-32k
-        case gpt4_32k = "gpt-4-32k"
-
-        /// Snapshot of gpt-4-32 from March 14th 2023. Unlike gpt-4-32k, this model will not receive updates, and will be deprecated 3 months after a new version is released.
-        /// > Model Name: gpt-4-32k
-        case gpt4_32k_0314 = "gpt-4-32k-0314"
-    }
 
     /// A set of models for the embedding
@@ -145,11 +209,16 @@ public enum OpenAIModelType {
         ///
         /// > Model Name: text-embedding-ada-002
         case ada = "text-embedding-ada-002"
+        /// Third-generation embedding model (large), released January 2024.
+        case large_3 = "text-embedding-3-large"
+        /// Third-generation embedding model (small), released January 2024.
+        case small_3 = "text-embedding-3-small"
     }
 
     /// A set of models for the moderations endpoint
     /// You can read the [API Docs](https://platform.openai.com/docs/api-reference/moderations)
     public enum Moderation: String {
+
+        /// A pinned snapshot of the moderation model.
+        case mod_007 = "text-moderation-007"
+
         /// Default. Automatically upgraded over time.
         case latest = "text-moderation-latest"
 
@@ -157,4 +226,20 @@ public enum OpenAIModelType {
         /// Accuracy may be slightly lower than for text-moderation-latest.
         case stable = "text-moderation-stable"
     }
+
+    public enum DALLE: String {
+        /// The latest DALL·E model, released in Nov 2023.
+        case dalle_3 = "dall-e-3"
+
+        /// The previous DALL·E model, released in Nov 2022. The 2nd iteration of DALL·E with more realistic, accurate, and 4x greater resolution images than the original model.
+        case dalle_2 = "dall-e-2"
+    }
+
+    public enum TTS: String {
+
+        /// The latest text-to-speech model, optimized for speed.
+        case tts_1 = "tts-1"
+        /// The latest text-to-speech model, optimized for quality.
+        case tts_1_hd = "tts-1-hd"
+    }
 }
diff --git a/Sources/OpenAISwift/Models/RunObject.swift b/Sources/OpenAISwift/Models/RunObject.swift
new file mode 100644
index 0000000..6f094c2
--- /dev/null
+++ b/Sources/OpenAISwift/Models/RunObject.swift
@@ -0,0 +1,134 @@
+//
+//  RunObject.swift
+//
+//
+//  Created by Mark Hoath on 16/11/2023.
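+//  Models for the Assistants "runs" and "run steps" endpoints. Property
+//  names mirror the API's snake_case JSON keys.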
+//
+
+import Foundation
+
+typealias RunListRequest = MessageListRequest
+
+public enum RunObjectStatus: String, Codable {
+    case queued, in_progress, requires_action, cancelling, cancelled, failed, completed, expired
+}
+
+public struct ToolCallsFunction: Codable {
+    public let name: String
+    public let arguments: String
+}
+
+public struct ToolsOutput: Codable {
+    public let tool_call_id: String
+    public let output: String
+}
+
+public struct ToolCalls: Codable {
+    public let id: String
+    public let type: String // always = "function"
+    public let function: ToolCallsFunction
+}
+
+public struct SubmitToolOutputs: Codable {
+    public let tool_calls: [ToolCalls]
+}
+
+public struct RequiredAction: Codable {
+    public let type: String // always = "submit_tool_outputs"
+    public let submit_tool_outputs: SubmitToolOutputs
+}
+
+public struct LastError: Codable {
+    public let code: String
+    public let message: String
+}
+
+public struct RunObject: Codable {
+    public let id: String
+    public let object: String // always = "thread.run"
+    public let created_at: Int
+    public let thread_id: String
+    public let assistant_id: String
+    public let status: RunObjectStatus
+    public let required_action: RequiredAction? // only set while status == .requires_action
+    public let last_error: LastError?
+    public let expires_at: Int
+    public let started_at: Int?
+    public let cancelled_at: Int?
+    public let failed_at: Int?
+    public let completed_at: Int?
+    public let model: String
+    public let instructions: String
+    public let metadata: [String:String] // 16 key-value pairs. Key max 64 chars, value max 512 chars.
+}
+
+public struct RunRequest: Codable {
+    public let assistant_id: String
+    public let model: String?
+    public let instructions: String?
+    public let tools: [Tools]?
+    public let metadata: [String:String]?
+}
+
+public struct ThreadRun: Codable {
+    public let messages: [Message]?
+    public let metadata: [String:String]?
+}
+
+public struct ThreadRunRequest: Codable {
+    public let assistant_id: String
+    public let thread: ThreadRun?
+    public let model: String?
+    public let instructions: String?
+    public let tools: [Tools]?
+    public let metadata: [String:String]?
+}
+
+public struct MessageID: Codable {
+    public let message_id: String
+}
+
+public struct MessageCreation: Codable {
+    public let type: String // always = "message_creation"
+    public let message_creation: MessageID
+}
+
+public struct MessageToolCallsTypes: Codable {
+    public let code_interpreter: CodeInterpretorTool?
+    public let retrieval_tool: RetrievalTool?
+    public let function_tool: FunctionTool?
+}
+
+public struct MessageToolCalls: Codable {
+    public let type: String // always = "tool_calls"
+    public let tool_calls: MessageToolCallsTypes
+}
+
+public struct StepDetails: Codable {
+    // Only one of the two detail kinds is present, matching the step's type.
+    public let message_creation: MessageCreation?
+    public let tool_calls: MessageToolCalls?
+}
+
+public struct RunStepError: Codable {
+    public let code: String
+    public let message: String
+}
+
+public struct RunStep: Codable {
+    public let id: String
+    public let object: String
+    public let created_at: Int
+    public let assistant_id: String
+    public let thread_id: String
+    public let run_id: String
+    public let type: String // either "message_creation" or "tool_calls"
+    public let status: String // either "in_progress", "cancelled", "failed", "completed" or "expired"
+    public let step_details: StepDetails
+    public let last_error: RunStepError?
+    public let expired_at: Int?
+    public let cancelled_at: Int?
+    public let failed_at: Int?
+    public let completed_at: Int?
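+    // The *_at properties above are Unix-epoch timestamps in seconds; only
+    // the one matching the step's terminal status is expected to be set.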
+    public let metadata: [String:String]
+
+}
diff --git a/Sources/OpenAISwift/Models/ThreadObject.swift b/Sources/OpenAISwift/Models/ThreadObject.swift
new file mode 100644
index 0000000..d91098c
--- /dev/null
+++ b/Sources/OpenAISwift/Models/ThreadObject.swift
@@ -0,0 +1,15 @@
+//
+//  ThreadObject.swift
+//
+//
+//  Created by Mark Hoath on 16/11/2023.
+//
+
+import Foundation
+
+public struct ThreadObject: Codable {
+    public let id: String
+    public let object: String // always = "thread"
+    public let created_at: Int
+    public let metadata: [String: String] // up to 16 key-value pairs; values are capped at 512 characters
+}
diff --git a/Sources/OpenAISwift/OpenAIEndpoint.swift b/Sources/OpenAISwift/OpenAIEndpoint.swift
index eef1da7..7551aa4 100644
--- a/Sources/OpenAISwift/OpenAIEndpoint.swift
+++ b/Sources/OpenAISwift/OpenAIEndpoint.swift
@@ -3,15 +3,60 @@
 //
 
 import Foundation
+#if canImport(FoundationNetworking) && canImport(FoundationXML)
+import FoundationNetworking
+import FoundationXML
+#endif
 
 public struct OpenAIEndpointProvider {
     public enum API {
+        case assistant_create
+        case assistant_retrieve
+        case assistant_modify
+        case assistant_delete
+        case assistant_list
+        case audio_speech
+        case audio_transcription
+        case audio_translation
+        case chat
         case completions
         case edits
-        case chat
-        case images
         case embeddings
+        case files_upload
+        case files_list
+        case files_delete
+        case files_retrieve
+        case fine_tuning_create
+        case fine_tuning_list
+        case fine_tuning_list_events
+        case fine_tuning_cancel
+        case fine_tuning_retrieve
+        case images
+        case image_edits
+        case image_variations
+        case models_list
+        case models_retrieve
+        case models_delete
         case moderations
+        case thread_create
+        case thread_retrieve
+        case thread_modify
+        case thread_delete
+        case messages_create
+        case messages_retrieve
+        case messages_list
+        case messages_modify
+        case retrieve_message_file
+        case list_message_file
+        case runs_create
+        case runs_retrieve
+        case runs_modify
+        case runs_list
+        case runs_submit
+        case runs_cancel
+        case runs_thread_create
+        case run_step_retrieve
+        case run_step_list
    }
 
    public enum Source {
@@ -29,18 +74,41 @@ public struct OpenAIEndpointProvider {
        switch source {
        case .openAI:
            switch api {
-            case .completions:
-                return "/v1/completions"
-            case .edits:
-                return "/v1/edits"
-            case .chat:
-                return "/v1/chat/completions"
-            case .images:
-                return "/v1/images/generations"
-            case .embeddings:
-                return "/v1/embeddings"
-            case .moderations:
-                return "/v1/moderations"
+            case .assistant_create, .assistant_retrieve, .assistant_modify, .assistant_delete, .assistant_list:
+                return "/v1/assistants"
+            case .audio_transcription:
+                return "/v1/audio/transcriptions"
+            case .audio_translation:
+                return "/v1/audio/translations"
+            case .audio_speech:
+                return "/v1/audio/speech"
+            case .chat:
+                return "/v1/chat/completions"
+            case .completions:
+                return "/v1/completions" // LEGACY
+            case .edits:
+                return "/v1/edits"
+            case .embeddings:
+                return "/v1/embeddings"
+            case .files_list, .files_delete, .files_upload, .files_retrieve:
+                return "/v1/files"
+            case .fine_tuning_create, .fine_tuning_list, .fine_tuning_cancel, .fine_tuning_retrieve, .fine_tuning_list_events:
+                return "/v1/fine_tuning/jobs"
+            case .images:
+                return "/v1/images/generations"
+            case .image_edits:
+                return "/v1/images/edits"
+            case .image_variations:
+                return "/v1/images/variations"
+            case .models_list, .models_delete, .models_retrieve:
+                return "/v1/models"
+            case .moderations:
+                return "/v1/moderations"
+            case .thread_create, .thread_delete, .thread_modify, .thread_retrieve, .messages_list, .messages_create, .messages_modify, .messages_retrieve, .retrieve_message_file, .list_message_file,
+                 .runs_create, .runs_retrieve, .runs_modify, .runs_list, .runs_submit, .runs_cancel, .run_step_retrieve, .run_step_list:
+                return "/v1/threads"
+            case .runs_thread_create:
+                return "/v1/threads/runs"
            }
        case let .proxy(path: pathClosure, method: _):
            return pathClosure(api)
@@ -51,7 +119,12 @@ public struct OpenAIEndpointProvider {
        switch source {
        case .openAI:
            switch api {
-            case .completions, .edits, .chat, .images, .embeddings, .moderations:
+            case .assistant_delete, .files_delete, .models_delete, .thread_delete:
+                return "DELETE"
+            case .assistant_retrieve, .assistant_list, .files_retrieve, .files_list, .fine_tuning_list, .fine_tuning_list_events, .fine_tuning_retrieve, .models_list, .models_retrieve, .thread_retrieve, .messages_retrieve, .messages_list, .runs_retrieve, .runs_list, .run_step_list, .run_step_retrieve, .retrieve_message_file, .list_message_file:
+                return "GET"
+            case .assistant_create, .assistant_modify, .audio_speech, .audio_translation, .audio_transcription, .completions, .edits, .chat, .images, .embeddings, .files_upload, .fine_tuning_create, .fine_tuning_cancel, .moderations, .image_edits, .image_variations, .thread_create, .thread_modify, .messages_create, .messages_modify, .runs_create, .runs_modify, .runs_submit, .runs_cancel, .runs_thread_create:
                return "POST"
            }
        case let .proxy(path: _, method: methodClosure):
diff --git a/Sources/OpenAISwift/OpenAISwift.swift b/Sources/OpenAISwift/OpenAISwift.swift
index 67090ac..2a94470 100644
--- a/Sources/OpenAISwift/OpenAISwift.swift
+++ b/Sources/OpenAISwift/OpenAISwift.swift
@@ -5,6 +5,7 @@ import FoundationXML
 #endif
 
 public enum OpenAIError: Error {
+    case networkError(code: Int)
     case genericError(error: Error)
     case decodingError(error: Error)
     case chatError(error: ChatError.Payload)
@@ -12,7 +13,7 @@ public enum OpenAIError: Error {
 
 public class OpenAISwift {
     fileprivate let config: Config
-    fileprivate let handler = ServerSentEventsHandler()
+    let handler = ServerSentEventsHandler()
 
    /// Configuration object for the client
    public struct Config {
@@ -40,271 +41,47 @@ public class OpenAISwift {
        }
    }
 
+    @available(*, deprecated, message: "Use init(config:) instead")
+    public convenience init(authToken: String) {
+        self.init(config: .makeDefaultOpenAI(apiKey: authToken))
+    }
+
    public init(config: Config) {
        self.config = config
    }
 }
 
 extension OpenAISwift {
-    /// Send a Completion to the OpenAI API
-    /// - Parameters:
-    ///   - prompt: The Text Prompt
-    ///   - model: The AI Model to Use. Set to `OpenAIModelType.gpt3(.davinci)` by default which is the most capable model
-    ///   - maxTokens: The limit character for the returned response, defaults to 16 as per the API
-    ///   - completionHandler: Returns an OpenAI Data Model
-    public func sendCompletion(with prompt: String, model: OpenAIModelType = .gpt3(.davinci), maxTokens: Int = 16, temperature: Double = 1, completionHandler: @escaping (Result<OpenAI<TextResult>, OpenAIError>) -> Void) {
-        let endpoint = OpenAIEndpointProvider.API.completions
-        let body = Command(prompt: prompt, model: model.modelName, maxTokens: maxTokens, temperature: temperature)
-        let request = prepareRequest(endpoint, body: body)
-
-        makeRequest(request: request) { result in
-            switch result {
-            case .success(let success):
-                do {
-                    let res = try JSONDecoder().decode(OpenAI<TextResult>.self, from: success)
-                    completionHandler(.success(res))
-                } catch {
-                    completionHandler(.failure(.decodingError(error: error)))
-                }
-            case .failure(let failure):
-                completionHandler(.failure(.genericError(error: failure)))
-            }
-        }
-    }
-
-    /// Send a Edit request to the OpenAI API
-    /// - Parameters:
-    ///   - instruction: The Instruction For Example: "Fix the spelling mistake"
-    ///   - model: The Model to use, the only support model is `text-davinci-edit-001`
-    ///   - input: The Input For Example "My nam is Adam"
-    ///   - completionHandler: Returns an OpenAI Data Model
-    public func sendEdits(with instruction: String, model: OpenAIModelType = .feature(.davinci), input: String = "", completionHandler: @escaping (Result<OpenAI<TextResult>, OpenAIError>) -> Void) {
-        let endpoint = OpenAIEndpointProvider.API.edits
-        let body = Instruction(instruction: instruction, model: model.modelName, input: input)
-        let request = prepareRequest(endpoint, body: body)
-
-        makeRequest(request: request) { result in
-            switch result {
-            case .success(let success):
-                do {
-                    let res = try JSONDecoder().decode(OpenAI<TextResult>.self, from: success)
-                    completionHandler(.success(res))
-                } catch {
-                    completionHandler(.failure(.decodingError(error: error)))
-                }
-            case .failure(let failure):
-                completionHandler(.failure(.genericError(error: failure)))
-            }
-        }
-    }
-
-    /// Send a Moderation request to the OpenAI API
-    /// - Parameters:
-    ///   - input: The Input For Example "My nam is Adam"
-    ///   - model: The Model to use
-    ///   - completionHandler: Returns an OpenAI Data Model
-    public func sendModerations(with input: String, model: OpenAIModelType = .moderation(.latest), completionHandler: @escaping (Result<OpenAI<ModerationResult>, OpenAIError>) -> Void) {
-        let endpoint = OpenAIEndpointProvider.API.moderations
-        let body = Moderation(input: input, model: model.modelName)
-        let request = prepareRequest(endpoint, body: body)
-
-        makeRequest(request: request) { result in
-            switch result {
-            case .success(let success):
-                do {
-                    let res = try JSONDecoder().decode(OpenAI<ModerationResult>.self, from: success)
-                    completionHandler(.success(res))
-                } catch {
-                    completionHandler(.failure(.decodingError(error: error)))
-                }
-            case .failure(let failure):
-                completionHandler(.failure(.genericError(error: failure)))
-            }
-        }
-    }
-
-    /// Send a Chat request to the OpenAI API
-    /// - Parameters:
-    ///   - messages: Array of `ChatMessages`
-    ///   - model: The Model to use, the only support model is `gpt-3.5-turbo`
-    ///   - user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
-    ///   - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or topProbabilityMass but not both.
-    ///   - topProbabilityMass: The OpenAI api equivalent of the "top_p" parameter. An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.
-    ///   - choices: How many chat completion choices to generate for each input message.
-    ///   - stop: Up to 4 sequences where the API will stop generating further tokens.
-    ///   - maxTokens: The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
-    ///   - presencePenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
-    ///   - frequencyPenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
-    ///   - logitBias: Modify the likelihood of specified tokens appearing in the completion. Maps tokens (specified by their token ID in the OpenAI Tokenizer—not English words) to an associated bias value from -100 to 100. Values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
-    ///   - completionHandler: Returns an OpenAI Data Model
-    public func sendChat(with messages: [ChatMessage],
-                         model: OpenAIModelType = .chat(.chatgpt),
-                         user: String? = nil,
-                         temperature: Double? = 1,
-                         topProbabilityMass: Double? = 0,
-                         choices: Int? = 1,
-                         stop: [String]? = nil,
-                         maxTokens: Int? = nil,
-                         presencePenalty: Double? = 0,
-                         frequencyPenalty: Double? = 0,
-                         logitBias: [Int: Double]? = nil,
-                         completionHandler: @escaping (Result<OpenAI<MessageResult>, OpenAIError>) -> Void) {
-        let endpoint = OpenAIEndpointProvider.API.chat
-        let body = ChatConversation(user: user,
-                                    messages: messages,
-                                    model: model.modelName,
-                                    temperature: temperature,
-                                    topProbabilityMass: topProbabilityMass,
-                                    choices: choices,
-                                    stop: stop,
-                                    maxTokens: maxTokens,
-                                    presencePenalty: presencePenalty,
-                                    frequencyPenalty: frequencyPenalty,
-                                    logitBias: logitBias,
-                                    stream: false)
-
-        let request = prepareRequest(endpoint, body: body)
-
-        makeRequest(request: request) { result in
-            switch result {
-            case .success(let success):
-                if let chatErr = try? JSONDecoder().decode(ChatError.self, from: success) as ChatError {
-                    completionHandler(.failure(.chatError(error: chatErr.error)))
-                    return
-                }
-
-                do {
-                    let res = try JSONDecoder().decode(OpenAI<MessageResult>.self, from: success)
-                    completionHandler(.success(res))
-                } catch {
-                    completionHandler(.failure(.decodingError(error: error)))
-                }
-
-            case .failure(let failure):
-                completionHandler(.failure(.genericError(error: failure)))
-            }
-        }
-    }
-
-    /// Send a Embeddings request to the OpenAI API
-    /// - Parameters:
-    ///   - input: The Input For Example "The food was delicious and the waiter..."
-    ///   - model: The Model to use, the only support model is `text-embedding-ada-002`
-    ///   - completionHandler: Returns an OpenAI Data Model
-    public func sendEmbeddings(with input: String,
-                               model: OpenAIModelType = .embedding(.ada),
-                               completionHandler: @escaping (Result<OpenAI<EmbedResult>, OpenAIError>) -> Void) {
-        let endpoint = OpenAIEndpointProvider.API.embeddings
-        let body = EmbeddingsInput(input: input,
-                                   model: model.modelName)
-        let request = prepareRequest(endpoint, body: body)
-        makeRequest(request: request) { result in
-            switch result {
-            case .success(let success):
-                do {
-                    let res = try JSONDecoder().decode(OpenAI<EmbedResult>.self, from: success)
-                    completionHandler(.success(res))
-                } catch {
-                    completionHandler(.failure(.decodingError(error: error)))
-                }
-            case .failure(let failure):
-                completionHandler(.failure(.genericError(error: failure)))
-            }
-        }
-    }
-
-    /// Send a Chat request to the OpenAI API with stream enabled
-    /// - Parameters:
-    ///   - messages: Array of `ChatMessages`
-    ///   - model: The Model to use, the only support model is `gpt-3.5-turbo`
-    ///   - user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
-    ///   - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or topProbabilityMass but not both.
-    ///   - topProbabilityMass: The OpenAI api equivalent of the "top_p" parameter. An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.
-    ///   - choices: How many chat completion choices to generate for each input message.
-    ///   - stop: Up to 4 sequences where the API will stop generating further tokens.
-    ///   - maxTokens: The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
-    ///   - presencePenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
-    ///   - frequencyPenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
-    ///   - logitBias: Modify the likelihood of specified tokens appearing in the completion. Maps tokens (specified by their token ID in the OpenAI Tokenizer—not English words) to an associated bias value from -100 to 100. Values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
-    ///   - onEventReceived: Called Multiple times, returns an OpenAI Data Model
-    ///   - onComplete: Triggers when sever complete sending the message
-    public func sendStreamingChat(with messages: [ChatMessage],
-                                  model: OpenAIModelType = .chat(.chatgpt),
-                                  user: String? = nil,
-                                  temperature: Double? = 1,
-                                  topProbabilityMass: Double? = 0,
-                                  choices: Int? = 1,
-                                  stop: [String]? = nil,
-                                  maxTokens: Int? = nil,
-                                  presencePenalty: Double? = 0,
-                                  frequencyPenalty: Double? = 0,
-                                  logitBias: [Int: Double]? = nil,
-                                  onEventReceived: ((Result<OpenAI<StreamMessageResult>, OpenAIError>) -> Void)? = nil,
-                                  onComplete: (() -> Void)? = nil) {
-        let endpoint = OpenAIEndpointProvider.API.chat
-        let body = ChatConversation(user: user,
-                                    messages: messages,
-                                    model: model.modelName,
-                                    temperature: temperature,
-                                    topProbabilityMass: topProbabilityMass,
-                                    choices: choices,
-                                    stop: stop,
-                                    maxTokens: maxTokens,
-                                    presencePenalty: presencePenalty,
-                                    frequencyPenalty: frequencyPenalty,
-                                    logitBias: logitBias,
-                                    stream: true)
-        let request = prepareRequest(endpoint, body: body)
-        handler.onEventReceived = onEventReceived
-        handler.onComplete = onComplete
-        handler.connect(with: request)
-    }
-
-
-    /// Send a Image generation request to the OpenAI API
-    /// - Parameters:
-    ///   - prompt: The Text Prompt
-    ///   - numImages: The number of images to generate, defaults to 1
-    ///   - size: The size of the image, defaults to 1024x1024. There are two other options: 512x512 and 256x256
-    ///   - user: An optional unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
-    ///   - completionHandler: Returns an OpenAI Data Model
-    public func sendImages(with prompt: String, numImages: Int = 1, size: ImageSize = .size1024, user: String? = nil, completionHandler: @escaping (Result<OpenAI<UrlResult>, OpenAIError>) -> Void) {
-        let endpoint = OpenAIEndpointProvider.API.images
-        let body = ImageGeneration(prompt: prompt, n: numImages, size: size, user: user)
-        let request = prepareRequest(endpoint, body: body)
-
-        makeRequest(request: request) { result in
-            switch result {
-            case .success(let success):
-                do {
-                    let res = try JSONDecoder().decode(OpenAI<UrlResult>.self, from: success)
-                    completionHandler(.success(res))
-                } catch {
-                    completionHandler(.failure(.decodingError(error: error)))
-                }
-            case .failure(let failure):
-                completionHandler(.failure(.genericError(error: failure)))
-            }
-        }
-    }
-
-    private func makeRequest(request: URLRequest, completionHandler: @escaping (Result<Data, Error>) -> Void) {
+    func makeRequest(request: URLRequest, completionHandler: @escaping (Result<Data, Error>) -> Void) {
        let session = config.session
        let task = session.dataTask(with: request) { (data, response, error) in
            if let error = error {
                completionHandler(.failure(error))
+            } else if let response = response as? HTTPURLResponse, !(200...299).contains(response.statusCode) {
+                completionHandler(.failure(OpenAIError.networkError(code: response.statusCode)))
            } else if let data = data {
                completionHandler(.success(data))
+            } else {
+                let error = NSError(domain: "OpenAI", code: 6666, userInfo: [NSLocalizedDescriptionKey: "Unknown error"])
+                completionHandler(.failure(OpenAIError.genericError(error: error)))
            }
        }
-
        task.resume()
    }
 
-    private func prepareRequest<BodyType: Encodable>(_ endpoint: OpenAIEndpointProvider.API, body: BodyType) -> URLRequest {
+    func prepareRequest(
+        _ endpoint: OpenAIEndpointProvider.API,
+        queryItems: [URLQueryItem]? = nil
+    ) -> URLRequest {
+        return prepareRequest(endpoint, body: Optional<String>.none, queryItems: queryItems)
+    }
+
+    func prepareRequest<BodyType: Encodable>(_ endpoint: OpenAIEndpointProvider.API, body: BodyType?, queryItems: [URLQueryItem]?) -> URLRequest {
+
        var urlComponents = URLComponents(url: URL(string: config.baseURL)!, resolvingAgainstBaseURL: true)
        urlComponents?.path = config.endpointProvider.getPath(api: endpoint)
+        urlComponents?.queryItems = queryItems
        var request = URLRequest(url: urlComponents!.url!)
        request.httpMethod = config.endpointProvider.getMethod(api: endpoint)
@@ -312,190 +89,103 @@ extension OpenAISwift {
        request.setValue("application/json", forHTTPHeaderField: "content-type")
 
-        let encoder = JSONEncoder()
-        if let encoded = try? encoder.encode(body) {
-            request.httpBody = encoded
+        if let body = body {
+            let encoder = JSONEncoder()
+            if let encoded = try? encoder.encode(body) {
+                request.httpBody = encoded
+            }
        }
 
        return request
    }
-}
-
-extension OpenAISwift {
-    /// Send a Completion to the OpenAI API
-    /// - Parameters:
-    ///   - prompt: The Text Prompt
-    ///   - model: The AI Model to Use. Set to `OpenAIModelType.gpt3(.davinci)` by default which is the most capable model
-    ///   - maxTokens: The limit character for the returned response, defaults to 16 as per the API
-    ///   - temperature: Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. Defaults to 1
-    /// - Returns: Returns an OpenAI Data Model
-    @available(swift 5.5)
-    @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
-    public func sendCompletion(with prompt: String, model: OpenAIModelType = .gpt3(.davinci), maxTokens: Int = 16, temperature: Double = 1) async throws -> OpenAI<TextResult> {
-        return try await withCheckedThrowingContinuation { continuation in
-            sendCompletion(with: prompt, model: model, maxTokens: maxTokens, temperature: temperature) { result in
-                continuation.resume(with: result)
-            }
-        }
-    }
-
-    /// Send a Edit request to the OpenAI API
-    /// - Parameters:
-    ///   - instruction: The Instruction For Example: "Fix the spelling mistake"
-    ///   - model: The Model to use, the only support model is `text-davinci-edit-001`
-    ///   - input: The Input For Example "My nam is Adam"
-    /// - Returns: Returns an OpenAI Data Model
-    @available(swift 5.5)
-    @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
-    public func sendEdits(with instruction: String, model: OpenAIModelType = .feature(.davinci), input: String = "") async throws -> OpenAI<TextResult> {
-        return try await withCheckedThrowingContinuation { continuation in
-            sendEdits(with: instruction, model: model, input: input) { result in
-                continuation.resume(with: result)
-            }
+
+    func prepareMultipartFormImageDataRequest(_ endpoint: OpenAIEndpointProvider.API, imageData: Data, maskData: Data?, prompt: String, n: Int, size: String) -> URLRequest {
+
+        var urlComponents = URLComponents(url: URL(string: config.baseURL)!, resolvingAgainstBaseURL: true)
+        urlComponents?.path = config.endpointProvider.getPath(api: endpoint)
+        var request = URLRequest(url: urlComponents!.url!)
+        request.httpMethod = config.endpointProvider.getMethod(api: endpoint)
+
+        config.authorizeRequest(&request)
+
+        let boundary = "Boundary-\(UUID().uuidString)"
+        request.setValue("multipart/form-data; boundary=\(boundary)", forHTTPHeaderField: "Content-Type")
+
+        var body = Data()
+
+        body.append("--\(boundary)\r\n".data(using: .utf8)!)
+        body.append("Content-Disposition: form-data; name=\"image\"; filename=\"image.png\"\r\n".data(using: .utf8)!)
+        body.append("Content-Type: image/png\r\n\r\n".data(using: .utf8)!)
+        body.append(imageData)
+        body.append("\r\n".data(using: .utf8)!)
+
+        if let maskData = maskData {
+            body.append("--\(boundary)\r\n".data(using: .utf8)!)
+            body.append("Content-Disposition: form-data; name=\"mask\"; filename=\"mask.png\"\r\n".data(using: .utf8)!)
+            body.append("Content-Type: image/png\r\n\r\n".data(using: .utf8)!)
+            body.append(maskData)
+            body.append("\r\n".data(using: .utf8)!)
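+            // The mask part above is only sent for image edits; image-variation
+            // requests pass maskData as nil and skip this block.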
+        }
-        }
-    }
-
-    /// Send a Chat request to the OpenAI API
-    /// - Parameters:
-    ///   - messages: Array of `ChatMessages`
-    ///   - model: The Model to use, the only support model is `gpt-3.5-turbo`
-    ///   - user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
-    ///   - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or topProbabilityMass but not both.
-    ///   - topProbabilityMass: The OpenAI api equivalent of the "top_p" parameter. An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.
-    ///   - choices: How many chat completion choices to generate for each input message.
-    ///   - stop: Up to 4 sequences where the API will stop generating further tokens.
-    ///   - maxTokens: The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
-    ///   - presencePenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
-    ///   - frequencyPenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
-    ///   - logitBias: Modify the likelihood of specified tokens appearing in the completion. Maps tokens (specified by their token ID in the OpenAI Tokenizer—not English words) to an associated bias value from -100 to 100. Values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
-    ///   - completionHandler: Returns an OpenAI Data Model
-    @available(swift 5.5)
-    @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
-    public func sendChat(with messages: [ChatMessage],
-                         model: OpenAIModelType = .chat(.chatgpt),
-                         user: String? = nil,
-                         temperature: Double? = 1,
-                         topProbabilityMass: Double? = 0,
-                         choices: Int? = 1,
-                         stop: [String]? = nil,
-                         maxTokens: Int? = nil,
-                         presencePenalty: Double? = 0,
-                         frequencyPenalty: Double? = 0,
-                         logitBias: [Int: Double]? = nil) async throws -> OpenAI<MessageResult> {
-        return try await withCheckedThrowingContinuation { continuation in
-            sendChat(with: messages,
-                     model: model,
-                     user: user,
-                     temperature: temperature,
-                     topProbabilityMass: topProbabilityMass,
-                     choices: choices,
-                     stop: stop,
-                     maxTokens: maxTokens,
-                     presencePenalty: presencePenalty,
-                     frequencyPenalty: frequencyPenalty,
-                     logitBias: logitBias) { result in
-                switch result {
-                case .success: continuation.resume(with: result)
-                case .failure(let failure): continuation.resume(throwing: failure)
-                }
-            }
+
+        // Add the "prompt" field.
+        if !prompt.isEmpty {
+            body.append("--\(boundary)\r\n".data(using: .utf8)!)
+            body.append("Content-Disposition: form-data; name=\"prompt\"\r\n\r\n".data(using: .utf8)!)
+            body.append(prompt.data(using: .utf8)!)
+            body.append("\r\n".data(using: .utf8)!)
        }
+
+        // Add the "n" field.
+        body.append("--\(boundary)\r\n".data(using: .utf8)!)
+ body.append("Content-Disposition: form-data; name=\"n\"\r\n\r\n".data(using: .utf8)!) + body.append("\(n)".data(using: .utf8)!) + body.append("\r\n".data(using: .utf8)!) + + // Add the "size" field. + body.append("--\(boundary)\r\n".data(using: .utf8)!) + body.append("Content-Disposition: form-data; name=\"size\"\r\n\r\n".data(using: .utf8)!) + body.append(size.data(using: .utf8)!) + body.append("\r\n".data(using: .utf8)!) + + body.append("--\(boundary)--\r\n".data(using: .utf8)!) + + request.httpBody = body + + return request } - - /// Send a Chat request to the OpenAI API with stream enabled - /// - Parameters: - /// - messages: Array of `ChatMessages` - /// - model: The Model to use, the only support model is `gpt-3.5-turbo` - /// - user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. - /// - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or topProbabilityMass but not both. - /// - topProbabilityMass: The OpenAI api equivalent of the "top_p" parameter. An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. - /// - choices: How many chat completion choices to generate for each input message. - /// - stop: Up to 4 sequences where the API will stop generating further tokens. - /// - maxTokens: The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens). - /// - presencePenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. - /// - frequencyPenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - /// - logitBias: Modify the likelihood of specified tokens appearing in the completion. Maps tokens (specified by their token ID in the OpenAI Tokenizer—not English words) to an associated bias value from -100 to 100. Values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. - /// - Returns: Returns an OpenAI Data Model - @available(swift 5.5) - @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) - public func sendStreamingChat(with messages: [ChatMessage], - model: OpenAIModelType = .chat(.chatgpt), - user: String? = nil, - temperature: Double? = 1, - topProbabilityMass: Double? = 0, - choices: Int? = 1, - stop: [String]? = nil, - maxTokens: Int? = nil, - presencePenalty: Double? = 0, - frequencyPenalty: Double? = 0, - logitBias: [Int: Double]? 
= nil) -> AsyncStream<Result<OpenAI<StreamMessageResult>, OpenAIError>> {
-        return AsyncStream { continuation in
-            sendStreamingChat(
-                with: messages,
-                model: model,
-                user: user,
-                temperature: temperature,
-                topProbabilityMass: topProbabilityMass,
-                choices: choices,
-                stop: stop,
-                maxTokens: maxTokens,
-                presencePenalty: presencePenalty,
-                frequencyPenalty: frequencyPenalty,
-                logitBias: logitBias,
-                onEventReceived: { result in
-                    continuation.yield(result)
-                }) {
-                    continuation.finish()
-                }
-        }
-    }
+    func prepareMultipartFormFileDataRequest(_ endpoint: OpenAIEndpointProvider.API, file: Data, purpose: String) -> URLRequest {
+
+        var urlComponents = URLComponents(url: URL(string: config.baseURL)!, resolvingAgainstBaseURL: true)
+        urlComponents?.path = config.endpointProvider.getPath(api: endpoint)
+        var request = URLRequest(url: urlComponents!.url!)
+        request.httpMethod = config.endpointProvider.getMethod(api: endpoint)
+
+        config.authorizeRequest(&request)
+
+        let boundary = "Boundary-\(UUID().uuidString)"
+        request.setValue("multipart/form-data; boundary=\(boundary)", forHTTPHeaderField: "Content-Type")
+
+        var body = Data()
+
+        // Add the "file" field.
+        body.append("--\(boundary)\r\n".data(using: .utf8)!)
+        body.append("Content-Disposition: form-data; name=\"file\"; filename=\"file\"\r\n\r\n".data(using: .utf8)!)
+        body.append(file)
+        body.append("\r\n".data(using: .utf8)!)
+
+        // Add the "purpose" field.
+        body.append("--\(boundary)\r\n".data(using: .utf8)!)
+        body.append("Content-Disposition: form-data; name=\"purpose\"\r\n\r\n".data(using: .utf8)!)
+        body.append(purpose.data(using: .utf8)!)
+        body.append("\r\n".data(using: .utf8)!)
-    /// Send a Embeddings request to the OpenAI API
-    /// - Parameters:
-    /// - input: The Input For Example "The food was delicious and the waiter..."
-    /// - model: The Model to use, the only support model is `text-embedding-ada-002`
-    /// - completionHandler: Returns an OpenAI Data Model
-    @available(swift 5.5)
-    @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
-    public func sendEmbeddings(with input: String,
-                               model: OpenAIModelType = .embedding(.ada)) async throws -> OpenAI {
-        return try await withCheckedThrowingContinuation { continuation in
-            sendEmbeddings(with: input) { result in
-                continuation.resume(with: result)
-            }
-        }
-    }
-
-    /// Send a Moderation request to the OpenAI API
-    /// - Parameters:
-    /// - input: The Input For Example "My nam is Adam"
-    /// - model: The Model to use
-    /// - Returns: Returns an OpenAI Data Model
-    @available(swift 5.5)
-    @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
-    public func sendModerations(with input: String = "", model: OpenAIModelType = .moderation(.latest)) async throws -> OpenAI {
-        return try await withCheckedThrowingContinuation { continuation in
-            sendModerations(with: input, model: model) { result in
-                continuation.resume(with: result)
-            }
-        }
-    }
-
-    /// Send a Image generation request to the OpenAI API
-    /// - Parameters:
-    /// - prompt: The Text Prompt
-    /// - numImages: The number of images to generate, defaults to 1
-    /// - size: The size of the image, defaults to 1024x1024. There are two other options: 512x512 and 256x256
-    /// - user: An optional unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
-    /// - Returns: Returns an OpenAI Data Model
-    @available(swift 5.5)
-    @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
-    public func sendImages(with prompt: String, numImages: Int = 1, size: ImageSize = .size1024, user: String? = nil) async throws -> OpenAI {
-        return try await withCheckedThrowingContinuation { continuation in
-            sendImages(with: prompt, numImages: numImages, size: size, user: user) { result in
-                continuation.resume(with: result)
-            }
-        }
+
+        body.append("--\(boundary)--\r\n".data(using: .utf8)!)
+
+        request.httpBody = body
+
+        return request
    }
+}
diff --git a/Sources/OpenAISwift/OpenAISwift/Assistants.swift b/Sources/OpenAISwift/OpenAISwift/Assistants.swift
new file mode 100644
index 0000000..332264b
--- /dev/null
+++ b/Sources/OpenAISwift/OpenAISwift/Assistants.swift
@@ -0,0 +1,190 @@
+//
+// Assistants.swift
+//
+//
+// Created by Mark Hoath on 14/11/2023.
+//
+
+import Foundation
+
+extension OpenAISwift {
+
+    /// createAssistant request to the OpenAI API
+    /// - Parameters:
+    /// - model: ID of the model to use. You can use the List models API to see all of your available models, or see our Model overview for descriptions of them.
+    /// - name: The name of the assistant. The maximum length is 256 characters.
+    /// - description: The description of the assistant. The maximum length is 512 characters.
+    /// - instructions: The system instructions that the assistant uses. The maximum length is 32768 characters.
+    /// - tools: A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types code_interpreter, retrieval, or function.
+    /// - file_ids: A list of file IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order.
+    /// - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
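For reference, the multipart bodies assembled by the two helpers above have the following wire shape. This is a minimal, standalone sketch: the boundary string is arbitrary, and the payload bytes and `assistants` purpose value are placeholders.

```swift
import Foundation

// Standalone illustration of the multipart/form-data layout the request
// helpers emit; the field names must match the API ("file" and "purpose").
let boundary = "Boundary-\(UUID().uuidString)"
let fileBytes = Data("{\"text\": \"hello\"}".utf8) // placeholder file payload

var body = Data()
body.append(Data("--\(boundary)\r\n".utf8))
body.append(Data("Content-Disposition: form-data; name=\"file\"; filename=\"file\"\r\n\r\n".utf8))
body.append(fileBytes)
body.append(Data("\r\n".utf8))
body.append(Data("--\(boundary)\r\n".utf8))
body.append(Data("Content-Disposition: form-data; name=\"purpose\"\r\n\r\n".utf8))
body.append(Data("assistants".utf8))
body.append(Data("\r\n".utf8))
body.append(Data("--\(boundary)--\r\n".utf8))

print(String(decoding: body, as: UTF8.self))
```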
+    ///
+    /// - completionHandler: Returns an Assistant Object
+
+    public func createAssistant(model: String, name: String?, description: String?, instructions: String?, tools: [Tools]?, file_ids: [String]?, metadata: [String:String]?, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.assistant_create
+        let body = AssistantBody(model: model, name: name, description: description, instructions: instructions, tools: tools, file_ids: file_ids, metadata: metadata)
+        let request = prepareRequest(endpoint, body: body, queryItems: nil)
+
+        makeRequest(request: request) { result in
+
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// retrieveAssistant request to the OpenAI API
+    /// - Parameters:
+    /// - assistant_id: The ID of the assistant to retrieve
+    ///
+    /// - completionHandler: Returns an Assistant Object
+
+    public func retrieveAssistant(assistant_id: String, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.assistant_retrieve
+        var request = prepareRequest(endpoint, queryItems: nil)
+
+        request.url?.appendPathComponent("/\(assistant_id)")
+
+        makeRequest(request: request) { result in
+
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// modifyAssistant request to the OpenAI API
+    /// - Parameters:
+    /// - assistant_id: The ID of the assistant to modify
+    ///
+    /// - model: ID of the model to use. You can use the List models API to see all of your available models, or see our Model overview for descriptions of them.
+    /// - name: The name of the assistant. The maximum length is 256 characters.
+    /// - description: The description of the assistant. The maximum length is 512 characters.
+    /// - instructions: The system instructions that the assistant uses. The maximum length is 32768 characters.
+    /// - tools: A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types code_interpreter, retrieval, or function.
+    /// - file_ids: A list of file IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order.
+    /// - metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
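A minimal usage sketch of `createAssistant` follows. It assumes a configured `OpenAISwift` client; the model name and metadata are illustrative, and the response is printed as-is since its payload type depends on the decoding above.

```swift
import OpenAISwift

// Hypothetical caller; `client` is an OpenAISwift instance configured elsewhere.
func makeTutorAssistant(client: OpenAISwift) {
    client.createAssistant(
        model: "gpt-4-1106-preview",   // illustrative model ID
        name: "Math Tutor",
        description: nil,
        instructions: "You are a personal math tutor. Answer briefly.",
        tools: nil,                    // e.g. code_interpreter/retrieval/function tools
        file_ids: nil,
        metadata: ["team": "education"]
    ) { result in
        switch result {
        case .success(let response): print(response)
        case .failure(let error): print("createAssistant failed:", error)
        }
    }
}
```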
+    ///
+    /// - completionHandler: Returns an Assistant Object
+
+    public func modifyAssistant(assistant_id: String, model: String, name: String?, description: String?, instructions: String?, tools: [Tools]?, file_ids: [String]?, metadata: [String:String]?, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.assistant_modify
+        let body = AssistantBody(model: model, name: name, description: description, instructions: instructions, tools: tools, file_ids: file_ids, metadata: metadata)
+        var request = prepareRequest(endpoint, body: body, queryItems: nil)
+        request.url?.appendPathComponent("/\(assistant_id)")
+
+        makeRequest(request: request) { result in
+
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// deleteAssistant request to the OpenAI API
+    /// - Parameters:
+    /// - assistant_id: The ID of the assistant to delete
+    /// - completionHandler: Returns the Deletion Status
+
+    public func deleteAssistant(assistant_id: String, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.assistant_delete
+
+        var request = prepareRequest(endpoint, queryItems: nil)
+        request.url?.appendPathComponent("/\(assistant_id)")
+
+        makeRequest(request: request) { result in
+
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+    /// listAssistants request to the OpenAI API
+    /// - Parameters:
+    /// - limit: Optional. A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.
+    /// - order: Optional. Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order. Defaults to desc.
+    /// - after: Optional. A cursor for use in pagination. after is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects,
+    ///   ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.
+    /// - before: Optional. A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects,
+    ///   ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.
+    ///
+    /// - completionHandler: Returns a List of Assistant Objects
+
+    public func listAssistants(limit: Int?, order: String?, after: String?, before: String?, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.assistant_retrieve
+
+        let laqp = ListAssistantParams(limit: limit, order: order, after: after, before: before)
+
+        var queryItems: [URLQueryItem] = []
+
+        if let parameters = laqp.toDictionary() {
+            queryItems = parameters.compactMap { key, value in
+                if let stringValue = value as? String {
+                    return URLQueryItem(name: key, value: stringValue)
+                } else if let intValue = value as? Int {
+                    return URLQueryItem(name: key, value: String(intValue))
+                }
+                // Add more cases here for other types if needed
+                return nil
+            }
+        }
+
+        let request = prepareRequest(endpoint, queryItems: queryItems)
+
+        makeRequest(request: request) { result in
+
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+
+
+}
diff --git a/Sources/OpenAISwift/OpenAISwift/Audio.swift b/Sources/OpenAISwift/OpenAISwift/Audio.swift
new file mode 100644
index 0000000..b8646fd
--- /dev/null
+++ b/Sources/OpenAISwift/OpenAISwift/Audio.swift
@@ -0,0 +1,115 @@
+//
+// Audio.swift
+//
+//
+// Created by Mark Hoath on 14/11/2023.
+//
+
+import Foundation
+
+// Completely Untested !!!!
+// Needs to have ASYNC version for Audio Play in Real Time.
+
+extension OpenAISwift {
+
+    /// Create Speech request to the OpenAI API
+    /// - Parameters:
+    /// - model: One of the available TTS models: tts-1 or tts-1-hd
+    /// - input: The text to generate audio for. The maximum length is 4096 characters.
+    /// - voice: The voice to use when generating the audio. Supported voices are alloy, echo, fable, onyx, nova, and shimmer
+    /// - response_format: The format to return the audio in. Supported formats are mp3, opus, aac, and flac.
+    /// - speed: The speed of the generated audio. Select a value from 0.25 to 4.0. 1.0 is the default.
+
+    /// - completionHandler: Returns audio file content.
+
+    // FIX needs to be streaming.
+
+    public func createSpeech(model: OpenAIModelType.TTS, input: String, voice: Voice, response_format: AudioResponseFormat? = .mp3, speed: Double? = 1.0, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.audio_speech
+        let body = Audio(model: model.rawValue, input: input, voice: voice, response_format: response_format, speed: speed)
+        let request = prepareRequest(endpoint, body: body, queryItems: nil)
+
+        makeRequest(request: request) { result in
+
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// Create Transcription request to the OpenAI API
+    /// - Parameters:
+    /// - file: URL for an Audio file. The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+    /// - model: The Model to use. Only whisper-1 is currently available.
+    /// - language: The language of the input audio. Supplying the input language in ISO-639-1 format will improve accuracy and latency.
+    /// - prompt: An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
+    /// - response_format: The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
+    /// - temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.
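The pagination-parameter mapping used by `listAssistants` above (and again by the file and fine-tuning list calls later in this patch) boils down to encoding an `Encodable` to JSON, bridging it to `[String: Any]`, and turning the pairs into query items. A standalone sketch of that round trip, using a hypothetical `ListParams` type:

```swift
import Foundation

// Hypothetical mirror of the listing parameters; nil fields are dropped by JSONEncoder.
struct ListParams: Encodable {
    let limit: Int?
    let order: String?
}

let params = ListParams(limit: 5, order: "desc")
var queryItems: [URLQueryItem] = []
if let data = try? JSONEncoder().encode(params),
   let dict = (try? JSONSerialization.jsonObject(with: data)) as? [String: Any] {
    queryItems = dict.compactMap { key, value in
        if let s = value as? String { return URLQueryItem(name: key, value: s) }
        if let i = value as? Int { return URLQueryItem(name: key, value: String(i)) }
        return nil
    }
}

var components = URLComponents(string: "https://api.openai.com/v1/assistants")!
components.queryItems = queryItems
print(components.url!) // e.g. https://api.openai.com/v1/assistants?limit=5&order=desc
```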
+ /// - completionHandler: Returns the transcribed text {"text":"Hello There"} + + public func createTranscription(file: URL, model: OpenAIEndpointModelType.AudioTranscriptions, language: String?, prompt: String?, response_format: TranscriptionResponseFormat? = .json, temperature: Double?=0.0, completionHandler: @escaping (Result, OpenAIError>) -> Void) { + + let endpoint = OpenAIEndpointProvider.API.audio_transcription + let body = Transcription(file: file.absoluteString, model: model.rawValue, language: language, prompt: prompt, response_format: response_format, temperature: temperature) + let request = prepareRequest(endpoint, body: body, queryItems: nil) + + makeRequest(request: request) { result in + + switch result { + case .success(let success): + do { + let res = try JSONDecoder().decode(OpenAI.self, from: success) + completionHandler(.success(res)) + } catch { + completionHandler(.failure(.decodingError(error: error))) + } + case .failure(let failure): + completionHandler(.failure(.genericError(error: failure))) + } + } + } + + /// Create Translation request to the OpenAI API - Translates into English + /// - Parameters: + /// - file: URL for an Audio file. The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + /// - model: The Model to use. Only whisper-1 is currently available. + /// - prompt: An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language. + /// - response_format: The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. + /// - temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit. + + /// - completionHandler: Returns the translated text {"text":"Hello There"} + + + public func createTranslation(file: URL, model: OpenAIEndpointModelType.AudioTranslations, prompt: String?, response_format: TranscriptionResponseFormat? = .json, temperature: Double?=0.0, completionHandler: @escaping (Result, OpenAIError>) -> Void) { + + let endpoint = OpenAIEndpointProvider.API.audio_translation + let body = Translation(file: file.absoluteString, model: model.rawValue, prompt: prompt, response_format: response_format, temperature: temperature) + let request = prepareRequest(endpoint, body: body, queryItems: nil) + + makeRequest(request: request) { result in + + switch result { + case .success(let success): + do { + let res = try JSONDecoder().decode(OpenAI.self, from: success) + completionHandler(.success(res)) + } catch { + completionHandler(.failure(.decodingError(error: error))) + } + case .failure(let failure): + completionHandler(.failure(.genericError(error: failure))) + } + } + } + + +} diff --git a/Sources/OpenAISwift/OpenAISwift/Chats.swift b/Sources/OpenAISwift/OpenAISwift/Chats.swift new file mode 100644 index 0000000..c35c92e --- /dev/null +++ b/Sources/OpenAISwift/OpenAISwift/Chats.swift @@ -0,0 +1,354 @@ +// +// Chats.swift +// +// +// Created by Mark Hoath on 10/11/2023. +// + +import Foundation + +extension OpenAISwift { + + /// Send a Chat request to the OpenAI API + /// - Parameters: + /// - messages: Array of `ChatMessages` + /// - model: The Model to use. 
+ /// - user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. + /// - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or topProbabilityMass but not both. + /// - topProbabilityMass: The OpenAI api equivalent of the "top_p" parameter. An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. + /// - choices: How many chat completion choices to generate for each input message. + /// - stop: Up to 4 sequences where the API will stop generating further tokens. + /// - maxTokens: The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens). + /// - presencePenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + /// - frequencyPenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// - logitBias: Modify the likelihood of specified tokens appearing in the completion. Maps tokens (specified by their token ID in the OpenAI Tokenizer—not English words) to an associated bias value from -100 to 100. Values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// - completionHandler: Returns an OpenAI Data Model + public func sendChat(with messages: [ChatMessage], + model: OpenAIEndpointModelType.ChatCompletions = .gpt35Turbo, + user: String? = nil, + temperature: Double? = 1, + topProbabilityMass: Double? = 0, + choices: Int? = 1, + stop: [String]? = nil, + maxTokens: Int? = nil, + presencePenalty: Double? = 0, + frequencyPenalty: Double? = 0, + logitBias: [Int: Double]? = nil, + completionHandler: @escaping (Result, OpenAIError>) -> Void) { + let endpoint = OpenAIEndpointProvider.API.chat + let body = ChatConversation(user: user, + messages: messages, + model: model.rawValue, + temperature: temperature, + topProbabilityMass: topProbabilityMass, + choices: choices, + stop: stop, + maxTokens: maxTokens, + presencePenalty: presencePenalty, + frequencyPenalty: frequencyPenalty, + logitBias: logitBias, + stream: false) + + let request = prepareRequest(endpoint, body: body, queryItems: nil) + + makeRequest(request: request) { result in + switch result { + case .success(let success): + if let chatErr = try? 
JSONDecoder().decode(ChatError.self, from: success) as ChatError { + completionHandler(.failure(.chatError(error: chatErr.error))) + return + } + + do { + let res = try JSONDecoder().decode(OpenAI.self, from: success) + completionHandler(.success(res)) + } catch { + completionHandler(.failure(.decodingError(error: error))) + } + + case .failure(let failure): + completionHandler(.failure(.genericError(error: failure))) + } + } + } + + /// Send a Chat request to the OpenAI API + @available(*, deprecated, message: "Use method with `OpenAIEndpointModelType.ChatCompletions` instead") + public func sendChat(with messages: [ChatMessage], + model: OpenAIModelType, + user: String? = nil, + temperature: Double? = 1, + topProbabilityMass: Double? = 0, + choices: Int? = 1, + stop: [String]? = nil, + maxTokens: Int? = nil, + presencePenalty: Double? = 0, + frequencyPenalty: Double? = 0, + logitBias: [Int: Double]? = nil, + completionHandler: @escaping (Result, OpenAIError>) -> Void) { + guard let model = OpenAIEndpointModelType.ChatCompletions(rawValue: model.modelName) else { + preconditionFailure("Model \(model.modelName) not supported") + } + sendChat( + with: messages, + model: model, + user: user, + temperature: temperature, + topProbabilityMass: topProbabilityMass, + choices: choices, + stop: stop, + maxTokens: maxTokens, + presencePenalty: presencePenalty, + frequencyPenalty: frequencyPenalty, + logitBias: logitBias, + completionHandler: completionHandler) + } + + /// Send a Chat request to the OpenAI API with stream enabled + /// - Parameters: + /// - messages: Array of `ChatMessages` + /// - model: The Model to use. + /// - user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. + /// - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or topProbabilityMass but not both. + /// - topProbabilityMass: The OpenAI api equivalent of the "top_p" parameter. An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. + /// - choices: How many chat completion choices to generate for each input message. + /// - stop: Up to 4 sequences where the API will stop generating further tokens. + /// - maxTokens: The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens). + /// - presencePenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + /// - frequencyPenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// - logitBias: Modify the likelihood of specified tokens appearing in the completion. Maps tokens (specified by their token ID in the OpenAI Tokenizer—not English words) to an associated bias value from -100 to 100. Values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. 
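Minimal usage sketches for the two chat entry points in this file; both assume a configured `OpenAISwift` client and the public `ChatMessage(role:content:)` initializer, and simply print whatever payload decoding yields.

```swift
import OpenAISwift

// One-shot request/response via the completion-handler API.
func askOnce(client: OpenAISwift) {
    let messages = [ChatMessage(role: .user, content: "Hello!")]
    client.sendChat(with: messages) { result in
        switch result {
        case .success(let response): print(response)
        case .failure(let error): print("sendChat failed:", error)
        }
    }
}

// Streaming: chunks arrive via onEventReceived; onComplete fires at end of stream.
func streamOnce(client: OpenAISwift) {
    let messages = [ChatMessage(role: .user, content: "Write a haiku about Swift.")]
    client.sendStreamingChat(
        with: messages,
        onEventReceived: { result in
            switch result {
            case .success(let chunk): print(chunk)
            case .failure(let error): print("stream error:", error)
            }
        },
        onComplete: { print("stream finished") }
    )
}
```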
+    /// - onEventReceived: Called multiple times, returns an OpenAI Data Model
+    /// - onComplete: Triggers when the server completes sending the message
+    public func sendStreamingChat(with messages: [ChatMessage],
+                                  model: OpenAIEndpointModelType.ChatCompletions = .gpt35Turbo,
+                                  user: String? = nil,
+                                  temperature: Double? = 1,
+                                  topProbabilityMass: Double? = 0,
+                                  choices: Int? = 1,
+                                  stop: [String]? = nil,
+                                  maxTokens: Int? = nil,
+                                  presencePenalty: Double? = 0,
+                                  frequencyPenalty: Double? = 0,
+                                  logitBias: [Int: Double]? = nil,
+                                  onEventReceived: ((Result<OpenAI<StreamMessageResult>, OpenAIError>) -> Void)? = nil,
+                                  onComplete: (() -> Void)? = nil) {
+        let endpoint = OpenAIEndpointProvider.API.chat
+        let body = ChatConversation(user: user,
+                                    messages: messages,
+                                    model: model.rawValue,
+                                    temperature: temperature,
+                                    topProbabilityMass: topProbabilityMass,
+                                    choices: choices,
+                                    stop: stop,
+                                    maxTokens: maxTokens,
+                                    presencePenalty: presencePenalty,
+                                    frequencyPenalty: frequencyPenalty,
+                                    logitBias: logitBias,
+                                    stream: true)
+        let request = prepareRequest(endpoint, body: body, queryItems: nil)
+        handler.onEventReceived = onEventReceived
+        handler.onComplete = onComplete
+        handler.connect(with: request)
+    }
+
+    /// Send a Chat request to the OpenAI API with stream enabled
+    @available(*, deprecated, message: "Use method with `OpenAIEndpointModelType.ChatCompletions` instead")
+    public func sendStreamingChat(with messages: [ChatMessage],
+                                  model: OpenAIModelType,
+                                  user: String? = nil,
+                                  temperature: Double? = 1,
+                                  topProbabilityMass: Double? = 0,
+                                  choices: Int? = 1,
+                                  stop: [String]? = nil,
+                                  maxTokens: Int? = nil,
+                                  presencePenalty: Double? = 0,
+                                  frequencyPenalty: Double? = 0,
+                                  logitBias: [Int: Double]? = nil,
+                                  onEventReceived: ((Result<OpenAI<StreamMessageResult>, OpenAIError>) -> Void)? = nil,
+                                  onComplete: (() -> Void)? = nil) {
+        guard let model = OpenAIEndpointModelType.ChatCompletions(rawValue: model.modelName) else {
+            preconditionFailure("Model \(model.modelName) not supported")
+        }
+        sendStreamingChat(
+            with: messages,
+            model: model,
+            user: user,
+            temperature: temperature,
+            topProbabilityMass: topProbabilityMass,
+            choices: choices,
+            stop: stop,
+            maxTokens: maxTokens,
+            presencePenalty: presencePenalty,
+            frequencyPenalty: frequencyPenalty,
+            logitBias: logitBias,
+            onEventReceived: onEventReceived,
+            onComplete: onComplete)
+    }
+
+
+    /// Send a Chat request to the OpenAI API
+    /// - Parameters:
+    /// - messages: Array of `ChatMessages`
+    /// - model: The Model to use.
+    /// - user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+    /// - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or topProbabilityMass but not both.
+    /// - topProbabilityMass: The OpenAI api equivalent of the "top_p" parameter. An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.
+    /// - choices: How many chat completion choices to generate for each input message.
+    /// - stop: Up to 4 sequences where the API will stop generating further tokens.
+    /// - maxTokens: The maximum number of tokens allowed for the generated answer.
By default, the number of tokens the model can return will be (4096 - prompt tokens). + /// - presencePenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + /// - frequencyPenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// - logitBias: Modify the likelihood of specified tokens appearing in the completion. Maps tokens (specified by their token ID in the OpenAI Tokenizer—not English words) to an associated bias value from -100 to 100. Values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// - completionHandler: Returns an OpenAI Data Model + @available(swift 5.5) + @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) + public func sendChat(with messages: [ChatMessage], + model: OpenAIEndpointModelType.ChatCompletions = .gpt35Turbo, + user: String? = nil, + temperature: Double? = 1, + topProbabilityMass: Double? = 0, + choices: Int? = 1, + stop: [String]? = nil, + maxTokens: Int? = nil, + presencePenalty: Double? = 0, + frequencyPenalty: Double? = 0, + logitBias: [Int: Double]? = nil) async throws -> OpenAI { + return try await withCheckedThrowingContinuation { continuation in + sendChat(with: messages, + model: model, + user: user, + temperature: temperature, + topProbabilityMass: topProbabilityMass, + choices: choices, + stop: stop, + maxTokens: maxTokens, + presencePenalty: presencePenalty, + frequencyPenalty: frequencyPenalty, + logitBias: logitBias) { result in + switch result { + case .success: continuation.resume(with: result) + case .failure(let failure): continuation.resume(throwing: failure) + } + } + } + } + + /// Send a Chat request to the OpenAI API + @available(swift 5.5) + @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) + @available(*, deprecated, message: "Use method with `OpenAIEndpointModelType.ChatCompletions` instead") + public func sendChat(with messages: [ChatMessage], + model: OpenAIModelType, + user: String? = nil, + temperature: Double? = 1, + topProbabilityMass: Double? = 0, + choices: Int? = 1, + stop: [String]? = nil, + maxTokens: Int? = nil, + presencePenalty: Double? = 0, + frequencyPenalty: Double? = 0, + logitBias: [Int: Double]? = nil) async throws -> OpenAI { + guard let model = OpenAIEndpointModelType.ChatCompletions(rawValue: model.modelName) else { + preconditionFailure("Model \(model.modelName) not supported") + } + return try await sendChat(with: messages, + model: model, + user: user, + temperature: temperature, + topProbabilityMass: topProbabilityMass, + choices: choices, + stop: stop, + maxTokens: maxTokens, + presencePenalty: presencePenalty, + frequencyPenalty: frequencyPenalty, + logitBias: logitBias) + } + + /// Send a Chat request to the OpenAI API with stream enabled + /// - Parameters: + /// - messages: Array of `ChatMessages` + /// - model: The Model to use, the only support model is `gpt-3.5-turbo` + /// - user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. + /// - temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
We generally recommend altering this or topProbabilityMass but not both. + /// - topProbabilityMass: The OpenAI api equivalent of the "top_p" parameter. An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. + /// - choices: How many chat completion choices to generate for each input message. + /// - stop: Up to 4 sequences where the API will stop generating further tokens. + /// - maxTokens: The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens). + /// - presencePenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + /// - frequencyPenalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + /// - logitBias: Modify the likelihood of specified tokens appearing in the completion. Maps tokens (specified by their token ID in the OpenAI Tokenizer—not English words) to an associated bias value from -100 to 100. Values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + /// - Returns: Returns an OpenAI Data Model + @available(swift 5.5) + @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) + public func sendStreamingChat(with messages: [ChatMessage], + model: OpenAIEndpointModelType.ChatCompletions = .gpt35Turbo, + user: String? = nil, + temperature: Double? = 1, + topProbabilityMass: Double? = 0, + choices: Int? = 1, + stop: [String]? = nil, + maxTokens: Int? = nil, + presencePenalty: Double? = 0, + frequencyPenalty: Double? = 0, + logitBias: [Int: Double]? = nil) -> AsyncStream, OpenAIError>> { + return AsyncStream { continuation in + sendStreamingChat( + with: messages, + model: model, + user: user, + temperature: temperature, + topProbabilityMass: topProbabilityMass, + choices: choices, + stop: stop, + maxTokens: maxTokens, + presencePenalty: presencePenalty, + frequencyPenalty: frequencyPenalty, + logitBias: logitBias, + onEventReceived: { result in + continuation.yield(result) + }) { + continuation.finish() + } + } + } + + /// Send a Chat request to the OpenAI API with stream enabled + @available(swift 5.5) + @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) + @available(*, deprecated, message: "Use method with `OpenAIEndpointModelType.ChatCompletions` instead") + public func sendStreamingChat(with messages: [ChatMessage], + model: OpenAIModelType, + user: String? = nil, + temperature: Double? = 1, + topProbabilityMass: Double? = 0, + choices: Int? = 1, + stop: [String]? = nil, + maxTokens: Int? = nil, + presencePenalty: Double? = 0, + frequencyPenalty: Double? = 0, + logitBias: [Int: Double]? 
= nil) -> AsyncStream, OpenAIError>> { + guard let model = OpenAIEndpointModelType.ChatCompletions(rawValue: model.modelName) else { + preconditionFailure("Model \(model.modelName) not supported") + } + return AsyncStream { continuation in + sendStreamingChat( + with: messages, + model: model, + user: user, + temperature: temperature, + topProbabilityMass: topProbabilityMass, + choices: choices, + stop: stop, + maxTokens: maxTokens, + presencePenalty: presencePenalty, + frequencyPenalty: frequencyPenalty, + logitBias: logitBias, + onEventReceived: { result in + continuation.yield(result) + }) { + continuation.finish() + } + } + } + +} diff --git a/Sources/OpenAISwift/OpenAISwift/Completions.swift b/Sources/OpenAISwift/OpenAISwift/Completions.swift new file mode 100644 index 0000000..b259423 --- /dev/null +++ b/Sources/OpenAISwift/OpenAISwift/Completions.swift @@ -0,0 +1,97 @@ + + +import Foundation +#if canImport(FoundationNetworking) && canImport(FoundationXML) +import FoundationNetworking +import FoundationXML +#endif + +extension OpenAISwift { + + /// Send a Completion to the OpenAI API + /// - Parameters: + /// - prompt: The Text Prompt + /// - model: The AI Model to Use. Set to `OpenAIEndpointModelType.LegacyCompletions.davinci,` by default which is the most capable model + /// - maxTokens: The limit character for the returned response, defaults to 16 as per the API + /// - completionHandler: Returns an OpenAI Data Model + /// - Note: OpenAI marked this endpoint as legacy + public func sendCompletion(with prompt: String, model: OpenAIEndpointModelType.LegacyCompletions = .davinci, maxTokens: Int = 16, temperature: Double = 1, completionHandler: @escaping (Result, OpenAIError>) -> Void) { + let endpoint = OpenAIEndpointProvider.API.completions + let body = Command(prompt: prompt, model: model.rawValue, maxTokens: maxTokens, temperature: temperature) + let request = prepareRequest(endpoint, body: body, queryItems: nil) + + makeRequest(request: request) { result in + switch result { + case .success(let success): + do { + let res = try JSONDecoder().decode(OpenAI.self, from: success) + completionHandler(.success(res)) + } catch { + completionHandler(.failure(.decodingError(error: error))) + } + case .failure(let failure): + completionHandler(.failure(.genericError(error: failure))) + } + } + } + + /// Send a Completion to the OpenAI API + @available(*, deprecated, message: "Use method with `OpenAIEndpointModelType.LegacyCompletions` instead") + public func sendCompletion(with prompt: String, model: OpenAIModelType, maxTokens: Int = 16, temperature: Double = 1, completionHandler: @escaping (Result, OpenAIError>) -> Void) { + guard let model = OpenAIEndpointModelType.LegacyCompletions(rawValue: model.modelName) else { + preconditionFailure("Model \(model.modelName) not supported") + } + sendCompletion( + with: prompt, + model: model, + maxTokens: maxTokens, + temperature: temperature, + completionHandler: completionHandler) + } + + /// Send a Completion to the OpenAI API + /// - Parameters: + /// - prompt: The Text Prompt + /// - model: The AI Model to Use. Set to `OpenAIEndpointModelType.LegacyCompletions.davinci` by default which is the most capable model + /// - maxTokens: The limit character for the returned response, defaults to 16 as per the API + /// - temperature: Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
Defaults to 1 + /// - Returns: Returns an OpenAI Data Model + /// - Note: OpenAI marked this endpoint as legacy + @available(swift 5.5) + @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) + public func sendCompletion(with prompt: String, model: OpenAIEndpointModelType.LegacyCompletions = .davinci, maxTokens: Int = 16, temperature: Double = 1) async throws -> OpenAI { + return try await withCheckedThrowingContinuation { continuation in + sendCompletion(with: prompt, model: model, maxTokens: maxTokens, temperature: temperature) { result in + continuation.resume(with: result) + } + } + } + + /// Send a Completion to the OpenAI API + @available(swift 5.5) + @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) + @available(*, deprecated, message: "Use method with `OpenAIEndpointModelType.LegacyCompletions` instead") + public func sendCompletion(with prompt: String, model: OpenAIModelType, maxTokens: Int = 16, temperature: Double = 1) async throws -> OpenAI { + guard let model = OpenAIEndpointModelType.LegacyCompletions(rawValue: model.modelName) else { + preconditionFailure("Model \(model.modelName) not supported") + } + return try await sendCompletion(with: prompt, model: model, maxTokens: maxTokens, temperature: temperature) + } + + /// Send a Edit request to the OpenAI API + /// - Parameters: + /// - instruction: The Instruction For Example: "Fix the spelling mistake" + /// - model: The Model to use, the only support model is `text-davinci-edit-001` + /// - input: The Input For Example "My nam is Adam" + /// - Returns: Returns an OpenAI Data Model + @available(swift 5.5) + @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) + public func sendEdits(with instruction: String, model: OpenAIModelType = .feature(.davinci), input: String = "") async throws -> OpenAI { + return try await withCheckedThrowingContinuation { continuation in + sendEdits(with: instruction, model: model, input: input) { result in + continuation.resume(with: result) + } + } + } + +} diff --git a/Sources/OpenAISwift/OpenAISwift/Edits.swift b/Sources/OpenAISwift/OpenAISwift/Edits.swift new file mode 100644 index 0000000..7190a13 --- /dev/null +++ b/Sources/OpenAISwift/OpenAISwift/Edits.swift @@ -0,0 +1,39 @@ +// +// Edits.swift +// +// +// Created by Mark Hoath on 10/11/2023. 
+// + +import Foundation + +extension OpenAISwift { + + + /// Send a Edit request to the OpenAI API + /// - Parameters: + /// - instruction: The Instruction For Example: "Fix the spelling mistake" + /// - model: The Model to use, the only support model is `text-davinci-edit-001` + /// - input: The Input For Example "My nam is Adam" + /// - completionHandler: Returns an OpenAI Data Model + public func sendEdits(with instruction: String, model: OpenAIModelType = .feature(.davinci), input: String = "", completionHandler: @escaping (Result, OpenAIError>) -> Void) { + let endpoint = OpenAIEndpointProvider.API.edits + let body = Instruction(instruction: instruction, model: model.modelName, input: input) + let request = prepareRequest(endpoint, body: body, queryItems: nil) + + makeRequest(request: request) { result in + switch result { + case .success(let success): + do { + let res = try JSONDecoder().decode(OpenAI.self, from: success) + completionHandler(.success(res)) + } catch { + completionHandler(.failure(.decodingError(error: error))) + } + case .failure(let failure): + completionHandler(.failure(.genericError(error: failure))) + } + } + } + +} diff --git a/Sources/OpenAISwift/OpenAISwift/Embeddings.swift b/Sources/OpenAISwift/OpenAISwift/Embeddings.swift new file mode 100644 index 0000000..3e6353c --- /dev/null +++ b/Sources/OpenAISwift/OpenAISwift/Embeddings.swift @@ -0,0 +1,80 @@ +// +// Embeddings.swift +// +// +// Created by Mark Hoath on 10/11/2023. +// + +import Foundation + +extension OpenAISwift { + /// Send a Embeddings request to the OpenAI API + /// - Parameters: + /// - input: The Input For Example "The food was delicious and the waiter..." + /// - model: The Model to use + /// - completionHandler: Returns an OpenAI Data Model + public func sendEmbeddings(with input: String, + model: OpenAIEndpointModelType.Embeddings = .textEmbeddingAda002, + completionHandler: @escaping (Result, OpenAIError>) -> Void) { + let endpoint = OpenAIEndpointProvider.API.embeddings + let body = EmbeddingsInput(input: input, + model: model.rawValue) + + let request = prepareRequest(endpoint, body: body, queryItems: nil) + makeRequest(request: request) { result in + switch result { + case .success(let success): + do { + let res = try JSONDecoder().decode(OpenAI.self, from: success) + completionHandler(.success(res)) + } catch { + completionHandler(.failure(.decodingError(error: error))) + } + case .failure(let failure): + completionHandler(.failure(.genericError(error: failure))) + } + } + } + + /// Send a Embeddings request to the OpenAI API + @available(*, deprecated, message: "Use method with `OpenAIEndpointModelType.Embeddings` instead") + public func sendEmbeddings(with input: String, + model: OpenAIModelType, + completionHandler: @escaping (Result, OpenAIError>) -> Void) { + guard let model = OpenAIEndpointModelType.Embeddings(rawValue: model.modelName) else { + preconditionFailure("Model \(model.modelName) not supported") + } + sendEmbeddings(with: input, model: model, completionHandler: completionHandler) + } + + + /// Send a Embeddings request to the OpenAI API + /// - Parameters: + /// - input: The Input For Example "The food was delicious and the waiter..." 
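A minimal sketch of the `sendEdits` call defined above; `client` is assumed to be a configured `OpenAISwift` instance, and the instruction/input pair mirrors the doc-comment example.

```swift
import OpenAISwift

// Hypothetical caller for the (legacy) edits endpoint: an instruction plus
// the input text to correct; the response is printed as-is.
func fixSpelling(client: OpenAISwift) {
    client.sendEdits(with: "Fix the spelling mistake", input: "My nam is Adam") { result in
        switch result {
        case .success(let response): print(response)
        case .failure(let error): print("sendEdits failed:", error)
        }
    }
}
```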
+ /// - model: The Model to use, the only support model is `text-embedding-ada-002` + /// - completionHandler: Returns an OpenAI Data Model + @available(swift 5.5) + @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) + public func sendEmbeddings(with input: String, + model: OpenAIEndpointModelType.Embeddings = .textEmbeddingAda002) async throws -> OpenAI { + return try await withCheckedThrowingContinuation { continuation in + sendEmbeddings(with: input) { result in + continuation.resume(with: result) + } + } + } + + /// Send a Embeddings request to the OpenAI API + @available(swift 5.5) + @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *) + @available(*, deprecated, message: "Use method with `OpenAIEndpointModelType.Embeddings` instead") + public func sendEmbeddings(with input: String, + model: OpenAIModelType) async throws -> OpenAI { + guard let model = OpenAIEndpointModelType.Embeddings(rawValue: model.modelName) else { + preconditionFailure("Model \(model.modelName) not supported") + } + return try await sendEmbeddings(with: input, model: model) + } + + +} diff --git a/Sources/OpenAISwift/OpenAISwift/Files.swift b/Sources/OpenAISwift/OpenAISwift/Files.swift new file mode 100644 index 0000000..16378bf --- /dev/null +++ b/Sources/OpenAISwift/OpenAISwift/Files.swift @@ -0,0 +1,176 @@ +// +// Files.swift +// +// +// Created by Mark Hoath on 14/11/2023. +// + +import Foundation + +extension OpenAISwift { + + /// listFiles request to the OpenAI API + /// - Parameters: + /// - purpose: Only return files with the given purpose. + + /// - completionHandler: Returns an OpenAI Data Model + + public func listFiles(purpose: FilePurpose?, completionHandler: @escaping (Result, OpenAIError>) -> Void) { + + let endpoint = OpenAIEndpointProvider.API.files_list + + let pur_request = FilesResquest(purpose: purpose?.rawValue) + + var queryItems: [URLQueryItem] = [] + + if let parameters = pur_request.toDictionary() { + queryItems = parameters.compactMap{ key, value in + if let stringValue = value as? String { + return URLQueryItem(name: key, value: stringValue) + } else if let intValue = value as? Int { + return URLQueryItem(name: key, value: String(intValue)) + } + // Add more cases here for other types if needed + return nil + } + } + + let request = prepareRequest(endpoint, queryItems: queryItems) + + makeRequest(request: request) { result in + + switch result { + case .success(let success): + do { + let res = try JSONDecoder().decode(OpenAI.self, from: success) + completionHandler(.success(res)) + } catch { + completionHandler(.failure(.decodingError(error: error))) + } + case .failure(let failure): + completionHandler(.failure(.genericError(error: failure))) + } + } + } + + /// uploadFiles request to the OpenAI API + /// - Parameters: + /// - File: The Data of the file to be uploaded. + /// - purpose: The intended purpose of the uploaded file. + /// Use "fine-tune" for Fine-tuning and "assistants" for Assistants and Messages. This allows us to validate the format of the uploaded file is correct for fine-tuning. 
+
+    /// - completionHandler: Returns an OpenAI Data Model
+
+    public func uploadFiles(file: Data, purpose: FilePurpose, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.files_upload
+
+        let request = prepareMultipartFormFileDataRequest(endpoint, file: file, purpose: purpose.rawValue)
+
+        makeRequest(request: request) { result in
+
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// deleteFiles request to the OpenAI API
+    /// - Parameters:
+    /// - file_id: The ID of the file to use for this request.
+
+    /// - completionHandler: Returns the deletion status
+
+    public func deleteFiles(file_id: String, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.files_delete
+        var request = prepareRequest(endpoint, queryItems: nil)
+
+        request.url?.appendPathComponent(file_id)
+
+        makeRequest(request: request) { result in
+
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// retrieveFile request to the OpenAI API
+    /// - Parameters:
+    /// - file_id: The ID of the file to use for this request.
+
+    /// - completionHandler: Returns the file object
+
+
+    public func retrieveFile(file_id: String, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.files_list
+        var request = prepareRequest(endpoint, queryItems: nil)
+
+        request.url?.appendPathComponent(file_id)
+
+        makeRequest(request: request) { result in
+
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// retrieveFileContent request to the OpenAI API
+    /// - Parameters:
+    /// - file_id: The ID of the file to use for this request.
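A minimal sketch of `uploadFiles`; it assumes a configured client and that `FilePurpose` exposes an `assistants`-style case (the exact case names live in the provider file, which is outside this hunk).

```swift
import OpenAISwift

// Hypothetical caller: read a local JSONL file and upload it.
// `.assistants` is an assumed FilePurpose case name.
func uploadAssistantFile(client: OpenAISwift, fileURL: URL) {
    guard let data = try? Data(contentsOf: fileURL) else { return }
    client.uploadFiles(file: data, purpose: .assistants) { result in
        switch result {
        case .success(let response): print(response)
        case .failure(let error): print("uploadFiles failed:", error)
        }
    }
}
```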
+
+    /// - completionHandler: Returns the file content
+
+
+    public func retrieveFileContent(file_id: String, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.files_list
+
+        var request = prepareRequest(endpoint, queryItems: nil)
+
+        request.url?.appendPathComponent(file_id)
+        request.url?.appendPathComponent("/content")
+
+        makeRequest(request: request) { result in
+
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+}
diff --git a/Sources/OpenAISwift/OpenAISwift/FineTunings.swift b/Sources/OpenAISwift/OpenAISwift/FineTunings.swift
new file mode 100644
index 0000000..2dde645
--- /dev/null
+++ b/Sources/OpenAISwift/OpenAISwift/FineTunings.swift
@@ -0,0 +1,191 @@
+//
+// FineTunings.swift
+//
+//
+// Created by Mark Hoath on 14/11/2023.
+//
+
+import Foundation
+
+extension OpenAISwift {
+
+    /// createFineTuningJob request to the OpenAI API
+    /// - Parameters:
+    /// - model: The name of the model to fine-tune. You can select one of the supported models.
+    /// - training_file: The ID of an uploaded file that contains training data.
+    /// - hyperparameters: (Optional) The hyperparameters used for the fine-tuning job.
+    /// - suffix: (Optional) A string of up to 18 characters that will be added to your fine-tuned model name.
+    /// - validation_file: The ID of an uploaded file that contains validation data.
+
+    /// - completionHandler: Returns an OpenAI Data Model
+
+    public func createFineTuningJob(model: String, training_file: String, hyperparameters: FineTuningHyperParams?, suffix: String?, validation_file: String?, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.fine_tuning_create
+
+        let body = FineTuningRequest(model: model, training_file: training_file, hyperparameters: hyperparameters, suffix: suffix, validation_file: validation_file)
+        let request = prepareRequest(endpoint, body: body, queryItems: nil)
+
+        makeRequest(request: request) { result in
+
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// listFineTuningJobs request to the OpenAI API
+    /// - Parameters:
+    /// - after: Optional. Identifier for the last job from the previous pagination request.
+    /// - limit: Optional. Number of fine-tuning jobs to retrieve. Defaults to 20.
+
+    /// - completionHandler: Returns a list of fine-tuning Jobs.
+
+    public func listFineTuningJobs(after: String?, limit: Int?, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.fine_tuning_list
+
+        let ftlr = FineTuningListRequest(after: after, limit: limit)
+
+        var queryItems: [URLQueryItem] = []
+
+        if let parameters = ftlr.toDictionary() {
+            queryItems = parameters.compactMap { key, value in
+                if let stringValue = value as?
String { + return URLQueryItem(name: key, value: stringValue) + } else if let intValue = value as? Int { + return URLQueryItem(name: key, value: String(intValue)) + } + // Add more cases here for other types if needed + return nil + } + } + + let request = prepareRequest(endpoint, queryItems: queryItems) + + makeRequest(request: request) { result in + + switch result { + case .success(let success): + do { + let res = try JSONDecoder().decode(OpenAI.self, from: success) + completionHandler(.success(res)) + } catch { + completionHandler(.failure(.decodingError(error: error))) + } + case .failure(let failure): + completionHandler(.failure(.genericError(error: failure))) + } + } + } + + /// retrieveFineTuningJob request to the OpenAI API + /// - Parameters: + /// - fine_tuning_job_id: The ID of the fine-tuning job. + + /// - completionHandler: Returns the fine-tuning Object. + + public func retrieveFineTuningJob(fine_tuning_job_id: String, completionHandler: @escaping (Result, OpenAIError>) -> Void) { + + let endpoint = OpenAIEndpointProvider.API.fine_tuning_retrieve + var request = prepareRequest(endpoint, queryItems: nil) + request.url?.appendPathComponent("/\(fine_tuning_job_id)") + + makeRequest(request: request) { result in + + switch result { + case .success(let success): + do { + let res = try JSONDecoder().decode(OpenAI.self, from: success) + completionHandler(.success(res)) + } catch { + completionHandler(.failure(.decodingError(error: error))) + } + case .failure(let failure): + completionHandler(.failure(.genericError(error: failure))) + } + } + } + + /// cancelFineTuningJob request to the OpenAI API + /// - Parameters: + /// - fine_tuning_job_id: The ID of the fine-tuning job to cancel. + + /// - completionHandler: Returns the fine-tuning Object. + + public func cancelFineTuningJob(fine_tuning_job_id: String, completionHandler: @escaping (Result, OpenAIError>) -> Void) { + + let endpoint = OpenAIEndpointProvider.API.fine_tuning_cancel + var request = prepareRequest(endpoint, queryItems: nil) + request.url?.appendPathComponent("/\(fine_tuning_job_id)/cancel") + + makeRequest(request: request) { result in + + switch result { + case .success(let success): + do { + let res = try JSONDecoder().decode(OpenAI.self, from: success) + completionHandler(.success(res)) + } catch { + completionHandler(.failure(.decodingError(error: error))) + } + case .failure(let failure): + completionHandler(.failure(.genericError(error: failure))) + } + } + } + + /// listFineTuningEvents request to the OpenAI API + /// - Parameters: + /// - fine_tuning_job_id: The ID of the fine-tuning job to cancel. + + /// - completionHandler: Returns the fine-tuning Object. + + public func listFineTuningEvents(fine_tuning_job_id: String, after: String?, limit: Int?, completionHandler: @escaping (Result, OpenAIError>) -> Void) { + + let endpoint = OpenAIEndpointProvider.API.fine_tuning_list_events + let ftlr = FineTuningListRequest(after: after, limit: limit) + + var queryItems: [URLQueryItem] = [] + + if let parameters = ftlr.toDictionary() { + queryItems = parameters.compactMap{ key, value in + if let stringValue = value as? String { + return URLQueryItem(name: key, value: stringValue) + } else if let intValue = value as? 
diff --git a/Sources/OpenAISwift/OpenAISwift/Images.swift b/Sources/OpenAISwift/OpenAISwift/Images.swift
new file mode 100644
index 0000000..3e9bfe7
--- /dev/null
+++ b/Sources/OpenAISwift/OpenAISwift/Images.swift
@@ -0,0 +1,114 @@
+//
+//  Images.swift
+//
+//
+//  Created by Mark Hoath on 10/11/2023.
+//
+
+import Foundation
+
+extension OpenAISwift {
+
+    /// Send an Image generation request to the OpenAI API
+    /// - Parameters:
+    /// - prompt: The Text Prompt
+    /// - numImages: The number of images to generate, defaults to 1
+    /// - size: The size of the image, defaults to 1024x1024. There are two other options: 512x512 and 256x256
+    /// - user: An optional unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+    /// - completionHandler: Returns an OpenAI Data Model
+    public func sendImages(with prompt: String, numImages: Int = 1, size: ImageSize = .size1024, user: String? = nil, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+        let endpoint = OpenAIEndpointProvider.API.images
+        let body = ImageGeneration(prompt: prompt, n: numImages, size: size, user: user)
+        let request = prepareRequest(endpoint, body: body, queryItems: nil)
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// Send an Image Edit request to the OpenAI API
+    /// - Parameters:
+    /// - image: The image to be edited. Must be less than 4MB.
+    /// - mask: The mask area to be edited. Must be less than 4MB.
+    /// - prompt: The Text Prompt
+    /// - numImages: The number of images to generate, defaults to 1
+    /// - size: The size of the image, defaults to 1024x1024. There are two other options: 512x512 and 256x256
+    /// - user: An optional unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+    /// - completionHandler: Returns an OpenAI Data Model
+    public func sendImageEdit(image: Data, mask: Data?, with prompt: String, numImages: Int = 1, size: ImageSize = .size1024, user: String? = nil, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.image_edits
+        let request = prepareMultipartFormImageDataRequest(endpoint, imageData: image, maskData: mask, prompt: prompt, n: numImages, size: size.rawValue)
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// Send an Image Variation request to the OpenAI API
+    /// - Parameters:
+    /// - image: The image to be varied. Must be less than 4MB.
+    /// - numImages: The number of images to generate, defaults to 1
+    /// - size: The size of the image, defaults to 1024x1024. There are two other options: 512x512 and 256x256
+    /// - user: An optional unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+    /// - completionHandler: Returns an OpenAI Data Model
+    public func sendImageVariations(image: Data, numImages: Int = 1, size: ImageSize = .size1024, user: String? = nil, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+        let endpoint = OpenAIEndpointProvider.API.image_variations
+        let request = prepareMultipartFormImageDataRequest(endpoint, imageData: image, maskData: nil, prompt: "", n: numImages, size: size.rawValue)
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// Send an Image generation request to the OpenAI API
+    /// - Parameters:
+    /// - prompt: The Text Prompt
+    /// - numImages: The number of images to generate, defaults to 1
+    /// - size: The size of the image, defaults to 1024x1024. There are two other options: 512x512 and 256x256
+    /// - user: An optional unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
+    /// - Returns: Returns an OpenAI Data Model
+    @available(swift 5.5)
+    @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
+    public func sendImages(with prompt: String, numImages: Int = 1, size: ImageSize = .size1024, user: String? = nil) async throws -> OpenAI {
+        return try await withCheckedThrowingContinuation { continuation in
+            sendImages(with: prompt, numImages: numImages, size: size, user: user) { result in
+                continuation.resume(with: result)
+            }
+        }
+    }
+}
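A quick sketch of the async variant above; the prompt and size are arbitrary and `openAI` is assumed to be a configured client:

```swift
// Generate two 512x512 images with the async/await variant.
Task {
    do {
        let images = try await openAI.sendImages(with: "A lighthouse at dawn",
                                                 numImages: 2,
                                                 size: .size512)
        print(images)
    } catch {
        print("Image generation failed:", error)
    }
}
```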
diff --git a/Sources/OpenAISwift/OpenAISwift/MessageFiles.swift b/Sources/OpenAISwift/OpenAISwift/MessageFiles.swift
new file mode 100644
index 0000000..26c3874
--- /dev/null
+++ b/Sources/OpenAISwift/OpenAISwift/MessageFiles.swift
@@ -0,0 +1,93 @@
+//
+//  MessageFiles.swift
+//
+//
+//  Created by Mark Hoath on 16/11/2023.
+//
+
+import Foundation
+
+extension OpenAISwift {
+
+    /// retrieveMessageFile request to the OpenAI API
+    /// - Parameters:
+    /// - thread_id: The ID of the thread the message belongs to.
+    /// - message_id: The ID of the message the file is attached to.
+    /// - file_id: The ID of the file being retrieved.
+    /// - completionHandler: Returns a message file object
+    public func retrieveMessageFiles(thread_id: String, message_id: String, file_id: String, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.retrieve_message_file
+        var request = prepareRequest(endpoint)
+        request.url?.appendPathComponent("/\(thread_id)/messages/\(message_id)/files/\(file_id)")
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// listMessageFiles request to the OpenAI API
+    /// - Parameters:
+    /// - thread_id: The ID of the thread the message belongs to.
+    /// - message_id: The ID of the message the files are attached to.
+    /// - limit: Optional. A limit on the number of objects to be returned, between 1 and 100. Defaults to 20.
+    /// - order: Optional. Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order.
+    /// - after: Optional. A cursor for use in pagination. after is an object ID that defines your place in the list.
+    /// - before: Optional. A cursor for use in pagination. before is an object ID that defines your place in the list.
+    /// - completionHandler: Returns a list of message file objects
+    public func listMessageFiles(thread_id: String, message_id: String, limit: Int?, order: String?, after: String?, before: String?, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.list_message_file
+        let mlr = MessageListRequest(limit: limit, order: order, after: after, before: before)
+
+        var queryItems: [URLQueryItem] = []
+        if let parameters = mlr.toDictionary() {
+            queryItems = parameters.compactMap { key, value in
+                if let stringValue = value as? String {
+                    return URLQueryItem(name: key, value: stringValue)
+                } else if let intValue = value as? Int {
+                    return URLQueryItem(name: key, value: String(intValue))
+                }
+                // Add more cases here for other types if needed
+                return nil
+            }
+        }
+
+        var request = prepareRequest(endpoint, queryItems: queryItems)
+        request.url?.appendPathComponent("/\(thread_id)/messages/\(message_id)/files")
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+}
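A usage sketch for the message-file listing above; the thread and message IDs are placeholders:

```swift
// List the files attached to a message, newest first.
openAI.listMessageFiles(thread_id: "thread_abc123",
                        message_id: "msg_abc123",
                        limit: 20,
                        order: "desc",
                        after: nil,
                        before: nil) { result in
    print(result)
}
```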
diff --git a/Sources/OpenAISwift/OpenAISwift/Messages.swift b/Sources/OpenAISwift/OpenAISwift/Messages.swift
new file mode 100644
index 0000000..896429e
--- /dev/null
+++ b/Sources/OpenAISwift/OpenAISwift/Messages.swift
@@ -0,0 +1,167 @@
+//
+//  Messages.swift
+//
+//
+//  Created by Mark Hoath on 14/11/2023.
+//
+
+import Foundation
+
+extension OpenAISwift {
+
+    /// createMessage request to the OpenAI API
+    /// - Parameters:
+    /// - thread_id: The ID of the thread to create a message for.
+    /// - role: The role of the entity that is creating the message. Currently only user is supported.
+    /// - content: The content of the message.
+    /// - file_ids: Optional. A list of File IDs that the message should use. There can be a maximum of 10 files attached to a message.
+    /// - metadata: Optional. Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
+    /// - completionHandler: Returns a Message Object
+    public func createMessage(thread_id: String, role: String, content: String, file_ids: [String]?, metadata: [String:String]?, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.messages_create
+        let body = Message(role: role, content: content, file_ids: file_ids, metadata: metadata)
+        var request = prepareRequest(endpoint, body: body, queryItems: nil)
+        request.url?.appendPathComponent("/\(thread_id)/messages")
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// retrieveMessage request to the OpenAI API
+    /// - Parameters:
+    /// - thread_id: The ID of the thread the message belongs to.
+    /// - message_id: The ID of the message to retrieve.
+    /// - completionHandler: Returns a Message Object
+    public func retrieveMessage(thread_id: String, message_id: String, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.messages_retrieve
+        var request = prepareRequest(endpoint)
+        request.url?.appendPathComponent("/\(thread_id)/messages/\(message_id)")
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// modifyMessage request to the OpenAI API
+    /// - Parameters:
+    /// - thread_id: The ID of the thread the message belongs to.
+    /// - message_id: The ID of the message to modify.
+    /// - metadata: Optional. Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
+    /// - completionHandler: Returns a Modified Message Object
+    public func modifyMessage(thread_id: String, message_id: String, metadata: [String:String]?, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.messages_modify
+
+        var request: URLRequest
+        if let metadata = metadata {
+            // The API expects the dictionary wrapped in a "metadata" key.
+            request = prepareRequest(endpoint, body: ["metadata": metadata], queryItems: nil)
+        } else {
+            request = prepareRequest(endpoint)
+        }
+        request.url?.appendPathComponent("/\(thread_id)/messages/\(message_id)")
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// listMessages request to the OpenAI API
+    /// - Parameters:
+    /// - thread_id: The ID of the thread the messages belong to.
+    /// - limit: Optional. A limit on the number of objects to be returned, between 1 and 100. Defaults to 20.
+    /// - order: Optional. Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order.
+    /// - after: Optional. A cursor for use in pagination. after is an object ID that defines your place in the list.
+    /// - before: Optional. A cursor for use in pagination. before is an object ID that defines your place in the list.
+    /// - completionHandler: Returns a list of message objects
+    public func listMessages(thread_id: String, limit: Int?, order: String?, after: String?, before: String?, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.messages_list
+        let mlr = MessageListRequest(limit: limit, order: order, after: after, before: before)
+
+        var queryItems: [URLQueryItem] = []
+        if let parameters = mlr.toDictionary() {
+            queryItems = parameters.compactMap { key, value in
+                if let stringValue = value as? String {
+                    return URLQueryItem(name: key, value: stringValue)
+                } else if let intValue = value as? Int {
+                    return URLQueryItem(name: key, value: String(intValue))
+                }
+                // Add more cases here for other types if needed
+                return nil
+            }
+        }
+
+        var request = prepareRequest(endpoint, queryItems: queryItems)
+        request.url?.appendPathComponent("/\(thread_id)/messages")
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+}
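A minimal sketch of the messages surface; the thread ID and content are placeholders:

```swift
// Append a user message to an existing thread, then read the thread back.
openAI.createMessage(thread_id: "thread_abc123",
                     role: "user",
                     content: "How does fine-tuning differ from retrieval?",
                     file_ids: nil,
                     metadata: ["source": "ios-app"]) { result in
    print(result)
}

openAI.listMessages(thread_id: "thread_abc123",
                    limit: nil, order: nil, after: nil, before: nil) { result in
    print(result)
}
```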
diff --git a/Sources/OpenAISwift/OpenAISwift/Models.swift b/Sources/OpenAISwift/OpenAISwift/Models.swift
new file mode 100644
index 0000000..cefe59e
--- /dev/null
+++ b/Sources/OpenAISwift/OpenAISwift/Models.swift
@@ -0,0 +1,99 @@
+//
+//  Models.swift
+//
+//
+//  Created by Mark Hoath on 14/11/2023.
+//
+
+import Foundation
+
+extension OpenAISwift {
+
+    /// listModels Lists the currently available models, and provides basic information about each one such as the owner and availability.
+    /// - Parameters:
+    /// - completionHandler: Returns a list of model objects
+    public func listModels(completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.models_list
+        let request = prepareRequest(endpoint, queryItems: nil)
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// retrieveModel Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
+    /// - Parameters:
+    /// - model: String. The ID of the model to use for this request.
+    /// - completionHandler: Returns a model object
+    public func retrieveModel(model: String, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.models_retrieve
+        var request = prepareRequest(endpoint, queryItems: nil)
+        request.url?.appendPathComponent("/\(model)")
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// deleteModel Delete a fine-tuned model. You must have the Owner role in your organization to delete a model.
+    /// - Parameters:
+    /// - model: String. The ID of the model to delete.
+    /// - completionHandler: Returns the deletion status
+    public func deleteModel(model: String, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.models_delete
+        var request = prepareRequest(endpoint, queryItems: nil)
+        request.url?.appendPathComponent("/\(model)")
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+}
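The model endpoints are the simplest of the batch; a brief sketch with an illustrative model ID:

```swift
// Enumerate the models visible to this API key, then inspect one of them.
openAI.listModels { result in
    print(result)
}

openAI.retrieveModel(model: "gpt-4") { result in
    print(result)
}
```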
diff --git a/Sources/OpenAISwift/OpenAISwift/Moderations.swift b/Sources/OpenAISwift/OpenAISwift/Moderations.swift
new file mode 100644
index 0000000..ca53f9a
--- /dev/null
+++ b/Sources/OpenAISwift/OpenAISwift/Moderations.swift
@@ -0,0 +1,76 @@
+//
+//  Moderations.swift
+//
+//
+//  Created by Mark Hoath on 10/11/2023.
+//
+
+import Foundation
+
+extension OpenAISwift {
+
+    /// Send a Moderation request to the OpenAI API
+    /// - Parameters:
+    /// - input: The input text, for example "My name is Adam"
+    /// - model: The Model to use
+    /// - completionHandler: Returns an OpenAI Data Model
+    public func sendModerations(with input: String, model: OpenAIEndpointModelType.Moderations = .textModerationLatest, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+        let endpoint = OpenAIEndpointProvider.API.moderations
+        let body = Moderation(input: input, model: model.rawValue)
+        let request = prepareRequest(endpoint, body: body, queryItems: nil)
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// Send a Moderation request to the OpenAI API
+    @available(*, deprecated, message: "Use method with `OpenAIEndpointModelType.Moderations` instead")
+    public func sendModerations(with input: String, model: OpenAIModelType, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+        guard let model = OpenAIEndpointModelType.Moderations(rawValue: model.modelName) else {
+            preconditionFailure("Model \(model.modelName) not supported")
+        }
+        sendModerations(with: input,
+                        model: model,
+                        completionHandler: completionHandler)
+    }
+
+    /// Send a Moderation request to the OpenAI API
+    /// - Parameters:
+    /// - input: The input text, for example "My name is Adam"
+    /// - model: The Model to use
+    /// - Returns: Returns an OpenAI Data Model
+    @available(swift 5.5)
+    @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
+    public func sendModerations(with input: String = "", model: OpenAIEndpointModelType.Moderations = .textModerationLatest) async throws -> OpenAI {
+        return try await withCheckedThrowingContinuation { continuation in
+            sendModerations(with: input, model: model) { result in
+                continuation.resume(with: result)
+            }
+        }
+    }
+
+    /// Send a Moderation request to the OpenAI API
+    @available(swift 5.5)
+    @available(macOS 10.15, iOS 13, watchOS 6, tvOS 13, *)
+    @available(*, deprecated, message: "Use method with `OpenAIEndpointModelType.Moderations` instead")
+    public func sendModerations(with input: String = "", model: OpenAIModelType) async throws -> OpenAI {
+        guard let model = OpenAIEndpointModelType.Moderations(rawValue: model.modelName) else {
+            preconditionFailure("Model \(model.modelName) not supported")
+        }
+        return try await sendModerations(with: input, model: model)
+    }
+}
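The non-deprecated async variant defaults to the latest moderation model, so a call can be as short as this (input text illustrative):

```swift
// Classify a string with the latest moderation model.
Task {
    let verdict = try await openAI.sendModerations(with: "Some user-provided text")
    print(verdict)
}
```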
diff --git a/Sources/OpenAISwift/OpenAISwift/Runs.swift b/Sources/OpenAISwift/OpenAISwift/Runs.swift
new file mode 100644
index 0000000..76d26d8
--- /dev/null
+++ b/Sources/OpenAISwift/OpenAISwift/Runs.swift
@@ -0,0 +1,365 @@
+//
+//  Runs.swift
+//
+//
+//  Created by Mark Hoath on 14/11/2023.
+//
+
+import Foundation
+
+extension OpenAISwift {
+
+    /// createRun request to the OpenAI API
+    /// - Parameters:
+    /// - thread_id: The ID of the thread to run.
+    /// - assistant_id: The ID of the assistant to use to execute this run.
+    /// - model: Optional. The ID of the model to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.
+    /// - instructions: Optional. Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis.
+    /// - tools: Optional. Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis.
+    /// - metadata: Optional. Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
+    /// - completionHandler: Returns a Run Object
+    public func createRun(thread_id: String, assistant_id: String, model: String?, instructions: String?, tools: [Tools]?, metadata: [String:String]?, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.runs_create
+        let body = RunRequest(assistant_id: assistant_id, model: model, instructions: instructions, tools: tools, metadata: metadata)
+        var request = prepareRequest(endpoint, body: body, queryItems: nil)
+        request.url?.appendPathComponent("/\(thread_id)/runs")
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// retrieveRun request to the OpenAI API
+    /// - Parameters:
+    /// - thread_id: The ID of the thread the run belongs to.
+    /// - run_id: The ID of the run to retrieve.
+    /// - completionHandler: Returns a Run Object
+    public func retrieveRun(thread_id: String, run_id: String, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.runs_retrieve
+        var request = prepareRequest(endpoint)
+        request.url?.appendPathComponent("/\(thread_id)/runs/\(run_id)")
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// modifyRun request to the OpenAI API
+    /// - Parameters:
+    /// - thread_id: The ID of the thread the run belongs to.
+    /// - run_id: The ID of the run to modify.
+    /// - metadata: Optional. Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
+    /// - completionHandler: Returns a Modified Run Object
+    public func modifyRun(thread_id: String, run_id: String, metadata: [String:String]?, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.runs_modify
+
+        var request: URLRequest
+        if let metadata = metadata {
+            // The API expects the dictionary wrapped in a "metadata" key.
+            request = prepareRequest(endpoint, body: ["metadata": metadata], queryItems: nil)
+        } else {
+            request = prepareRequest(endpoint)
+        }
+        request.url?.appendPathComponent("/\(thread_id)/runs/\(run_id)")
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// listRuns request to the OpenAI API
+    /// - Parameters:
+    /// - thread_id: The ID of the thread the runs belong to.
+    /// - limit: Optional. A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.
+    /// - order: Optional. Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order.
+    /// - after: Optional. A cursor for use in pagination. after is an object ID that defines your place in the list.
+    /// - before: Optional. A cursor for use in pagination. before is an object ID that defines your place in the list.
+    /// - completionHandler: Returns a list of run objects
+    public func listRuns(thread_id: String, limit: Int?, order: String?, after: String?, before: String?, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.runs_list
+        let rlr = RunListRequest(limit: limit, order: order, after: after, before: before)
+
+        var queryItems: [URLQueryItem] = []
+        if let parameters = rlr.toDictionary() {
+            queryItems = parameters.compactMap { key, value in
+                if let stringValue = value as? String {
+                    return URLQueryItem(name: key, value: stringValue)
+                } else if let intValue = value as? Int {
+                    return URLQueryItem(name: key, value: String(intValue))
+                }
+                // Add more cases here for other types if needed
+                return nil
+            }
+        }
+
+        var request = prepareRequest(endpoint, queryItems: queryItems)
+        request.url?.appendPathComponent("/\(thread_id)/runs")
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// runsSubmit request to the OpenAI API
+    /// - Parameters:
+    /// - thread_id: The ID of the thread the run belongs to.
+    /// - run_id: The ID of the run that requires the tool output submission.
+    /// - tool_outputs: A list of tools for which the outputs are being submitted.
+    /// - completionHandler: Returns the modified run object for the specified ID.
+    public func runsSubmit(thread_id: String, run_id: String, tool_outputs: [ToolsOutput], completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.runs_submit
+
+        // The API expects the array wrapped in a "tool_outputs" key.
+        var request = prepareRequest(endpoint, body: ["tool_outputs": tool_outputs], queryItems: nil)
+        request.url?.appendPathComponent("/\(thread_id)/runs/\(run_id)/submit_tool_outputs")
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// runCancel request to the OpenAI API
+    /// - Parameters:
+    /// - thread_id: The ID of the thread to which this run belongs.
+    /// - run_id: The ID of the run to cancel.
+    /// - completionHandler: Returns the modified run object for the specified ID.
+    public func runCancel(thread_id: String, run_id: String, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.runs_cancel
+        var request = prepareRequest(endpoint)
+        request.url?.appendPathComponent("/\(thread_id)/runs/\(run_id)/cancel")
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// createThreadRun request to the OpenAI API
+    /// - Parameters:
+    /// - assistant_id: The ID of the assistant to use to execute this run.
+    /// - thread: Optional. A thread to create and then run in a single request.
+    /// - model: Optional. The ID of the model to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.
+    /// - instructions: Optional. Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis.
+    /// - tools: Optional. Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis.
+    /// - metadata: Optional. Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
+    /// - completionHandler: Returns a Run Object
+    public func createThreadRun(assistant_id: String, thread: ThreadRun?, model: String?, instructions: String?, tools: [Tools]?, metadata: [String:String]?, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.runs_thread_create
+        let body = ThreadRunRequest(assistant_id: assistant_id, thread: thread, model: model, instructions: instructions, tools: tools, metatdata: metadata)
+        let request = prepareRequest(endpoint, body: body, queryItems: nil)
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// retrieveRunStep request to the OpenAI API
+    /// - Parameters:
+    /// - thread_id: The ID of the thread to which the run and run step belongs.
+    /// - run_id: The ID of the run to which the run step belongs.
+    /// - step_id: The ID of the run step to retrieve.
+    /// - completionHandler: Returns a RunStep Object
+    public func retrieveRunStep(thread_id: String, run_id: String, step_id: String, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.run_step_retrieve
+        var request = prepareRequest(endpoint)
+        request.url?.appendPathComponent("/\(thread_id)/runs/\(run_id)/steps/\(step_id)")
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// listRunSteps request to the OpenAI API
+    /// - Parameters:
+    /// - thread_id: The ID of the thread to which the run and run step belongs.
+    /// - run_id: The ID of the run to which the run step belongs.
+    /// - limit: Optional. A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.
+    /// - order: Optional. Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order.
+    /// - after: Optional. A cursor for use in pagination. after is an object ID that defines your place in the list.
+    /// - before: Optional. A cursor for use in pagination. before is an object ID that defines your place in the list.
+    /// - completionHandler: Returns a list of RunSteps belonging to a Run
+    public func listRunSteps(thread_id: String, run_id: String, limit: Int?, order: String?, after: String?, before: String?, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.run_step_list
+        let rlr = RunListRequest(limit: limit, order: order, after: after, before: before)
+
+        var queryItems: [URLQueryItem] = []
+        if let parameters = rlr.toDictionary() {
+            queryItems = parameters.compactMap { key, value in
+                if let stringValue = value as? String {
+                    return URLQueryItem(name: key, value: stringValue)
+                } else if let intValue = value as? Int {
+                    return URLQueryItem(name: key, value: String(intValue))
+                }
+                // Add more cases here for other types if needed
+                return nil
+            }
+        }
+
+        var request = prepareRequest(endpoint, queryItems: queryItems)
+        request.url?.appendPathComponent("/\(thread_id)/runs/\(run_id)/steps")
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+}
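A sketch of the run lifecycle using the functions above (the thread, assistant, and run IDs are placeholders):

```swift
// Execute an assistant against an existing thread, then fetch the run's state.
// In a real app you would poll retrieveRun until the run reaches a terminal status.
openAI.createRun(thread_id: "thread_abc123",
                 assistant_id: "asst_abc123",
                 model: nil,
                 instructions: nil,
                 tools: nil,
                 metadata: nil) { result in
    print(result)
}

openAI.retrieveRun(thread_id: "thread_abc123", run_id: "run_abc123") { result in
    print(result)
}
```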
diff --git a/Sources/OpenAISwift/OpenAISwift/Threads.swift b/Sources/OpenAISwift/OpenAISwift/Threads.swift
new file mode 100644
index 0000000..e5eb899
--- /dev/null
+++ b/Sources/OpenAISwift/OpenAISwift/Threads.swift
@@ -0,0 +1,146 @@
+//
+//  Threads.swift
+//
+//
+//  Created by Mark Hoath on 14/11/2023.
+//
+
+import Foundation
+
+extension OpenAISwift {
+
+    /// createThread request to the OpenAI API
+    /// - Parameters:
+    /// - messages: Optional. An array of messages to start the thread with.
+    /// - completionHandler: Returns a Thread Object
+    public func createThread(messages: [Message]?, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.thread_create
+
+        var request: URLRequest
+        if let messages = messages {
+            // The API expects the array wrapped in a "messages" key.
+            request = prepareRequest(endpoint, body: ["messages": messages], queryItems: nil)
+        } else {
+            request = prepareRequest(endpoint, queryItems: nil)
+        }
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// retrieveThread request to the OpenAI API
+    /// - Parameters:
+    /// - thread_id: The ID of the thread to retrieve.
+    /// - completionHandler: Returns a Thread Object
+    public func retrieveThread(thread_id: String, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.thread_retrieve
+        var request = prepareRequest(endpoint, queryItems: nil)
+        request.url?.appendPathComponent("/\(thread_id)")
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// modifyThread request to the OpenAI API
+    /// - Parameters:
+    /// - thread_id: The ID of the thread to modify.
+    /// - metadata: Optional. Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
+    /// - completionHandler: Returns a Modified Thread Object
+    public func modifyThread(thread_id: String, metadata: [String:String]?, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
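A sketch of the thread lifecycle; the thread ID and metadata values are placeholders:

```swift
// Thread lifecycle: create, tag with metadata, then delete.
openAI.createThread(messages: nil) { result in
    print(result)
}

openAI.modifyThread(thread_id: "thread_abc123",
                    metadata: ["topic": "support"]) { result in
    print(result)
}

openAI.deleteThread(thread_id: "thread_abc123") { result in
    print(result)
}
```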
+
+        let endpoint = OpenAIEndpointProvider.API.thread_modify
+
+        var request: URLRequest
+        if let metadata = metadata {
+            // The API expects the dictionary wrapped in a "metadata" key.
+            request = prepareRequest(endpoint, body: ["metadata": metadata], queryItems: nil)
+        } else {
+            request = prepareRequest(endpoint)
+        }
+        request.url?.appendPathComponent("/\(thread_id)")
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+
+    /// deleteThread request to the OpenAI API
+    /// - Parameters:
+    /// - thread_id: The ID of the thread to delete.
+    /// - completionHandler: Returns the deletion status
+    public func deleteThread(thread_id: String, completionHandler: @escaping (Result<OpenAI, OpenAIError>) -> Void) {
+
+        let endpoint = OpenAIEndpointProvider.API.thread_delete
+        var request = prepareRequest(endpoint)
+        request.url?.appendPathComponent("/\(thread_id)")
+
+        makeRequest(request: request) { result in
+            switch result {
+            case .success(let success):
+                do {
+                    let res = try JSONDecoder().decode(OpenAI.self, from: success)
+                    completionHandler(.success(res))
+                } catch {
+                    completionHandler(.failure(.decodingError(error: error)))
+                }
+            case .failure(let failure):
+                completionHandler(.failure(.genericError(error: failure)))
+            }
+        }
+    }
+}
diff --git a/Sources/OpenAISwift/ServerSentEventsHandler.swift b/Sources/OpenAISwift/ServerSentEventsHandler.swift
index e3f92db..6ccb3f0 100644
--- a/Sources/OpenAISwift/ServerSentEventsHandler.swift
+++ b/Sources/OpenAISwift/ServerSentEventsHandler.swift
@@ -6,17 +6,25 @@
 //
 
 import Foundation
+#if canImport(FoundationNetworking) && canImport(FoundationXML)
+import FoundationNetworking
+import FoundationXML
+#endif
 
 class ServerSentEventsHandler: NSObject {
 
     var onEventReceived: ((Result<OpenAI, OpenAIError>) -> Void)?
     var onComplete: (() -> Void)?
 
-    private lazy var session: URLSession = URLSession(configuration: .default, delegate: self, delegateQueue: nil)
+    private var session: URLSession?
     private var task: URLSessionDataTask?
 
     func connect(with request: URLRequest) {
-        task = session.dataTask(with: request)
+        // Use a long request timeout so slow streaming responses are not cut off.
+        let config = URLSessionConfiguration.default
+        config.timeoutIntervalForRequest = 300
+        session = URLSession(configuration: config, delegate: self, delegateQueue: nil)
+        task = session?.dataTask(with: request)
         task?.resume()
     }