diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AIOpenAIModelFactory.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AIOpenAIModelFactory.cs
index ec7a7c0333db..1b50613f2f02 100644
--- a/sdk/openai/Azure.AI.OpenAI/src/Generated/AIOpenAIModelFactory.cs
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AIOpenAIModelFactory.cs
@@ -7,6 +7,7 @@
using System;
using System.Collections.Generic;
+using System.IO;
using System.Linq;
namespace Azure.AI.OpenAI
@@ -37,7 +38,7 @@ public static partial class AIOpenAIModelFactory
///
/// The model to use for this transcription request.
/// A new instance for mocking.
- public static AudioTranscriptionOptions AudioTranscriptionOptions(BinaryData audioData = null, string filename = null, AudioTranscriptionFormat? responseFormat = null, string language = null, string prompt = null, float? temperature = null, string deploymentName = null)
+ public static AudioTranscriptionOptions AudioTranscriptionOptions(Stream audioData = null, string filename = null, AudioTranscriptionFormat? responseFormat = null, string language = null, string prompt = null, float? temperature = null, string deploymentName = null)
{
return new AudioTranscriptionOptions(
audioData,
@@ -127,7 +128,7 @@ public static AudioTranscriptionSegment AudioTranscriptionSegment(int id = defau
///
/// The model to use for this translation request.
/// A new instance for mocking.
- public static AudioTranslationOptions AudioTranslationOptions(BinaryData audioData = null, string filename = null, AudioTranslationFormat? responseFormat = null, string prompt = null, float? temperature = null, string deploymentName = null)
+ public static AudioTranslationOptions AudioTranslationOptions(Stream audioData = null, string filename = null, AudioTranslationFormat? responseFormat = null, string prompt = null, float? temperature = null, string deploymentName = null)
{
return new AudioTranslationOptions(
audioData,
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionOptions.Serialization.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionOptions.Serialization.cs
index 6612917f582f..80021cce7a89 100644
--- a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionOptions.Serialization.cs
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionOptions.Serialization.cs
@@ -5,212 +5,38 @@
#nullable disable
-using System;
-using System.ClientModel.Primitives;
-using System.Collections.Generic;
-using System.Text.Json;
-using Azure.Core;
-
namespace Azure.AI.OpenAI
{
- public partial class AudioTranscriptionOptions : IUtf8JsonSerializable, IJsonModel<AudioTranscriptionOptions>
+ public partial class AudioTranscriptionOptions
{
- void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W"));
-
- void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+ /// Convert into a MultipartFormDataRequestContent.
+ internal virtual MultipartFormDataRequestContent ToMultipartContent()
{
- var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
- if (format != "J")
- {
- throw new FormatException($"The model {nameof(AudioTranscriptionOptions)} does not support writing '{format}' format.");
- }
+ MultipartFormDataRequestContent content = new();
- writer.WriteStartObject();
- writer.WritePropertyName("file"u8);
- writer.WriteBase64StringValue(AudioData.ToArray(), "D");
- if (Optional.IsDefined(Filename))
- {
- writer.WritePropertyName("filename"u8);
- writer.WriteStringValue(Filename);
- }
- if (Optional.IsDefined(ResponseFormat))
- {
- writer.WritePropertyName("response_format"u8);
- writer.WriteStringValue(ResponseFormat.Value.ToString());
- }
- if (Optional.IsDefined(Language))
- {
- writer.WritePropertyName("language"u8);
- writer.WriteStringValue(Language);
- }
- if (Optional.IsDefined(Prompt))
- {
- writer.WritePropertyName("prompt"u8);
- writer.WriteStringValue(Prompt);
- }
- if (Optional.IsDefined(Temperature))
- {
- writer.WritePropertyName("temperature"u8);
- writer.WriteNumberValue(Temperature.Value);
- }
- if (Optional.IsDefined(DeploymentName))
- {
- writer.WritePropertyName("model"u8);
- writer.WriteStringValue(DeploymentName);
- }
- if (options.Format != "W" && _serializedAdditionalRawData != null)
- {
- foreach (var item in _serializedAdditionalRawData)
- {
- writer.WritePropertyName(item.Key);
-#if NET6_0_OR_GREATER
- writer.WriteRawValue(item.Value);
-#else
- using (JsonDocument document = JsonDocument.Parse(item.Value))
- {
- JsonSerializer.Serialize(writer, document.RootElement);
- }
-#endif
- }
- }
- writer.WriteEndObject();
- }
+ content.Add(AudioData, "file", Filename);
+ content.Add(DeploymentName, "model");
- AudioTranscriptionOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options)
- {
- var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
- if (format != "J")
+ if (Language is not null)
{
- throw new FormatException($"The model {nameof(AudioTranscriptionOptions)} does not support reading '{format}' format.");
+ content.Add(Language, "language");
}
- using JsonDocument document = JsonDocument.ParseValue(ref reader);
- return DeserializeAudioTranscriptionOptions(document.RootElement, options);
- }
-
- internal static AudioTranscriptionOptions DeserializeAudioTranscriptionOptions(JsonElement element, ModelReaderWriterOptions options = null)
- {
- options ??= new ModelReaderWriterOptions("W");
-
- if (element.ValueKind == JsonValueKind.Null)
+ if (Prompt is not null)
{
- return null;
+ content.Add(Prompt, "prompt");
}
- BinaryData file = default;
- string filename = default;
- AudioTranscriptionFormat? responseFormat = default;
- string language = default;
- string prompt = default;
- float? temperature = default;
- string model = default;
- IDictionary<string, BinaryData> serializedAdditionalRawData = default;
- Dictionary<string, BinaryData> rawDataDictionary = new Dictionary<string, BinaryData>();
- foreach (var property in element.EnumerateObject())
- {
- if (property.NameEquals("file"u8))
- {
- file = BinaryData.FromBytes(property.Value.GetBytesFromBase64("D"));
- continue;
- }
- if (property.NameEquals("filename"u8))
- {
- filename = property.Value.GetString();
- continue;
- }
- if (property.NameEquals("response_format"u8))
- {
- if (property.Value.ValueKind == JsonValueKind.Null)
- {
- continue;
- }
- responseFormat = new AudioTranscriptionFormat(property.Value.GetString());
- continue;
- }
- if (property.NameEquals("language"u8))
- {
- language = property.Value.GetString();
- continue;
- }
- if (property.NameEquals("prompt"u8))
- {
- prompt = property.Value.GetString();
- continue;
- }
- if (property.NameEquals("temperature"u8))
- {
- if (property.Value.ValueKind == JsonValueKind.Null)
- {
- continue;
- }
- temperature = property.Value.GetSingle();
- continue;
- }
- if (property.NameEquals("model"u8))
- {
- model = property.Value.GetString();
- continue;
- }
- if (options.Format != "W")
- {
- rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText()));
- }
- }
- serializedAdditionalRawData = rawDataDictionary;
- return new AudioTranscriptionOptions(
- file,
- filename,
- responseFormat,
- language,
- prompt,
- temperature,
- model,
- serializedAdditionalRawData);
- }
-
- BinaryData IPersistableModel.Write(ModelReaderWriterOptions options)
- {
- var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
- switch (format)
+ if (ResponseFormat is not null)
{
- case "J":
- return ModelReaderWriter.Write(this, options);
- default:
- throw new FormatException($"The model {nameof(AudioTranscriptionOptions)} does not support writing '{options.Format}' format.");
+ content.Add(ResponseFormat.ToString(), "response_format");
}
- }
-
- AudioTranscriptionOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options)
- {
- var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
- switch (format)
+ if (Temperature is not null)
{
- case "J":
- {
- using JsonDocument document = JsonDocument.Parse(data);
- return DeserializeAudioTranscriptionOptions(document.RootElement, options);
- }
- default:
- throw new FormatException($"The model {nameof(AudioTranscriptionOptions)} does not support reading '{options.Format}' format.");
+ content.Add(Temperature.Value, "temperature");
}
- }
-
- string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J";
- /// Deserializes the model from a raw response.
- /// The response to deserialize the model from.
- internal static AudioTranscriptionOptions FromResponse(Response response)
- {
- using var document = JsonDocument.Parse(response.Content);
- return DeserializeAudioTranscriptionOptions(document.RootElement);
- }
-
- /// Convert into a Utf8JsonRequestContent.
- internal virtual RequestContent ToRequestContent()
- {
- var content = new Utf8JsonRequestContent();
- content.JsonWriter.WriteObjectValue(this, new ModelReaderWriterOptions("W"));
return content;
}
}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionOptions.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionOptions.cs
index d8714e3bbf36..42040cd9977b 100644
--- a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionOptions.cs
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranscriptionOptions.cs
@@ -7,6 +7,7 @@
using System;
using System.Collections.Generic;
+using System.IO;
namespace Azure.AI.OpenAI
{
@@ -51,7 +52,7 @@ public partial class AudioTranscriptionOptions
/// flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
///
/// is null.
- public AudioTranscriptionOptions(BinaryData audioData)
+ public AudioTranscriptionOptions(Stream audioData)
{
Argument.AssertNotNull(audioData, nameof(audioData));
@@ -81,7 +82,7 @@ public AudioTranscriptionOptions(BinaryData audioData)
///
/// The model to use for this transcription request.
/// Keeps track of any properties unknown to the library.
- internal AudioTranscriptionOptions(BinaryData audioData, string filename, AudioTranscriptionFormat? responseFormat, string language, string prompt, float? temperature, string deploymentName, IDictionary<string, BinaryData> serializedAdditionalRawData)
+ internal AudioTranscriptionOptions(Stream audioData, string filename, AudioTranscriptionFormat? responseFormat, string language, string prompt, float? temperature, string deploymentName, IDictionary<string, BinaryData> serializedAdditionalRawData)
{
AudioData = audioData;
Filename = filename;
@@ -115,7 +116,7 @@ internal AudioTranscriptionOptions()
///
///
///
- public BinaryData AudioData { get; }
+ public Stream AudioData { get; }
/// The optional filename or descriptive identifier to associate with with the audio data.
public string Filename { get; set; }
/// The requested format of the transcription response data, which will influence the content and detail of the result.
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranslationOptions.Serialization.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranslationOptions.Serialization.cs
index 0c94ca95de60..633940ff7654 100644
--- a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranslationOptions.Serialization.cs
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranslationOptions.Serialization.cs
@@ -5,200 +5,33 @@
#nullable disable
-using System;
-using System.ClientModel.Primitives;
-using System.Collections.Generic;
-using System.Text.Json;
-using Azure.Core;
-
namespace Azure.AI.OpenAI
{
- public partial class AudioTranslationOptions : IUtf8JsonSerializable, IJsonModel<AudioTranslationOptions>
+ public partial class AudioTranslationOptions
{
- void IUtf8JsonSerializable.Write(Utf8JsonWriter writer) => ((IJsonModel)this).Write(writer, new ModelReaderWriterOptions("W"));
-
- void IJsonModel.Write(Utf8JsonWriter writer, ModelReaderWriterOptions options)
+ /// Convert into a MultipartFormDataRequestContent.
+ internal virtual MultipartFormDataRequestContent ToMultipartContent()
{
- var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
- if (format != "J")
- {
- throw new FormatException($"The model {nameof(AudioTranslationOptions)} does not support writing '{format}' format.");
- }
+ MultipartFormDataRequestContent content = new();
- writer.WriteStartObject();
- writer.WritePropertyName("file"u8);
- writer.WriteBase64StringValue(AudioData.ToArray(), "D");
- if (Optional.IsDefined(Filename))
- {
- writer.WritePropertyName("filename"u8);
- writer.WriteStringValue(Filename);
- }
- if (Optional.IsDefined(ResponseFormat))
- {
- writer.WritePropertyName("response_format"u8);
- writer.WriteStringValue(ResponseFormat.Value.ToString());
- }
- if (Optional.IsDefined(Prompt))
- {
- writer.WritePropertyName("prompt"u8);
- writer.WriteStringValue(Prompt);
- }
- if (Optional.IsDefined(Temperature))
- {
- writer.WritePropertyName("temperature"u8);
- writer.WriteNumberValue(Temperature.Value);
- }
- if (Optional.IsDefined(DeploymentName))
- {
- writer.WritePropertyName("model"u8);
- writer.WriteStringValue(DeploymentName);
- }
- if (options.Format != "W" && _serializedAdditionalRawData != null)
- {
- foreach (var item in _serializedAdditionalRawData)
- {
- writer.WritePropertyName(item.Key);
-#if NET6_0_OR_GREATER
- writer.WriteRawValue(item.Value);
-#else
- using (JsonDocument document = JsonDocument.Parse(item.Value))
- {
- JsonSerializer.Serialize(writer, document.RootElement);
- }
-#endif
- }
- }
- writer.WriteEndObject();
- }
+ content.Add(AudioData, "file", Filename);
+ content.Add(DeploymentName, "model");
- AudioTranslationOptions IJsonModel.Create(ref Utf8JsonReader reader, ModelReaderWriterOptions options)
- {
- var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
- if (format != "J")
+ if (Prompt is not null)
{
- throw new FormatException($"The model {nameof(AudioTranslationOptions)} does not support reading '{format}' format.");
+ content.Add(Prompt, "prompt");
}
- using JsonDocument document = JsonDocument.ParseValue(ref reader);
- return DeserializeAudioTranslationOptions(document.RootElement, options);
- }
-
- internal static AudioTranslationOptions DeserializeAudioTranslationOptions(JsonElement element, ModelReaderWriterOptions options = null)
- {
- options ??= new ModelReaderWriterOptions("W");
-
- if (element.ValueKind == JsonValueKind.Null)
+ if (ResponseFormat is not null)
{
- return null;
+ content.Add(ResponseFormat.ToString(), "response_format");
}
- BinaryData file = default;
- string filename = default;
- AudioTranslationFormat? responseFormat = default;
- string prompt = default;
- float? temperature = default;
- string model = default;
- IDictionary<string, BinaryData> serializedAdditionalRawData = default;
- Dictionary<string, BinaryData> rawDataDictionary = new Dictionary<string, BinaryData>();
- foreach (var property in element.EnumerateObject())
- {
- if (property.NameEquals("file"u8))
- {
- file = BinaryData.FromBytes(property.Value.GetBytesFromBase64("D"));
- continue;
- }
- if (property.NameEquals("filename"u8))
- {
- filename = property.Value.GetString();
- continue;
- }
- if (property.NameEquals("response_format"u8))
- {
- if (property.Value.ValueKind == JsonValueKind.Null)
- {
- continue;
- }
- responseFormat = new AudioTranslationFormat(property.Value.GetString());
- continue;
- }
- if (property.NameEquals("prompt"u8))
- {
- prompt = property.Value.GetString();
- continue;
- }
- if (property.NameEquals("temperature"u8))
- {
- if (property.Value.ValueKind == JsonValueKind.Null)
- {
- continue;
- }
- temperature = property.Value.GetSingle();
- continue;
- }
- if (property.NameEquals("model"u8))
- {
- model = property.Value.GetString();
- continue;
- }
- if (options.Format != "W")
- {
- rawDataDictionary.Add(property.Name, BinaryData.FromString(property.Value.GetRawText()));
- }
- }
- serializedAdditionalRawData = rawDataDictionary;
- return new AudioTranslationOptions(
- file,
- filename,
- responseFormat,
- prompt,
- temperature,
- model,
- serializedAdditionalRawData);
- }
- BinaryData IPersistableModel.Write(ModelReaderWriterOptions options)
- {
- var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
-
- switch (format)
+ if (Temperature is not null)
{
- case "J":
- return ModelReaderWriter.Write(this, options);
- default:
- throw new FormatException($"The model {nameof(AudioTranslationOptions)} does not support writing '{options.Format}' format.");
+ content.Add(Temperature.Value, "temperature");
}
- }
-
- AudioTranslationOptions IPersistableModel.Create(BinaryData data, ModelReaderWriterOptions options)
- {
- var format = options.Format == "W" ? ((IPersistableModel)this).GetFormatFromOptions(options) : options.Format;
- switch (format)
- {
- case "J":
- {
- using JsonDocument document = JsonDocument.Parse(data);
- return DeserializeAudioTranslationOptions(document.RootElement, options);
- }
- default:
- throw new FormatException($"The model {nameof(AudioTranslationOptions)} does not support reading '{options.Format}' format.");
- }
- }
-
- string IPersistableModel.GetFormatFromOptions(ModelReaderWriterOptions options) => "J";
-
- /// Deserializes the model from a raw response.
- /// The response to deserialize the model from.
- internal static AudioTranslationOptions FromResponse(Response response)
- {
- using var document = JsonDocument.Parse(response.Content);
- return DeserializeAudioTranslationOptions(document.RootElement);
- }
-
- /// Convert into a Utf8JsonRequestContent.
- internal virtual RequestContent ToRequestContent()
- {
- var content = new Utf8JsonRequestContent();
- content.JsonWriter.WriteObjectValue(this, new ModelReaderWriterOptions("W"));
return content;
}
}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranslationOptions.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranslationOptions.cs
index 3588dee18ecd..52d2a07fd42e 100644
--- a/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranslationOptions.cs
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/AudioTranslationOptions.cs
@@ -7,6 +7,7 @@
using System;
using System.Collections.Generic;
+using System.IO;
namespace Azure.AI.OpenAI
{
@@ -51,7 +52,7 @@ public partial class AudioTranslationOptions
/// flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm.
///
/// is null.
- public AudioTranslationOptions(BinaryData audioData)
+ public AudioTranslationOptions(Stream audioData)
{
Argument.AssertNotNull(audioData, nameof(audioData));
@@ -76,7 +77,7 @@ public AudioTranslationOptions(BinaryData audioData)
///
/// The model to use for this translation request.
/// Keeps track of any properties unknown to the library.
- internal AudioTranslationOptions(BinaryData audioData, string filename, AudioTranslationFormat? responseFormat, string prompt, float? temperature, string deploymentName, IDictionary<string, BinaryData> serializedAdditionalRawData)
+ internal AudioTranslationOptions(Stream audioData, string filename, AudioTranslationFormat? responseFormat, string prompt, float? temperature, string deploymentName, IDictionary<string, BinaryData> serializedAdditionalRawData)
{
AudioData = audioData;
Filename = filename;
@@ -109,7 +110,7 @@ internal AudioTranslationOptions()
///
///
///
- public BinaryData AudioData { get; }
+ public Stream AudioData { get; }
/// The optional filename or descriptive identifier to associate with with the audio data.
public string Filename { get; set; }
/// The requested format of the translation response data, which will influence the content and detail of the result.
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/MultipartFormDataRequestContent.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/MultipartFormDataRequestContent.cs
new file mode 100644
index 000000000000..f0d8f23bda03
--- /dev/null
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/Internal/MultipartFormDataRequestContent.cs
@@ -0,0 +1,180 @@
+//
+
+#nullable enable
+
+using System;
+using System.Diagnostics;
+using System.Globalization;
+using System.IO;
+using System.Net.Http;
+using System.Net.Http.Headers;
+using System.Threading;
+using System.Threading.Tasks;
+using Azure.Core;
+using Azure.Core.Pipeline;
+using MultipartFormDataContent = System.Net.Http.MultipartFormDataContent;
+
+namespace Azure.AI.OpenAI;
+
+internal class MultipartFormDataRequestContent : RequestContent
+{
+ private readonly MultipartFormDataContent _multipartContent;
+
+ private static readonly Random _random = new();
+ private static readonly char[] _boundaryValues = "0123456789=ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz".ToCharArray();
+
+ public MultipartFormDataRequestContent()
+ {
+ _multipartContent = new MultipartFormDataContent(CreateBoundary());
+ }
+
+ public string ContentType
+ {
+ get
+ {
+ Debug.Assert(_multipartContent.Headers.ContentType is not null);
+
+ return _multipartContent.Headers.ContentType!.ToString();
+ }
+ }
+
+ internal HttpContent HttpContent => _multipartContent;
+
+ public void Add(Stream stream, string name, string? filename = default)
+ {
+ Argument.AssertNotNull(stream, nameof(stream));
+ Argument.AssertNotNullOrEmpty(name, nameof(name));
+
+ Add(new StreamContent(stream), name, filename);
+ }
+
+ public void Add(string content, string name, string? filename = default)
+ {
+ Argument.AssertNotNull(content, nameof(content));
+ Argument.AssertNotNullOrEmpty(name, nameof(name));
+
+ Add(new StringContent(content), name, filename);
+ }
+
+ public void Add(int content, string name, string? filename = default)
+ {
+ Argument.AssertNotNullOrEmpty(name, nameof(name));
+
+ // https://learn.microsoft.com/en-us/dotnet/standard/base-types/standard-numeric-format-strings#GFormatString
+ string value = content.ToString("G", CultureInfo.InvariantCulture);
+ Add(new StringContent(value), name, filename);
+ }
+
+ public void Add(double content, string name, string? filename = default)
+ {
+ Argument.AssertNotNullOrEmpty(name, nameof(name));
+
+ // https://learn.microsoft.com/en-us/dotnet/standard/base-types/standard-numeric-format-strings#GFormatString
+ string value = content.ToString("G", CultureInfo.InvariantCulture);
+ Add(new StringContent(value), name, filename);
+ }
+
+ public void Add(byte[] content, string name, string? filename = default)
+ {
+ Argument.AssertNotNull(content, nameof(content));
+ Argument.AssertNotNullOrEmpty(name, nameof(name));
+
+ Add(new ByteArrayContent(content), name, filename);
+ }
+
+ public void Add(BinaryData content, string name, string? filename = default)
+ {
+ Argument.AssertNotNull(content, nameof(content));
+ Argument.AssertNotNullOrEmpty(name, nameof(name));
+
+ Add(new ByteArrayContent(content.ToArray()), name, filename);
+ }
+
+ private void Add(HttpContent content, string name, string? filename)
+ {
+ if (filename is not null)
+ {
+ Argument.AssertNotNullOrEmpty(filename, nameof(filename));
+
+ AddFilenameHeader(content, name, filename);
+ }
+
+ _multipartContent.Add(content, name);
+ }
+
+ private static void AddFilenameHeader(HttpContent content, string name, string filename)
+ {
+ // Add the content header manually because the default implementation
+ // adds a `filename*` parameter to the header, which RFC 7578 says not
+ // to do. We are following up with the BCL team per correctness.
+ ContentDispositionHeaderValue header = new("form-data")
+ {
+ Name = name,
+ FileName = filename
+ };
+
+ content.Headers.ContentDisposition = header;
+ }
+
+ private static string CreateBoundary()
+ {
+ Span<char> chars = new char[70];
+
+ byte[] random = new byte[70];
+ _random.NextBytes(random);
+
+ // The following will sample evenly from the possible values.
+ // This is important to ensuring that the odds of creating a boundary
+ // that occurs in any content part are astronomically small.
+ int mask = 255 >> 2;
+
+ Debug.Assert(_boundaryValues.Length - 1 == mask);
+
+ for (int i = 0; i < 70; i++)
+ {
+ chars[i] = _boundaryValues[random[i] & mask];
+ }
+
+ return chars.ToString();
+ }
+
+ public override bool TryComputeLength(out long length)
+ {
+ // We can't call the protected TryComputeLength method on HttpContent,
+ // but if the Content-Length header is available, we can use that.
+
+ if (_multipartContent.Headers.ContentLength is long contentLength)
+ {
+ length = contentLength;
+ return true;
+ }
+
+ length = 0;
+ return false;
+ }
+
+ public override void WriteTo(Stream stream, CancellationToken cancellationToken = default)
+ {
+#if NET6_0_OR_GREATER
+ _multipartContent.CopyTo(stream, default, cancellationToken);
+#else
+#pragma warning disable AZC0107
+ _multipartContent.CopyToAsync(stream).EnsureCompleted();
+#pragma warning restore AZC0107
+#endif
+ }
+
+ public override async Task WriteToAsync(Stream stream, CancellationToken cancellationToken = default)
+ {
+#if NET6_0_OR_GREATER
+ await _multipartContent.CopyToAsync(stream, cancellationToken).ConfigureAwait(false);
+#else
+ await _multipartContent.CopyToAsync(stream).ConfigureAwait(false);
+#endif
+ }
+
+ public override void Dispose()
+ {
+ _multipartContent.Dispose();
+ }
+}
diff --git a/sdk/openai/Azure.AI.OpenAI/src/Generated/OpenAIClient.cs b/sdk/openai/Azure.AI.OpenAI/src/Generated/OpenAIClient.cs
index 0300f9d48f79..7a8223d9b74e 100644
--- a/sdk/openai/Azure.AI.OpenAI/src/Generated/OpenAIClient.cs
+++ b/sdk/openai/Azure.AI.OpenAI/src/Generated/OpenAIClient.cs
@@ -115,8 +115,8 @@ public virtual async Task> GetAudioTranscriptionAsPlainTextAsyn
Argument.AssertNotNull(audioTranscriptionOptions, nameof(audioTranscriptionOptions));
RequestContext context = FromCancellationToken(cancellationToken);
- using RequestContent content = audioTranscriptionOptions.ToRequestContent();
- Response response = await GetAudioTranscriptionAsPlainTextAsync(deploymentId, content, context).ConfigureAwait(false);
+ using MultipartFormDataRequestContent content = audioTranscriptionOptions.ToMultipartContent();
+ Response response = await GetAudioTranscriptionAsPlainTextAsync(deploymentId, content, content.ContentType, context).ConfigureAwait(false);
return Response.FromValue(response.Content.ToString(), response);
}
@@ -135,8 +135,8 @@ public virtual Response GetAudioTranscriptionAsPlainText(string deployme
Argument.AssertNotNull(audioTranscriptionOptions, nameof(audioTranscriptionOptions));
RequestContext context = FromCancellationToken(cancellationToken);
- using RequestContent content = audioTranscriptionOptions.ToRequestContent();
- Response response = GetAudioTranscriptionAsPlainText(deploymentId, content, context);
+ using MultipartFormDataRequestContent content = audioTranscriptionOptions.ToMultipartContent();
+ Response response = GetAudioTranscriptionAsPlainText(deploymentId, content, content.ContentType, context);
return Response.FromValue(response.Content.ToString(), response);
}
@@ -153,12 +153,13 @@ public virtual Response GetAudioTranscriptionAsPlainText(string deployme
///
/// Specifies either the model deployment name (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
/// The content to send as the body of the request.
+ /// The content type of the request content.
/// The request context, which can override default behaviors of the client pipeline on a per-call basis.
/// or is null.
/// is an empty string, and was expected to be non-empty.
/// Service returned a non-success status code.
/// The response returned from the service.
- internal virtual async Task GetAudioTranscriptionAsPlainTextAsync(string deploymentId, RequestContent content, RequestContext context = null)
+ internal virtual async Task GetAudioTranscriptionAsPlainTextAsync(string deploymentId, RequestContent content, string contentType, RequestContext context = null)
{
Argument.AssertNotNullOrEmpty(deploymentId, nameof(deploymentId));
Argument.AssertNotNull(content, nameof(content));
@@ -167,7 +168,7 @@ internal virtual async Task GetAudioTranscriptionAsPlainTextAsync(stri
scope.Start();
try
{
- using HttpMessage message = CreateGetAudioTranscriptionAsPlainTextRequest(deploymentId, content, context);
+ using HttpMessage message = CreateGetAudioTranscriptionAsPlainTextRequest(deploymentId, content, contentType, context);
return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false);
}
catch (Exception e)
@@ -190,12 +191,13 @@ internal virtual async Task GetAudioTranscriptionAsPlainTextAsync(stri
///
/// Specifies either the model deployment name (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
/// The content to send as the body of the request.
+ /// The content type of the request content.
/// The request context, which can override default behaviors of the client pipeline on a per-call basis.
/// or is null.
/// is an empty string, and was expected to be non-empty.
/// Service returned a non-success status code.
/// The response returned from the service.
- internal virtual Response GetAudioTranscriptionAsPlainText(string deploymentId, RequestContent content, RequestContext context = null)
+ internal virtual Response GetAudioTranscriptionAsPlainText(string deploymentId, RequestContent content, string contentType, RequestContext context = null)
{
Argument.AssertNotNullOrEmpty(deploymentId, nameof(deploymentId));
Argument.AssertNotNull(content, nameof(content));
@@ -204,7 +206,7 @@ internal virtual Response GetAudioTranscriptionAsPlainText(string deploymentId,
scope.Start();
try
{
- using HttpMessage message = CreateGetAudioTranscriptionAsPlainTextRequest(deploymentId, content, context);
+ using HttpMessage message = CreateGetAudioTranscriptionAsPlainTextRequest(deploymentId, content, contentType, context);
return _pipeline.ProcessMessage(message, context);
}
catch (Exception e)
@@ -229,8 +231,8 @@ public virtual async Task> GetAudioTranscriptionAsR
Argument.AssertNotNull(audioTranscriptionOptions, nameof(audioTranscriptionOptions));
RequestContext context = FromCancellationToken(cancellationToken);
- using RequestContent content = audioTranscriptionOptions.ToRequestContent();
- Response response = await GetAudioTranscriptionAsResponseObjectAsync(deploymentId, content, context).ConfigureAwait(false);
+ using MultipartFormDataRequestContent content = audioTranscriptionOptions.ToMultipartContent();
+ Response response = await GetAudioTranscriptionAsResponseObjectAsync(deploymentId, content, content.ContentType, context).ConfigureAwait(false);
return Response.FromValue(AudioTranscription.FromResponse(response), response);
}
@@ -249,8 +251,8 @@ public virtual Response GetAudioTranscriptionAsResponseObjec
Argument.AssertNotNull(audioTranscriptionOptions, nameof(audioTranscriptionOptions));
RequestContext context = FromCancellationToken(cancellationToken);
- using RequestContent content = audioTranscriptionOptions.ToRequestContent();
- Response response = GetAudioTranscriptionAsResponseObject(deploymentId, content, context);
+ using MultipartFormDataRequestContent content = audioTranscriptionOptions.ToMultipartContent();
+ Response response = GetAudioTranscriptionAsResponseObject(deploymentId, content, content.ContentType, context);
return Response.FromValue(AudioTranscription.FromResponse(response), response);
}
@@ -267,12 +269,13 @@ public virtual Response GetAudioTranscriptionAsResponseObjec
///
/// Specifies either the model deployment name (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
/// The content to send as the body of the request.
+ /// The content type of the request content.
/// The request context, which can override default behaviors of the client pipeline on a per-call basis.
/// or is null.
/// is an empty string, and was expected to be non-empty.
/// Service returned a non-success status code.
/// The response returned from the service.
- internal virtual async Task GetAudioTranscriptionAsResponseObjectAsync(string deploymentId, RequestContent content, RequestContext context = null)
+ internal virtual async Task GetAudioTranscriptionAsResponseObjectAsync(string deploymentId, RequestContent content, string contentType, RequestContext context = null)
{
Argument.AssertNotNullOrEmpty(deploymentId, nameof(deploymentId));
Argument.AssertNotNull(content, nameof(content));
@@ -281,7 +284,7 @@ internal virtual async Task GetAudioTranscriptionAsResponseObjectAsync
scope.Start();
try
{
- using HttpMessage message = CreateGetAudioTranscriptionAsResponseObjectRequest(deploymentId, content, context);
+ using HttpMessage message = CreateGetAudioTranscriptionAsResponseObjectRequest(deploymentId, content, contentType, context);
return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false);
}
catch (Exception e)
@@ -304,12 +307,13 @@ internal virtual async Task GetAudioTranscriptionAsResponseObjectAsync
///
/// Specifies either the model deployment name (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
/// The content to send as the body of the request.
+ /// The content type of the request content.
/// The request context, which can override default behaviors of the client pipeline on a per-call basis.
/// or is null.
/// is an empty string, and was expected to be non-empty.
/// Service returned a non-success status code.
/// The response returned from the service.
- internal virtual Response GetAudioTranscriptionAsResponseObject(string deploymentId, RequestContent content, RequestContext context = null)
+ internal virtual Response GetAudioTranscriptionAsResponseObject(string deploymentId, RequestContent content, string contentType, RequestContext context = null)
{
Argument.AssertNotNullOrEmpty(deploymentId, nameof(deploymentId));
Argument.AssertNotNull(content, nameof(content));
@@ -318,7 +322,7 @@ internal virtual Response GetAudioTranscriptionAsResponseObject(string deploymen
scope.Start();
try
{
- using HttpMessage message = CreateGetAudioTranscriptionAsResponseObjectRequest(deploymentId, content, context);
+ using HttpMessage message = CreateGetAudioTranscriptionAsResponseObjectRequest(deploymentId, content, contentType, context);
return _pipeline.ProcessMessage(message, context);
}
catch (Exception e)
@@ -340,8 +344,8 @@ public virtual async Task> GetAudioTranslationAsPlainTextAsync(
Argument.AssertNotNull(audioTranslationOptions, nameof(audioTranslationOptions));
RequestContext context = FromCancellationToken(cancellationToken);
- using RequestContent content = audioTranslationOptions.ToRequestContent();
- Response response = await GetAudioTranslationAsPlainTextAsync(deploymentId, content, context).ConfigureAwait(false);
+ using MultipartFormDataRequestContent content = audioTranslationOptions.ToMultipartContent();
+ Response response = await GetAudioTranslationAsPlainTextAsync(deploymentId, content, content.ContentType, context).ConfigureAwait(false);
return Response.FromValue(response.Content.ToString(), response);
}
@@ -357,8 +361,8 @@ public virtual Response GetAudioTranslationAsPlainText(string deployment
Argument.AssertNotNull(audioTranslationOptions, nameof(audioTranslationOptions));
RequestContext context = FromCancellationToken(cancellationToken);
- using RequestContent content = audioTranslationOptions.ToRequestContent();
- Response response = GetAudioTranslationAsPlainText(deploymentId, content, context);
+ using MultipartFormDataRequestContent content = audioTranslationOptions.ToMultipartContent();
+ Response response = GetAudioTranslationAsPlainText(deploymentId, content, content.ContentType, context);
return Response.FromValue(response.Content.ToString(), response);
}
@@ -374,12 +378,13 @@ public virtual Response GetAudioTranslationAsPlainText(string deployment
///
/// Specifies either the model deployment name (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
/// The content to send as the body of the request.
+ /// The content type of the request content.
/// The request context, which can override default behaviors of the client pipeline on a per-call basis.
/// or is null.
/// is an empty string, and was expected to be non-empty.
/// Service returned a non-success status code.
/// The response returned from the service.
- internal virtual async Task GetAudioTranslationAsPlainTextAsync(string deploymentId, RequestContent content, RequestContext context = null)
+ internal virtual async Task GetAudioTranslationAsPlainTextAsync(string deploymentId, RequestContent content, string contentType, RequestContext context = null)
{
Argument.AssertNotNullOrEmpty(deploymentId, nameof(deploymentId));
Argument.AssertNotNull(content, nameof(content));
@@ -388,7 +393,7 @@ internal virtual async Task GetAudioTranslationAsPlainTextAsync(string
scope.Start();
try
{
- using HttpMessage message = CreateGetAudioTranslationAsPlainTextRequest(deploymentId, content, context);
+ using HttpMessage message = CreateGetAudioTranslationAsPlainTextRequest(deploymentId, content, contentType, context);
return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false);
}
catch (Exception e)
@@ -410,12 +415,13 @@ internal virtual async Task GetAudioTranslationAsPlainTextAsync(string
///
/// Specifies either the model deployment name (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
/// The content to send as the body of the request.
+ /// The content type of the request content.
/// The request context, which can override default behaviors of the client pipeline on a per-call basis.
/// or is null.
/// is an empty string, and was expected to be non-empty.
/// Service returned a non-success status code.
/// The response returned from the service.
- internal virtual Response GetAudioTranslationAsPlainText(string deploymentId, RequestContent content, RequestContext context = null)
+ internal virtual Response GetAudioTranslationAsPlainText(string deploymentId, RequestContent content, string contentType, RequestContext context = null)
{
Argument.AssertNotNullOrEmpty(deploymentId, nameof(deploymentId));
Argument.AssertNotNull(content, nameof(content));
@@ -424,7 +430,7 @@ internal virtual Response GetAudioTranslationAsPlainText(string deploymentId, Re
scope.Start();
try
{
- using HttpMessage message = CreateGetAudioTranslationAsPlainTextRequest(deploymentId, content, context);
+ using HttpMessage message = CreateGetAudioTranslationAsPlainTextRequest(deploymentId, content, contentType, context);
return _pipeline.ProcessMessage(message, context);
}
catch (Exception e)
@@ -446,8 +452,8 @@ public virtual async Task> GetAudioTranslationAsRespo
Argument.AssertNotNull(audioTranslationOptions, nameof(audioTranslationOptions));
RequestContext context = FromCancellationToken(cancellationToken);
- using RequestContent content = audioTranslationOptions.ToRequestContent();
- Response response = await GetAudioTranslationAsResponseObjectAsync(deploymentId, content, context).ConfigureAwait(false);
+ using MultipartFormDataRequestContent content = audioTranslationOptions.ToMultipartContent();
+ Response response = await GetAudioTranslationAsResponseObjectAsync(deploymentId, content, content.ContentType, context).ConfigureAwait(false);
return Response.FromValue(AudioTranslation.FromResponse(response), response);
}
@@ -463,8 +469,8 @@ public virtual Response GetAudioTranslationAsResponseObject(st
Argument.AssertNotNull(audioTranslationOptions, nameof(audioTranslationOptions));
RequestContext context = FromCancellationToken(cancellationToken);
- using RequestContent content = audioTranslationOptions.ToRequestContent();
- Response response = GetAudioTranslationAsResponseObject(deploymentId, content, context);
+ using MultipartFormDataRequestContent content = audioTranslationOptions.ToMultipartContent();
+ Response response = GetAudioTranslationAsResponseObject(deploymentId, content, content.ContentType, context);
return Response.FromValue(AudioTranslation.FromResponse(response), response);
}
@@ -480,12 +486,13 @@ public virtual Response GetAudioTranslationAsResponseObject(st
///
/// Specifies either the model deployment name (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
/// The content to send as the body of the request.
+ /// The content type of the request content.
/// The request context, which can override default behaviors of the client pipeline on a per-call basis.
/// or is null.
/// is an empty string, and was expected to be non-empty.
/// Service returned a non-success status code.
/// The response returned from the service.
- internal virtual async Task GetAudioTranslationAsResponseObjectAsync(string deploymentId, RequestContent content, RequestContext context = null)
+ internal virtual async Task GetAudioTranslationAsResponseObjectAsync(string deploymentId, RequestContent content, string contentType, RequestContext context = null)
{
Argument.AssertNotNullOrEmpty(deploymentId, nameof(deploymentId));
Argument.AssertNotNull(content, nameof(content));
@@ -494,7 +501,7 @@ internal virtual async Task GetAudioTranslationAsResponseObjectAsync(s
scope.Start();
try
{
- using HttpMessage message = CreateGetAudioTranslationAsResponseObjectRequest(deploymentId, content, context);
+ using HttpMessage message = CreateGetAudioTranslationAsResponseObjectRequest(deploymentId, content, contentType, context);
return await _pipeline.ProcessMessageAsync(message, context).ConfigureAwait(false);
}
catch (Exception e)
@@ -516,12 +523,13 @@ internal virtual async Task GetAudioTranslationAsResponseObjectAsync(s
///
/// Specifies either the model deployment name (when using Azure OpenAI) or model name (when using non-Azure OpenAI) to use for this request.
/// The content to send as the body of the request.
+ /// The content type of the request content.
/// The request context, which can override default behaviors of the client pipeline on a per-call basis.
/// or is null.
/// is an empty string, and was expected to be non-empty.
/// Service returned a non-success status code.
/// The response returned from the service.
- internal virtual Response GetAudioTranslationAsResponseObject(string deploymentId, RequestContent content, RequestContext context = null)
+ internal virtual Response GetAudioTranslationAsResponseObject(string deploymentId, RequestContent content, string contentType, RequestContext context = null)
{
Argument.AssertNotNullOrEmpty(deploymentId, nameof(deploymentId));
Argument.AssertNotNull(content, nameof(content));
@@ -530,7 +538,7 @@ internal virtual Response GetAudioTranslationAsResponseObject(string deploymentI
scope.Start();
try
{
- using HttpMessage message = CreateGetAudioTranslationAsResponseObjectRequest(deploymentId, content, context);
+ using HttpMessage message = CreateGetAudioTranslationAsResponseObjectRequest(deploymentId, content, contentType, context);
return _pipeline.ProcessMessage(message, context);
}
catch (Exception e)
@@ -1118,7 +1126,7 @@ internal virtual Response GetEmbeddings(string deploymentId, RequestContent cont
}
}
- internal HttpMessage CreateGetAudioTranscriptionAsPlainTextRequest(string deploymentId, RequestContent content, RequestContext context)
+ internal HttpMessage CreateGetAudioTranscriptionAsPlainTextRequest(string deploymentId, RequestContent content, string contentType, RequestContext context)
{
var message = _pipeline.CreateMessage(context, ResponseClassifier200);
var request = message.Request;
@@ -1132,12 +1140,12 @@ internal HttpMessage CreateGetAudioTranscriptionAsPlainTextRequest(string deploy
uri.AppendQuery("api-version", _apiVersion, true);
request.Uri = uri;
request.Headers.Add("Accept", "text/plain");
- request.Headers.Add("content-type", "multipart/form-data");
+ request.Headers.Add("content-type", contentType);
request.Content = content;
return message;
}
- internal HttpMessage CreateGetAudioTranscriptionAsResponseObjectRequest(string deploymentId, RequestContent content, RequestContext context)
+ internal HttpMessage CreateGetAudioTranscriptionAsResponseObjectRequest(string deploymentId, RequestContent content, string contentType, RequestContext context)
{
var message = _pipeline.CreateMessage(context, ResponseClassifier200);
var request = message.Request;
@@ -1151,12 +1159,12 @@ internal HttpMessage CreateGetAudioTranscriptionAsResponseObjectRequest(string d
uri.AppendQuery("api-version", _apiVersion, true);
request.Uri = uri;
request.Headers.Add("Accept", "application/json");
- request.Headers.Add("content-type", "multipart/form-data");
+ request.Headers.Add("content-type", contentType);
request.Content = content;
return message;
}
- internal HttpMessage CreateGetAudioTranslationAsPlainTextRequest(string deploymentId, RequestContent content, RequestContext context)
+ internal HttpMessage CreateGetAudioTranslationAsPlainTextRequest(string deploymentId, RequestContent content, string contentType, RequestContext context)
{
var message = _pipeline.CreateMessage(context, ResponseClassifier200);
var request = message.Request;
@@ -1170,12 +1178,12 @@ internal HttpMessage CreateGetAudioTranslationAsPlainTextRequest(string deployme
uri.AppendQuery("api-version", _apiVersion, true);
request.Uri = uri;
request.Headers.Add("Accept", "text/plain");
- request.Headers.Add("content-type", "multipart/form-data");
+ request.Headers.Add("content-type", contentType);
request.Content = content;
return message;
}
- internal HttpMessage CreateGetAudioTranslationAsResponseObjectRequest(string deploymentId, RequestContent content, RequestContext context)
+ internal HttpMessage CreateGetAudioTranslationAsResponseObjectRequest(string deploymentId, RequestContent content, string contentType, RequestContext context)
{
var message = _pipeline.CreateMessage(context, ResponseClassifier200);
var request = message.Request;
@@ -1189,7 +1197,7 @@ internal HttpMessage CreateGetAudioTranslationAsResponseObjectRequest(string dep
uri.AppendQuery("api-version", _apiVersion, true);
request.Uri = uri;
request.Headers.Add("Accept", "application/json");
- request.Headers.Add("content-type", "multipart/form-data");
+ request.Headers.Add("content-type", contentType);
request.Content = content;
return message;
}