
Commit 0a32345

Fix docstrings after #1811 Blingfire default tokenizer switch (#3812)
1 parent 2929fa7 commit 0a32345

File tree

8 files changed (+7, -8 lines)
  • livekit-plugins
    • livekit-plugins-cartesia/livekit/plugins/cartesia
    • livekit-plugins-elevenlabs/livekit/plugins/elevenlabs
    • livekit-plugins-google/livekit/plugins/google
    • livekit-plugins-minimax/livekit/plugins/minimax
    • livekit-plugins-resemble/livekit/plugins/resemble
    • livekit-plugins-smallestai/livekit/plugins/smallestai
    • livekit-plugins-speechmatics/livekit/plugins/speechmatics
    • livekit-plugins-upliftai/livekit/plugins/upliftai


livekit-plugins/livekit-plugins-cartesia/livekit/plugins/cartesia/tts.py

Lines changed: 1 addition & 1 deletion
@@ -118,7 +118,7 @@ def __init__(
 word_timestamps (bool, optional): Whether to add word timestamps to the output. Defaults to True.
 api_key (str, optional): The Cartesia API key. If not provided, it will be read from the CARTESIA_API_KEY environment variable.
 http_session (aiohttp.ClientSession | None, optional): An existing aiohttp ClientSession to use. If not provided, a new session will be created.
-tokenizer (tokenize.SentenceTokenizer, optional): The tokenizer to use. Defaults to tokenize.basic.SentenceTokenizer(min_sentence_len=BUFFERED_WORDS_COUNT).
+tokenizer (tokenize.SentenceTokenizer, optional): The tokenizer to use. Defaults to `livekit.agents.tokenize.blingfire.SentenceTokenizer`.
 text_pacing (tts.SentenceStreamPacer | bool, optional): Stream pacer for the TTS. Set to True to use the default pacer, False to disable.
 base_url (str, optional): The base URL for the Cartesia API. Defaults to "https://api.cartesia.ai".
 """ # noqa: E501

livekit-plugins/livekit-plugins-elevenlabs/livekit/plugins/elevenlabs/tts.py

Lines changed: 1 addition & 1 deletion
@@ -110,7 +110,7 @@ def __init__(
 streaming_latency (NotGivenOr[int]): Optimize for streaming latency, defaults to 0 - disabled. 4 for max latency optimizations. deprecated
 inactivity_timeout (int): Inactivity timeout in seconds for the websocket connection. Defaults to 300.
 auto_mode (bool): Reduces latency by disabling chunk schedule and buffers. Sentence tokenizer will be used to synthesize one sentence at a time. Defaults to True.
-word_tokenizer (NotGivenOr[tokenize.WordTokenizer | tokenize.SentenceTokenizer]): Tokenizer for processing text. Defaults to basic WordTokenizer.
+word_tokenizer (NotGivenOr[tokenize.WordTokenizer | tokenize.SentenceTokenizer]): Tokenizer for processing text. Defaults to basic WordTokenizer when auto_mode=False, `livekit.agents.tokenize.blingfire.SentenceTokenizer` otherwise.
 enable_ssml_parsing (bool): Enable SSML parsing for input text. Defaults to False.
 enable_logging (bool): Enable logging of the request. When set to false, zero retention mode will be used. Defaults to True.
 chunk_length_schedule (NotGivenOr[list[int]]): Schedule for chunk lengths, ranging from 50 to 500. Defaults are [120, 160, 250, 290].
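
The updated docstring above ties the ElevenLabs default tokenizer to `auto_mode`. A hedged sketch of passing one explicitly; only the `auto_mode` and `word_tokenizer` keywords are taken from the docstring, the imports and credential handling are assumed.

```python
# Hedged sketch: with auto_mode disabled, supply a word tokenizer explicitly
# rather than relying on the documented default. Assumes the plugin's API key
# is provided via its expected environment variable.
from livekit.agents import tokenize
from livekit.plugins import elevenlabs

tts = elevenlabs.TTS(
    auto_mode=False,
    word_tokenizer=tokenize.basic.WordTokenizer(),
)
```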

livekit-plugins/livekit-plugins-google/livekit/plugins/google/tts.py

Lines changed: 1 addition & 1 deletion
@@ -98,7 +98,7 @@ def __init__(
 volume_gain_db (float, optional): Volume gain in decibels. Default is 0.0. In the range [-96.0, 16.0]. Strongly recommended not to exceed +10 (dB).
 credentials_info (dict, optional): Dictionary containing Google Cloud credentials. Default is None.
 credentials_file (str, optional): Path to the Google Cloud credentials JSON file. Default is None.
-tokenizer (tokenize.SentenceTokenizer, optional): Tokenizer for the TTS. Default is a basic sentence tokenizer.
+tokenizer (tokenize.SentenceTokenizer, optional): Tokenizer for the TTS. Defaults to `livekit.agents.tokenize.blingfire.SentenceTokenizer`.
 custom_pronunciations (CustomPronunciations, optional): Custom pronunciations for the TTS. Default is None.
 use_streaming (bool, optional): Whether to use streaming synthesis. Default is True.
 enable_ssml (bool, optional): Whether to enable SSML support. Default is False.

livekit-plugins/livekit-plugins-minimax/livekit/plugins/minimax/tts.py

Lines changed: 1 addition & 1 deletion
@@ -163,7 +163,7 @@ def __init__(
 timbre (int | None, optional): Corresponds to the "Nasal/Crisp" slider on the official page. Range: [-100, 100].
 sample_rate (TTSSampleRate, optional): The audio sample rate in Hz. Defaults to 24000.
 bitrate (TTSBitRate, optional): The audio bitrate in kbps. Defaults to 128000.
-tokenizer (NotGivenOr[tokenize.SentenceTokenizer], optional): The sentence tokenizer to use. Defaults to NOT_GIVEN.
+tokenizer (NotGivenOr[tokenize.SentenceTokenizer], optional): The sentence tokenizer to use. Defaults to `livekit.agents.tokenize.basic.SentenceTokenizer`.
 text_pacing (tts.SentenceStreamPacer | bool, optional): Enable text pacing for sentence-level timing control. Defaults to False.
 api_key (str | None, optional): The Minimax API key. Defaults to None.
 base_url (NotGivenOr[str], optional): The base URL for the Minimax API. Defaults to NOT_GIVEN.

livekit-plugins/livekit-plugins-resemble/livekit/plugins/resemble/tts.py

Lines changed: 1 addition & 1 deletion
@@ -70,7 +70,7 @@ def __init__(
 sample_rate (int, optional): The audio sample rate in Hz. Defaults to 44100.
 api_key (str | None, optional): The Resemble API key. If not provided, it will be read from the RESEMBLE_API_KEY environment variable.
 http_session (aiohttp.ClientSession | None, optional): An existing aiohttp ClientSession to use. If not provided, a new session will be created.
-tokenizer (tokenize.SentenceTokenizer, optional): The tokenizer to use. Defaults to tokenize.SentenceTokenizer().
+tokenizer (tokenize.SentenceTokenizer, optional): The tokenizer to use. Defaults to `livekit.agents.tokenize.blingfire.SentenceTokenizer`.
 use_streaming (bool, optional): Whether to use streaming or not. Defaults to True.
 """ # noqa: E501
 super().__init__(

livekit-plugins/livekit-plugins-smallestai/livekit/plugins/smallestai/tts.py

Lines changed: 0 additions & 1 deletion
@@ -86,7 +86,6 @@ def __init__(
 output_format: Output format of the audio.
 base_url: Base URL for the Smallest AI API.
 http_session: An existing aiohttp ClientSession to use.
-tokenizer: The tokenizer to use for streaming.
 """

 super().__init__(

livekit-plugins/livekit-plugins-speechmatics/livekit/plugins/speechmatics/tts.py

Lines changed: 1 addition & 1 deletion
@@ -57,7 +57,7 @@ def __init__(
 sample_rate (int): Sample rate of audio. Defaults to 16000.
 api_key (str): Speechmatics API key. If not provided, will look for SPEECHMATICS_API_KEY in environment.
 base_url (str): Base URL for Speechmatics TTS API. Defaults to "https://preview.tts.speechmatics.com"
-word_tokenizer (tokenize.WordTokenizer): Tokenizer for processing text. Defaults to basic WordTokenizer.
+word_tokenizer (tokenize.WordTokenizer): Tokenizer for processing text. Defaults to `livekit.agents.tokenize.basic.WordTokenizer`.
 http_session (aiohttp.ClientSession): Optional aiohttp session to use for requests.
 """
 super().__init__(
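
A similar sketch for the Speechmatics plugin, making the documented default explicit. The `word_tokenizer` keyword comes from the docstring above; the class name and the assumption that no other constructor arguments are required are not confirmed by this commit.

```python
# Hedged sketch: pass the documented default word tokenizer explicitly.
# Assumes SPEECHMATICS_API_KEY is set in the environment, per the docstring.
from livekit.agents import tokenize
from livekit.plugins import speechmatics

tts = speechmatics.TTS(
    word_tokenizer=tokenize.basic.WordTokenizer(),
)
```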

livekit-plugins/livekit-plugins-upliftai/livekit/plugins/upliftai/tts.py

Lines changed: 1 addition & 1 deletion
@@ -119,7 +119,7 @@ def __init__(
 - 'ULAW_8000_8': μ-law format, 8kHz, 8-bit
 sample_rate: Sample rate for audio output. Defaults to 22050
 num_channels: Number of audio channels. Defaults to 1 (mono)
-word_tokenizer: Tokenizer for processing text
+word_tokenizer: Tokenizer for processing text. Defaults to `livekit.agents.tokenize.basic.WordTokenizer`.
 """
 super().__init__(
 capabilities=tts.TTSCapabilities(
