LLM to TTS
Integration Examples
End-to-end examples piping popular LLM providers into ShunyaLabs TTS with sentence buffering.
OpenAI + ShunyaLabs
Stream GPT-4o tokens and flush complete sentences to ShunyaLabs TTS in real time.
OpenAI + ShunyaLabs
from openai import AsyncOpenAI
from shunyalabs import AsyncShunyaClient
from shunyalabs.tts import TTSConfig
async def gpt_to_tts(user_message: str) -> list:
    """Stream GPT-4o tokens and flush complete sentences to ShunyaLabs TTS.

    Tokens are accumulated in a text buffer. Whenever the buffer ends with a
    sentence terminator (and is long enough to be worth synthesizing), the
    buffered sentence is sent to TTS and the buffer is reset. Any trailing
    partial sentence is flushed after the token stream ends.

    Args:
        user_message: The prompt forwarded to the LLM.

    Returns:
        The audio chunks yielded by ``shunya.tts.stream``, in order.
    """
    oai = AsyncOpenAI()
    shunya = AsyncShunyaClient()
    config = TTSConfig(model="zero-indic", voice="Sunita", response_format="pcm")
    buffer = ""
    chunks = []
    stream = await oai.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": user_message}],
        stream=True,
    )
    async for chunk in stream:
        token = chunk.choices[0].delta.content or ""
        buffer += token
        # BUG FIX: streamed tokens usually carry punctuation attached to a
        # word (e.g. "time."), so comparing the whole token against a single
        # punctuation mark almost never matched and sentences were never
        # flushed mid-stream. Test the buffer's stripped tail instead.
        if buffer.rstrip().endswith((".", "!", "?", ";")) and len(buffer) > 15:
            async for audio in await shunya.tts.stream(buffer, config=config):
                chunks.append(audio)
            buffer = ""
    # Flush any trailing partial sentence left when the stream ends.
    if buffer.strip():
        async for audio in await shunya.tts.stream(buffer, config=config):
            chunks.append(audio)
    # BUG FIX: the collected audio was previously discarded; return it.
    return chunks
Stream Claude tokens and flush complete sentences to ShunyaLabs TTS in real time.
Anthropic Claude + ShunyaLabs
import anthropic
from shunyalabs import AsyncShunyaClient
from shunyalabs.tts import TTSConfig
async def claude_to_tts(user_message: str) -> list:
    """Stream Claude tokens and flush complete sentences to ShunyaLabs TTS.

    Text deltas are accumulated in a buffer. Whenever the buffer ends with a
    sentence terminator (and is long enough to be worth synthesizing), the
    buffered sentence is sent to TTS and the buffer is reset. Any trailing
    partial sentence is flushed after the stream closes.

    Args:
        user_message: The prompt forwarded to the LLM.

    Returns:
        The audio chunks yielded by ``shunya.tts.stream``, in order.
    """
    claude = anthropic.AsyncAnthropic()
    shunya = AsyncShunyaClient()
    config = TTSConfig(model="zero-indic", voice="Rajesh", response_format="pcm")
    buffer = ""
    chunks = []
    async with claude.messages.stream(
        model="claude-sonnet-4-6",
        max_tokens=1024,
        messages=[{"role": "user", "content": user_message}],
    ) as stream:
        async for text in stream.text_stream:
            buffer += text
            # BUG FIX: text deltas usually carry punctuation attached to a
            # word (e.g. "done."), so comparing the whole delta against a
            # single punctuation mark almost never matched and sentences were
            # never flushed mid-stream. Test the buffer's stripped tail.
            if buffer.rstrip().endswith((".", "!", "?", ";")) and len(buffer) > 15:
                async for audio in await shunya.tts.stream(buffer, config=config):
                    chunks.append(audio)
                buffer = ""
    # Flush any trailing partial sentence left when the stream closes.
    if buffer.strip():
        async for audio in await shunya.tts.stream(buffer, config=config):
            chunks.append(audio)
    # BUG FIX: the collected audio was previously discarded; return it.
    return chunks