# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
This script demonstrates how to use the vLLM Realtime WebSocket API to perform
audio transcription by uploading an audio file.

Before running this script, you must start the vLLM server with a realtime-capable
model, for example:

    vllm serve mistralai/Voxtral-Mini-4B-Realtime-2602 --enforce-eager

Requirements:
- vllm with audio support
- websockets
- librosa
- numpy

The script:
1. Connects to the Realtime WebSocket endpoint
2. Converts an audio file to PCM16 @ 16kHz
3. Sends audio chunks to the server
4. Receives and prints transcription as it streams
"""

import argparse
import asyncio
import base64
import json

import librosa
import numpy as np
import websockets

from vllm.assets.audio import AudioAsset


def audio_to_pcm16_base64(audio_path: str) -> str:
    """
    Load an audio file and convert it to base64-encoded PCM16 @ 16kHz.
    """
    # Load audio and resample to 16kHz mono
    audio, _ = librosa.load(audio_path, sr=16000, mono=True)
    # Convert to PCM16
    pcm16 = (audio * 32767).astype(np.int16)
    # Encode as base64
    return base64.b64encode(pcm16.tobytes()).decode("utf-8")


async def realtime_transcribe(audio_path: str, host: str, port: int, model: str):
    """
    Connect to the Realtime API and transcribe an audio file.
    """
    uri = f"ws://{host}:{port}/v1/realtime"

    async with websockets.connect(uri) as ws:
        # Wait for session.created
        response = json.loads(await ws.recv())
        if response["type"] == "session.created":
            print(f"Session created: {response['id']}")
        else:
            print(f"Unexpected response: {response}")
            return

        # Validate model
        await ws.send(json.dumps({"type": "session.update", "model": model}))

        # Signal ready to start
        await ws.send(json.dumps({"type": "input_audio_buffer.commit"}))

        # Convert audio file to base64 PCM16
        print(f"Loading audio from: {audio_path}")
        audio_base64 = audio_to_pcm16_base64(audio_path)

        # Send audio in chunks (4KB of raw audio = ~8KB base64)
        chunk_size = 4096
        audio_bytes = base64.b64decode(audio_base64)
        total_chunks = (len(audio_bytes) + chunk_size - 1) // chunk_size
        print(f"Sending {total_chunks} audio chunks...")

        for i in range(0, len(audio_bytes), chunk_size):
            chunk = audio_bytes[i : i + chunk_size]
            await ws.send(
                json.dumps(
                    {
                        "type": "input_audio_buffer.append",
                        "audio": base64.b64encode(chunk).decode("utf-8"),
                    }
                )
            )

        # Signal all audio is sent
        await ws.send(json.dumps({"type": "input_audio_buffer.commit", "final": True}))
        print("Audio sent. Waiting for transcription...\n")

        # Receive transcription
        print("Transcription: ", end="", flush=True)
        while True:
            response = json.loads(await ws.recv())
            if response["type"] == "transcription.delta":
                print(response["delta"], end="", flush=True)
            elif response["type"] == "transcription.done":
                print(f"\n\nFinal transcription: {response['text']}")
                if response.get("usage"):
                    print(f"Usage: {response['usage']}")
                break
            elif response["type"] == "error":
                print(f"\nError: {response['error']}")
                break


def main(args):
    if args.audio_path:
        audio_path = args.audio_path
    else:
        # Use default audio asset
        audio_path = str(AudioAsset("mary_had_lamb").get_local_path())
        print(f"No audio path provided, using default: {audio_path}")

    asyncio.run(realtime_transcribe(audio_path, args.host, args.port, args.model))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Realtime WebSocket Transcription Client"
    )
    parser.add_argument(
        "--model",
        type=str,
        default="mistralai/Voxtral-Mini-4B-Realtime-2602",
        help="Model that is served and should be pinged.",
    )
    parser.add_argument(
        "--audio_path",
        type=str,
        default=None,
        help="Path to the audio file to transcribe.",
    )
    parser.add_argument(
        "--host",
        type=str,
        default="localhost",
        help="vLLM server host (default: localhost)",
    )
    parser.add_argument(
        "--port",
        type=int,
        default=8000,
        help="vLLM server port (default: 8000)",
    )
    args = parser.parse_args()

    main(args)