# -*- coding: utf-8 -*-
import json
import wave
import asyncio
import contextlib
import collections

import librosa
import webrtcvad
import websockets
from scipy.io import wavfile

asr_url = 'ws://172.26.215.90/asr/v0.7'

start_signal = {
    "signal": "start",
    "nbest": 1,
    "continuous_decoding": False,
}

end_signal = {
    "signal": "end",
}


async def recv(conn, ooo):
    """Standalone receive helper: reads messages until the server sends a
    'final_result', appends it to `ooo`, and returns the list."""
    while True:
        res_bytes = await conn.recv()
        res = json.loads(res_bytes)
        print("receive:", res)
        if res['type'] == 'final_result':
            ooo.append(res)
            break
    return ooo


async def asr_send(pcm_data):
    """Send one segment of raw PCM bytes to the ASR server and collect the
    final result, retrying up to two times on timeout."""
    ooo = []
    for i in range(1, 3):
        try:
            # max_size=None disables the incoming-message size limit.
            async with websockets.connect(asr_url, ping_timeout=2400,
                                          max_size=None) as websocket:
                await websocket.send(json.dumps(start_signal))
                await websocket.send(pcm_data)
                await websocket.send(json.dumps(end_signal))
                while True:
                    res_bytes = await websocket.recv()
                    res = json.loads(res_bytes)
                    # print("receive:", res)
                    if res['type'] == 'final_result':
                        ooo.append(res)
                        break
                return ooo
        except (TimeoutError, asyncio.TimeoutError):
            print('retrying, attempt', i)
            await asyncio.sleep(1)
    # All retries failed; return the (empty) list so callers can check it.
    return ooo


def read_wave(path):
    """Reads a .wav file.

    Takes the path, and returns (PCM audio data, sample rate).
    """
    with contextlib.closing(wave.open(path, 'rb')) as wf:
        num_channels = wf.getnchannels()
        assert num_channels == 1
        sample_width = wf.getsampwidth()
        assert sample_width == 2
        sample_rate = wf.getframerate()
        assert sample_rate in (8000, 16000, 32000, 48000)
        pcm_data = wf.readframes(wf.getnframes())
        return pcm_data, sample_rate


class Frame(object):
    """Represents a "frame" of audio data."""

    def __init__(self, bytes, timestamp, duration):
        self.bytes = bytes
        self.timestamp = timestamp
        self.duration = duration


def frame_generator(frame_duration_ms, audio, sample_rate):
    """Generates audio frames from PCM audio data.

    Takes the desired frame duration in milliseconds, the PCM data, and
    the sample rate.

    Yields Frames of the requested duration.
    """
    # 2 bytes per sample (16-bit PCM).
    n = int(sample_rate * (frame_duration_ms / 1000.0) * 2)
    offset = 0
    timestamp = 0.0
    duration = (float(n) / sample_rate) / 2.0
    while offset + n < len(audio):
        yield Frame(audio[offset:offset + n], timestamp, duration)
        timestamp += duration
        offset += n


def vad_collector(sample_rate, frame_duration_ms,
                  padding_duration_ms, vad, frames):
    """Filters out non-voiced audio frames.

    Given a webrtcvad.Vad and a source of audio frames, yields only
    the voiced audio.

    Uses a padded, sliding window algorithm over the audio frames.
    When more than 90% of the frames in the window are voiced (as
    reported by the VAD), the collector triggers and begins yielding
    audio frames. Then the collector waits until 90% of the frames in
    the window are unvoiced to detrigger.

    The window is padded at the front and back to provide a small
    amount of silence or the beginnings/endings of speech around the
    voiced frames.

    Arguments:

    sample_rate - The audio sample rate, in Hz.
    frame_duration_ms - The frame duration in milliseconds.
    padding_duration_ms - The amount to pad the window, in milliseconds.
    vad - An instance of webrtcvad.Vad.
    frames - a source of audio frames (sequence or generator).

    Returns: A generator that yields (PCM audio data, [start, end] times).
    """
    num_padding_frames = int(padding_duration_ms / frame_duration_ms)
    # We use a deque for our sliding window/ring buffer.
    ring_buffer = collections.deque(maxlen=num_padding_frames)
    # We have two states: TRIGGERED and NOTTRIGGERED. We start in the
    # NOTTRIGGERED state.
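    # With the parameters used in asr_audio() below (30 ms frames,
    # 300 ms padding), the ring buffer holds 300 / 30 = 10 frames, so
    # "more than 90%" means num_voiced > 0.9 * 10 = 9, i.e. the trigger
    # fires only when all 10 buffered frames are voiced, and releases
    # symmetrically when all 10 are unvoiced.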
    triggered = False

    time_list = []
    voiced_frames = []
    for frame in frames:
        is_speech = vad.is_speech(frame.bytes, sample_rate)

        if not triggered:
            ring_buffer.append((frame, is_speech))
            num_voiced = len([f for f, speech in ring_buffer if speech])
            # If we're NOTTRIGGERED and more than 90% of the frames in
            # the ring buffer are voiced frames, then enter the
            # TRIGGERED state.
            if num_voiced > 0.9 * ring_buffer.maxlen:
                triggered = True
                time_list.append(ring_buffer[0][0].timestamp)
                # print('starttime', ring_buffer[0][0].timestamp)
                # We want to yield all the audio we see from now until
                # we are NOTTRIGGERED, but we have to start with the
                # audio that's already in the ring buffer.
                for f, _ in ring_buffer:
                    voiced_frames.append(f)
                ring_buffer.clear()
        else:
            # We're in the TRIGGERED state, so collect the audio data
            # and add it to the ring buffer.
            voiced_frames.append(frame)
            ring_buffer.append((frame, is_speech))
            num_unvoiced = len([f for f, speech in ring_buffer if not speech])
            # If more than 90% of the frames in the ring buffer are
            # unvoiced, then enter NOTTRIGGERED and yield whatever
            # audio we've collected.
            if num_unvoiced > 0.9 * ring_buffer.maxlen:
                time_list.append(frame.timestamp + frame.duration)
                # print('endtime =', frame.timestamp + frame.duration)
                triggered = False
                yield b''.join([f.bytes for f in voiced_frames]), time_list
                voiced_frames = []
                # Rebind rather than clear(): the consumer may still hold a
                # reference to the list we just yielded.
                time_list = []
    if triggered:
        time_list.append(frame.timestamp + frame.duration)
    # If we have any leftover voiced audio when we run out of input,
    # yield it.
    if voiced_frames:
        yield b''.join([f.bytes for f in voiced_frames]), time_list


def asr_audio(wav):
    # print('wav name =', wav)
    # Skip files that are empty or cannot be decoded.
    if librosa.load(wav, sr=8000)[0].size < 3:
        # print(wav, 'no wav data open error')
        return []
    set_vadmode = 3  # VAD aggressiveness: 0 (least) to 3 (most)
    wav_result = []
    audio, sample_rate = read_wave(wav)
    vad = webrtcvad.Vad(set_vadmode)
    frames = frame_generator(30, audio, sample_rate)
    frames = list(frames)
    segments = vad_collector(sample_rate, 30, 300, vad, frames)
    for segment in segments:
        part_result = {}
        part_result['start_time'] = segment[1][0]
        part_result['end_time'] = segment[1][1]
        part_result['wav_name'] = wav
        # Run each segment's request on a fresh event loop.
        asr_result = asyncio.run(asr_send(segment[0]))
        if not asr_result:
            # All retries failed for this segment; skip it.
            continue
        # 'nbest' is a JSON-encoded list of hypotheses; take the best one.
        nbest = json.loads(asr_result[0]['nbest'])
        part_result['result'] = nbest[0]['sentence']
        wav_result.append(part_result)
    # print('wav_result =', wav_result)
    return wav_result


if __name__ == '__main__':
    file_path = './69896b4e-42b9-4387-a6b5-eb999d36a6e6.wav'
    # samplerate, data = wavfile.read(file_path)
    file_path_left = file_path[:-4] + '_left.wav'
    file_path_right = file_path[:-4] + '_right.wav'
    wav_result = asr_audio(file_path_left)
    for result in wav_result:
        print(result)
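

# The __main__ block above transcribes './..._left.wav', but nothing in
# this script creates the per-channel files (the wavfile.read call is
# commented out). As a minimal sketch, assuming the source file is a
# two-channel 16-bit WAV, a helper like the one below could produce the
# '_left.wav' / '_right.wav' inputs with the scipy.io.wavfile module
# already imported above. The name split_stereo_wav is illustrative and
# not part of the original script.
def split_stereo_wav(path):
    """Write <path>_left.wav and <path>_right.wav mono copies of a
    stereo WAV file."""
    samplerate, data = wavfile.read(path)
    assert data.ndim == 2 and data.shape[1] == 2, 'expected stereo input'
    wavfile.write(path[:-4] + '_left.wav', samplerate, data[:, 0])
    wavfile.write(path[:-4] + '_right.wav', samplerate, data[:, 1])
# Usage (before calling asr_audio): split_stereo_wav(file_path)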