1 Star 0 Fork 0

嗜雪的蚂蚁/asr_timestamp_insert_text_grid

加入 Gitee
与超过 1200万 开发者一起发现、参与优秀开源项目,私有仓库也完全免费 :)
免费加入
文件
该仓库未声明开源许可证文件(LICENSE),使用请关注具体项目描述及其代码上游依赖。
克隆/下载
asr_timestamp_client.py 16.80 KB
一键复制 编辑 原始数据 按行查看 历史
# -*- encoding: utf-8 -*-
# -- coding: utf-8 --
'''
# @Time : 2023/10/17 18:03
# @Author: Alibaba Damo FunASR
# @Modificator : Shiyu He
# @University : Xinjiang University
'''
import os
import time
import websockets, ssl
import asyncio
import wave
import copy
# import threading
import argparse
import json
import traceback
from multiprocessing import Process
# from funasr.fileio.datadir_writer import DatadirWriter
import logging
logging.basicConfig(level=logging.ERROR)
# ---- command-line arguments -------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="localhost", required=False,
                    help="host ip, localhost, 0.0.0.0")
parser.add_argument("--port", type=int, default=10095, required=False,
                    help="grpc server port")
parser.add_argument("--chunk_size", type=str, default="5, 10, 5",
                    help="chunk")
parser.add_argument("--chunk_interval", type=int, default=10,
                    help="chunk")
parser.add_argument("--hotword", type=str, default="",
                    help="hotword, *.txt(one hotword perline) or hotwords seperate by space (could be: 阿里巴巴 达摩院)")
parser.add_argument("--audio_in", type=str, default=None,
                    help="audio_in")
parser.add_argument("--send_without_sleep", action="store_true", default=True,
                    help="if audio_in is set, send_without_sleep")
parser.add_argument("--thread_num", type=int, default=1,
                    help="thread_num")
parser.add_argument("--words_max_print", type=int, default=10000,
                    help="chunk")
parser.add_argument("--output_dir", type=str, default='result/',
                    help="output_dir")
parser.add_argument("--ssl", type=int, default=1,
                    help="1 for ssl connect, 0 for no ssl")
parser.add_argument("--use_itn", type=int, default=0,
                    help="1 for using itn, 0 for not itn")
parser.add_argument("--mode", type=str, default="2pass",
                    help="offline, online, 2pass")

args = parser.parse_args()
# "5, 10, 5" -> [5, 10, 5]: [offline-lookback, online-chunk, lookahead]
args.chunk_size = [int(x) for x in args.chunk_size.split(",")]
print(args)

# shared state used by the per-process websocket workers
from queue import Queue
voices = Queue()
offline_msg_done = False

# make sure the output directory exists before any writer opens a file in it
if args.output_dir is not None:
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
async def record_microphone():
    """Stream live microphone audio to the ASR server over the module-level websocket.

    Sends an initial JSON config message (mode, chunk size, hotwords, itn
    flag), then loops forever reading PCM chunks from the default input
    device and sending them as binary frames. Requires the ``pyaudio``
    package; never returns on its own.
    """
    import pyaudio  # local import: only needed in microphone mode
    global voices
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 16000
    # chunk duration in ms derived from the online chunk size and interval
    chunk_size = 60 * args.chunk_size[1] / args.chunk_interval
    CHUNK = int(RATE / 1000 * chunk_size)
    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)
    # hotwords: either a *.txt file (one hotword per line) or a literal string
    if args.hotword.endswith(".txt"):
        # fix: close the hotword file instead of leaking the handle
        with open(args.hotword) as f_scp:
            formatted_words = [line.strip() for line in f_scp]
        hotword_msg = ' '.join(formatted_words)
    else:
        hotword_msg = args.hotword
    # --use_itn 0 disables inverse text normalization
    use_itn = args.use_itn != 0
    message = json.dumps({"mode": args.mode, "chunk_size": args.chunk_size, "chunk_interval": args.chunk_interval,
                          "wav_name": "microphone", "is_speaking": True, "hotwords": hotword_msg, "itn": use_itn})
    await websocket.send(message)
    while True:
        data = stream.read(CHUNK)
        await websocket.send(data)
        # yield to the event loop so the receiver task can run
        await asyncio.sleep(0.005)
async def record_from_scp(chunk_begin, chunk_size):
    """Send the wavs in ``args.audio_in[chunk_begin:chunk_begin+chunk_size]`` to the server.

    ``args.audio_in`` is either an .scp list ("name path" per line) or a single
    audio path. Raw .pcm is sent as-is, .wav is read with the ``wave`` module,
    anything else is decoded to 16 kHz mono s16le via the ffmpeg CLI. After the
    last chunk of a file an ``{"is_speaking": false}`` message marks the end of
    the utterance; in offline mode the coroutine waits for ``offline_msg_done``
    before closing the websocket.
    """
    global voices
    # resolve the list of wav entries
    if args.audio_in.endswith(".scp"):
        # fix: close the scp file instead of leaking the handle
        with open(args.audio_in) as f_scp:
            wavs = f_scp.readlines()
    else:
        wavs = [args.audio_in]

    # hotwords: either a *.txt file (one hotword per line) or a literal string
    if args.hotword.endswith(".txt"):
        # fix: close the hotword file instead of leaking the handle
        with open(args.hotword) as f_hot:
            hotwords = ' '.join(line.strip() for line in f_hot)
    else:
        hotwords = args.hotword

    # --use_itn 0 disables inverse text normalization
    use_itn = args.use_itn != 0

    if chunk_size > 0:
        wavs = wavs[chunk_begin:chunk_begin + chunk_size]
    for wav in wavs:
        wav_splits = wav.strip().split()
        wav_name = wav_splits[0] if len(wav_splits) > 1 else "demo"
        wav_path = wav_splits[1] if len(wav_splits) > 1 else wav_splits[0]
        if not len(wav_path.strip()) > 0:
            continue
        if wav_path.endswith(".pcm"):
            with open(wav_path, "rb") as f:
                audio_bytes = f.read()
        elif wav_path.endswith(".wav"):
            with wave.open(wav_path, "rb") as wav_file:
                audio_bytes = bytes(wav_file.readframes(wav_file.getnframes()))
        else:
            import ffmpeg
            try:
                # This launches a subprocess to decode audio while down-mixing
                # and resampling as necessary. Requires the ffmpeg CLI and the
                # `ffmpeg-python` package to be installed.
                audio_bytes, _ = (
                    ffmpeg.input(wav_path, threads=0)
                    .output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=16000)
                    .run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
                )
            except ffmpeg.Error as e:
                raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e

        # bytes per chunk: 60*chunk_size[1]/chunk_interval ms of 16 kHz 16-bit mono
        stride = int(60 * args.chunk_size[1] / args.chunk_interval / 1000 * 16000 * 2)
        chunk_num = (len(audio_bytes) - 1) // stride + 1

        # first message of each file: the session config
        message = json.dumps({"mode": args.mode, "chunk_size": args.chunk_size, "chunk_interval": args.chunk_interval,
                              "wav_name": wav_name, "is_speaking": True, "hotwords": hotwords, "itn": use_itn})
        await websocket.send(message)
        for i in range(chunk_num):
            beg = i * stride
            await websocket.send(audio_bytes[beg:beg + stride])
            if i == chunk_num - 1:
                # last chunk: tell the server the utterance is finished
                await websocket.send(json.dumps({"is_speaking": False}))
            # offline mode streams as fast as possible; online/2pass paces
            # the upload at roughly real time
            sleep_duration = 0.001 if args.mode == "offline" else 60 * args.chunk_size[1] / args.chunk_interval / 1000
            await asyncio.sleep(sleep_duration)
        if not args.mode == "offline":
            await asyncio.sleep(2)
    # offline model needs to wait for the final result message to be received
    if args.mode == "offline":
        global offline_msg_done
        while not offline_msg_done:
            await asyncio.sleep(1)
    await websocket.close()
async def message(id):
    """Receive recognition results from the server and print/persist them.

    ``id`` is a "pid_fileid" label used only in console output. When
    --output_dir is set, appends "wav_name text" lines to result.txt and
    "wav_name timestamps" lines to timestamp.txt. Sets the global
    ``offline_msg_done`` flag once a final (offline / 2pass) result arrives,
    which lets the sender coroutine close the connection.
    """
    global websocket, voices, offline_msg_done
    text_print = ""
    text_print_2pass_online = ""
    text_print_2pass_offline = ""
    if args.output_dir is not None:
        ibest_result_writer = open(os.path.join(args.output_dir, "result.txt"), "a", encoding="utf-8")
        ibest_time_writer = open(os.path.join(args.output_dir, "timestamp.txt"), "a", encoding="utf-8")
    else:
        ibest_result_writer = None
        ibest_time_writer = None
    try:
        while True:
            meg = await websocket.recv()
            meg = json.loads(meg)
            wav_name = meg.get("wav_name", "demo")
            text = meg["text"]
            # timestamps are only present when the server model produces them
            timestamp = ""
            if "timestamp" in meg:
                timestamp = meg["timestamp"]
            print("meg:\n", meg)
            if ibest_result_writer is not None:
                if timestamp != "":
                    text_write_line = "{} {}\n".format(wav_name, text)
                    timestamp_write_line = "{} {}\n".format(wav_name, timestamp)
                    ibest_time_writer.write(timestamp_write_line)
                else:
                    text_write_line = "{}\t{}\n".format(wav_name, text)
                ibest_result_writer.write(text_write_line)
            if meg["mode"] == "online":
                text_print += "{}".format(text)
                text_print = text_print[-args.words_max_print:]
                os.system('clear')
            elif meg["mode"] == "offline":
                if timestamp != "":
                    text_print += "{} timestamp: {}".format(text, timestamp)
                else:
                    text_print += "{}".format(text)
                offline_msg_done = True
            else:
                # 2pass: online partials are appended, the offline pass
                # replaces them with the corrected final text
                if meg["mode"] == "2pass-online":
                    text_print_2pass_online += "{}".format(text)
                    text_print = text_print_2pass_offline + text_print_2pass_online
                else:
                    text_print_2pass_online = ""
                    text_print = text_print_2pass_offline + "{}".format(text)
                    text_print_2pass_offline += "{}".format(text)
                text_print = text_print[-args.words_max_print:]
                os.system('clear')
                print("\rpid" + str(id) + ": " + text_print)
                offline_msg_done = True
    except Exception as e:
        # the sender closing the websocket ends this loop via an exception
        print("Exception:", e)
    finally:
        # fix: close (and flush) the writers so no buffered lines are lost
        if ibest_result_writer is not None:
            ibest_result_writer.close()
        if ibest_time_writer is not None:
            ibest_time_writer.close()
async def ws_client(id, chunk_begin, chunk_size):
    """Open one websocket session per wav and run the sender + receiver tasks.

    ``id`` identifies this worker process; each wav index i in
    [chunk_begin, chunk_begin + chunk_size) gets its own connection.
    Terminates the worker process via exit(0) once all files are done.
    """
    if args.audio_in is None:
        # microphone mode: a single never-ending session
        chunk_begin = 0
        chunk_size = 1
    global websocket, voices, offline_msg_done
    for i in range(chunk_begin, chunk_begin + chunk_size):
        offline_msg_done = False
        voices = Queue()
        if args.ssl == 1:
            # NOTE(review): certificate/hostname verification is deliberately
            # disabled (self-signed server certs); do not reuse for untrusted hosts.
            ssl_context = ssl.SSLContext()
            ssl_context.check_hostname = False
            ssl_context.verify_mode = ssl.CERT_NONE
            uri = "wss://{}:{}".format(args.host, args.port)
        else:
            uri = "ws://{}:{}".format(args.host, args.port)
            ssl_context = None
        print("connect to", uri)
        async with websockets.connect(uri, subprotocols=["binary"], ping_interval=None, ssl=ssl_context) as websocket:
            if args.audio_in is not None:
                sender = asyncio.create_task(record_from_scp(i, 1))
            else:
                sender = asyncio.create_task(record_microphone())
            receiver = asyncio.create_task(message(str(id) + "_" + str(i)))  # processid+fileid
            await asyncio.gather(sender, receiver)
    exit(0)
def one_thread(id, chunk_begin, chunk_size):
    """Worker-process entry point: drive ws_client on this process's event loop."""
    loop = asyncio.get_event_loop()
    loop.run_until_complete(ws_client(id, chunk_begin, chunk_size))
    # unreachable in practice (ws_client calls exit(0)), kept for parity
    loop.run_forever()
def get_audio_duration(wav_path):
    """Return the duration of a wave file in seconds (nframes / framerate).

    :param wav_path: path to a .wav file readable by the ``wave`` module
    :return: duration as a float in seconds
    """
    # fix: use a context manager so the file handle is always closed
    with wave.open(wav_path, 'rb') as audio:
        params = audio.getparams()
        return params.nframes / params.framerate
def generate_timestamp_json(input_dir, output_dir):
    """Merge duration.txt, result.txt and timestamp.txt into timestamp.json.

    Each input file holds one "wav_name value" line per wav. The merged JSON
    maps wav_name -> {"rec": text, "time_stamp": [[beg, end], ...],
    "duration": seconds}. Raises KeyError if a wav in duration.txt is missing
    from the other two files.
    """
    # read duration.txt: "wav_name seconds"
    duration_data = {}
    with open(os.path.join(input_dir, "duration.txt"), "r") as f:
        for line in f:
            wav_name, duration = line.strip().split(' ', 1)
            duration_data[wav_name] = float(duration)
    # read result.txt: "wav_name recognized-text"
    result_data = {}
    with open(os.path.join(input_dir, "result.txt"), "r", encoding='utf-8') as f:
        for line in f:
            # fix: maxsplit=1 — the recognized text itself may contain spaces
            wav_name, result = line.strip().split(' ', 1)
            result_data[wav_name] = result
    # read timestamp.txt: "wav_name [[beg,end],...]"
    timestamp_data = {}
    with open(os.path.join(input_dir, "timestamp.txt"), "r") as f:
        for line in f:
            # fix: maxsplit=1 — the JSON timestamp list may contain spaces
            wav_name, time_stamps = line.strip().split(' ', 1)
            timestamp_data[wav_name] = list(json.loads(time_stamps))
    # merge the three dicts keyed on wav_name
    merged_data = {}
    for wav_name in duration_data.keys():
        merged_data[wav_name] = {
            "rec": result_data[wav_name],
            "time_stamp": timestamp_data[wav_name],
            "duration": duration_data[wav_name]
        }
    print(f'merged_data number is {len(merged_data)}')
    # fix: close (and flush) the output file via a context manager so the
    # JSON is guaranteed to reach disk
    with open(os.path.join(output_dir, "timestamp.json"), "w", encoding="utf-8") as out_f:
        json.dump(merged_data, out_f, ensure_ascii=False)
if __name__ == '__main__':
    start = time.time()
    if args.audio_in is None:
        # microphone mode: one worker process, runs until interrupted
        p = Process(target=one_thread, args=(0, 0, 0))
        p.start()
        p.join()
        print('end')
    else:
        # resolve the wav list (scp file or a single audio path)
        if args.audio_in.endswith(".scp"):
            with open(args.audio_in) as f_scp:
                wavs = f_scp.readlines()
        else:
            wavs = [args.audio_in]

        # record each wav's duration for the final merge step.
        # fix: use `with` so the writer is closed/flushed BEFORE the worker
        # processes fork and before generate_timestamp_json reads the file.
        with open(os.path.join(args.output_dir, "duration.txt"), "a", encoding="utf-8") as duration_writer:
            for wav in wavs:
                wav_splits = wav.strip().split()
                wav_name = wav_splits[0] if len(wav_splits) > 1 else "demo"
                wav_path = wav_splits[1] if len(wav_splits) > 1 else wav_splits[0]
                duration = get_audio_duration(wav_path)
                duration_writer.write("{} {}\n".format(wav_name, duration))

        # split the wavs evenly across worker processes
        total_len = len(wavs)
        if total_len >= args.thread_num:
            chunk_size = int(total_len / args.thread_num)
            remain_wavs = total_len - chunk_size * args.thread_num
        else:
            chunk_size = 1
            remain_wavs = 0

        # truncate a stale result.txt so this run starts clean
        check_file_path = os.path.join(args.output_dir, "result.txt")
        if os.path.exists(check_file_path):
            with open(check_file_path, 'w', encoding='utf-8') as file:
                file.truncate(0)
            print("原结果文件内容已清空")

        process_list = []
        chunk_begin = 0
        for i in range(args.thread_num):
            now_chunk_size = chunk_size
            if remain_wavs > 0:
                # distribute the remainder: one extra wav per leading process
                now_chunk_size = chunk_size + 1
                remain_wavs = remain_wavs - 1
            # process i handles wavs [chunk_begin, chunk_begin + now_chunk_size)
            p = Process(target=one_thread, args=(i, chunk_begin, now_chunk_size))
            chunk_begin = chunk_begin + now_chunk_size
            p.start()
            process_list.append(p)
        for p in process_list:
            p.join()

        end = time.time()
        print('time: ', end - start)
        print('ASR end')
        print('开始进行数据合并,生成timestamp.json')
        generate_timestamp_json(args.output_dir, args.output_dir)
        print('已生成timestamp.json')
Loading...
马建仓 AI 助手
尝试更多
代码解读
代码找茬
代码优化
1
https://gitee.com/mayi123/asr_timestamp_insert_text_grid.git
git@gitee.com:mayi123/asr_timestamp_insert_text_grid.git
mayi123
asr_timestamp_insert_text_grid
asr_timestamp_insert_text_grid
master

搜索帮助