Fix displaying streaming speech recognition results for Python. (#2196)
This commit is contained in:
@@ -6,7 +6,6 @@ from _sherpa_onnx import (
|
||||
AudioTaggingModelConfig,
|
||||
CircularBuffer,
|
||||
DenoisedAudio,
|
||||
Display,
|
||||
FastClustering,
|
||||
FastClusteringConfig,
|
||||
OfflinePunctuation,
|
||||
@@ -48,6 +47,7 @@ from _sherpa_onnx import (
|
||||
write_wave,
|
||||
)
|
||||
|
||||
from .display import Display
|
||||
from .keyword_spotter import KeywordSpotter
|
||||
from .offline_recognizer import OfflineRecognizer
|
||||
from .online_recognizer import OnlineRecognizer
|
||||
|
||||
41
sherpa-onnx/python/sherpa_onnx/display.py
Normal file
41
sherpa-onnx/python/sherpa_onnx/display.py
Normal file
@@ -0,0 +1,41 @@
|
||||
# Copyright (c) 2025 Xiaomi Corporation
|
||||
import os
|
||||
from time import gmtime, strftime
|
||||
|
||||
|
||||
def get_current_time():
    """Return the current UTC time as a ``YYYY-MM-DD HH:MM:SS`` string."""
    now = gmtime()  # NOTE: UTC, not local time
    return strftime("%Y-%m-%d %H:%M:%S", now)
|
||||
|
||||
|
||||
def clear_console():
    """Clear the terminal screen (``cls`` on Windows, ``clear`` elsewhere)."""
    command = "cls" if os.name == "nt" else "clear"
    os.system(command)
|
||||
|
||||
|
||||
class Display:
    """Accumulate streaming speech-recognition text and render it.

    Finalized sentences are stored as ``(timestamp, text)`` pairs in
    ``self.sentences``; the sentence currently being recognized is kept
    in ``self.currentText`` until it is finalized.
    """

    def __init__(self):
        # History of finalized sentences: list of (timestamp, text) pairs.
        self.sentences = []
        # Text of the sentence currently being recognized (may be partial).
        self.currentText = ""

    def update_text(self, text):
        """Replace the in-progress sentence with ``text``."""
        self.currentText = text

    def finalize_current_sentence(self):
        """Move the in-progress text into the history, then reset it.

        Blank/whitespace-only text is discarded rather than stored.
        """
        if self.currentText.strip():
            self.sentences.append((get_current_time(), self.currentText))

        self.currentText = ""

    def display(self):
        """Clear the screen and redraw the header, history, and current text."""
        clear_console()
        separator = "-" * 30
        print("=== Speech Recognition with Next-gen Kaldi ===")
        print("Time:", get_current_time())
        print(separator)

        # display history sentences
        if self.sentences:
            for idx, (when, text) in enumerate(self.sentences, start=1):
                print(f"[{when}] {idx}. {text}")
            print(separator)

        if self.currentText.strip():
            print("Recognizing:", self.currentText)
|
||||
Reference in New Issue
Block a user