Track token scores (#571)

* add export of per-token scores (ys, lm, context)

- for best path of the modified-beam-search decoding of transducer

* refactoring JSON export of OnlineRecognitionResult, extending pybind11 API of OnlineRecognitionResult

* export per-token scores also for greedy-search (online-transducer)

- export un-scaled lm_probs (modified-beam-search, online-transducer)
- polishing

* fill lm_probs/context_scores only if LM/ContextGraph is present (make Result smaller)
This commit is contained in:
Karel Vesely
2024-02-28 23:28:45 +01:00
committed by GitHub
parent 85d59b5840
commit 38c072dcb2
11 changed files with 155 additions and 49 deletions

View File

@@ -503,6 +503,9 @@ class OnlineRecognizer(object):
def get_result(self, s: OnlineStream) -> str:
    """Return the recognized text for stream *s*, with leading and
    trailing whitespace stripped."""
    result = self.recognizer.get_result(s)
    return result.text.strip()
def get_result_as_json_string(self, s: OnlineStream) -> str:
    """Return the full recognition result for stream *s* serialized
    as a JSON string (includes per-token scores when available)."""
    result = self.recognizer.get_result(s)
    return result.as_json_string()
def tokens(self, s: OnlineStream) -> List[str]:
    """Return the decoded token strings of the current result for
    stream *s*."""
    result = self.recognizer.get_result(s)
    return result.tokens
@@ -512,6 +515,15 @@ class OnlineRecognizer(object):
def start_time(self, s: OnlineStream) -> float:
    """Return the start time (in seconds) of the current result for
    stream *s*."""
    result = self.recognizer.get_result(s)
    return result.start_time
def ys_probs(self, s: OnlineStream) -> List[float]:
    """Return the per-token acoustic-model (ys) scores of the current
    result for stream *s*."""
    result = self.recognizer.get_result(s)
    return result.ys_probs
def lm_probs(self, s: OnlineStream) -> List[float]:
    """Return the per-token language-model scores of the current result
    for stream *s*.  Per the commit notes, this list is filled only when
    an external LM is in use — it may be empty otherwise."""
    result = self.recognizer.get_result(s)
    return result.lm_probs
def context_scores(self, s: OnlineStream) -> List[float]:
    """Return the per-token context-graph scores of the current result
    for stream *s*.  Per the commit notes, this list is filled only when
    a ContextGraph is in use — it may be empty otherwise."""
    result = self.recognizer.get_result(s)
    return result.context_scores
def is_endpoint(self, s: OnlineStream) -> bool:
    """Report whether the underlying recognizer has detected an endpoint
    on stream *s*."""
    recognizer = self.recognizer
    return recognizer.is_endpoint(s)