42 lines
1.1 KiB
Python
42 lines
1.1 KiB
Python
from fastapi import FastAPI, Request
|
|
from contextlib import asynccontextmanager
|
|
from funasr import AutoModel
|
|
from funasr.utils.postprocess_utils import rich_transcription_postprocess
|
|
import os
|
|
import torch
|
|
|
|
# Filesystem path to the SenseVoice model weights; overridable per deployment
# via the MODEL_DIR environment variable.
model_dir = os.getenv("MODEL_DIR", "/model/iic/SenseVoiceSmall")

# Global model handle; populated exactly once by the lifespan hook at startup.
model = None
|
|
|
|
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Load the ASR model once at application startup.

    Picks CUDA when available, otherwise CPU, and stores the loaded model in
    the module-level ``model`` global so request handlers can reach it.
    No explicit teardown is required; the model is released with the process.
    (Fix: removed an unreachable-in-spirit trailing ``pass`` after ``yield``.)
    """
    global model
    if model is None:
        # Prefer GPU when the runtime has one; falls back to CPU.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        model = AutoModel(model=model_dir, disable_update=True, device=device)
    yield
|
|
|
|
# Wire the lifespan hook so the model is loaded before any request is served.
app = FastAPI(lifespan=lifespan)
|
|
|
|
@app.post("/recognition")
async def asr(request: Request, language: str = "auto"):
    """Transcribe the raw audio bytes sent in the request body.

    Query params:
        language: language hint for the recognizer ("auto", "zh", "en", ...);
            defaults to "auto" (auto-detect).

    Returns:
        dict with "RecognitionStatus" and the post-processed "DisplayText".
    """
    audio_data = await request.body()
    # Bug fix: forward the language hint — previously the query parameter was
    # accepted but silently ignored, so recognition always ran with defaults.
    res = model.generate(
        input=audio_data,
        language=language,
        use_itn=True,
        ban_emo_unk=True,
    )
    # Strip SenseVoice's rich tags (emotion/event/lang markers) from the text.
    text = rich_transcription_postprocess(res[0]["text"])
    return {
        "RecognitionStatus": "Success",
        "DisplayText": text,
    }
|
|
|
|
@app.get("/health")
@app.get("/ready")
def ready():
    """Liveness/readiness probe; unconditionally reports the service as OK."""
    probe_response = {"status": "ok"}
    return probe_response
|
|
|
|
|
|
if __name__ == "__main__":
    # Dev/standalone entry point; in production an external ASGI runner may
    # serve this module instead.
    import uvicorn

    # Defaults preserve the original behavior (0.0.0.0:80); HOST/PORT env vars
    # allow overriding without code changes, matching the MODEL_DIR pattern.
    uvicorn.run(
        app,
        host=os.getenv("HOST", "0.0.0.0"),
        port=int(os.getenv("PORT", "80")),
    )
|
|
|