|
| 1 | +# /// script |
| 2 | +# requires-python = ">=3.11" |
| 3 | +# dependencies = [ |
| 4 | +# "faster-whisper", |
| 5 | +# "flask", |
| 6 | +# ] |
| 7 | +# /// |
import os
import tempfile

from flask import Flask, request, jsonify
from faster_whisper import WhisperModel
| 11 | + |
| 12 | +app = Flask(__name__) |
| 13 | + |
| 14 | +# Load Whisper model (you can choose size: tiny, base, small, medium, large) |
| 15 | +model = WhisperModel("base", compute_type="auto") |
| 16 | + |
| 17 | +@app.route("/") |
| 18 | +def index(): |
| 19 | + return "Whisper Transcription API is running." |
| 20 | + |
@app.route("/transcribe", methods=["POST"])
def transcribe():
    """Transcribe an uploaded audio file and return timestamped segments.

    Expects a multipart/form-data POST with the audio under the ``file``
    field. Responds 400 with ``{"error": ...}`` when the field is missing
    or the filename is empty; otherwise returns
    ``{"transcription": [{"start": float, "end": float, "text": str}, ...]}``.
    """
    if 'file' not in request.files:
        return jsonify({"error": "No file uploaded"}), 400

    file = request.files['file']
    if file.filename == '':
        return jsonify({"error": "Empty filename"}), 400

    # Save to a private temp file rather than /tmp/<client filename>: the
    # original joined the client-supplied name into /tmp, which allows path
    # traversal (e.g. "../../etc/cron.d/x") and lets concurrent uploads with
    # the same name clobber each other. Keep the extension so the decoder
    # can sniff the container format.
    suffix = os.path.splitext(file.filename)[1]
    fd, filepath = tempfile.mkstemp(suffix=suffix)
    os.close(fd)  # file.save() reopens by path; we don't need the raw fd
    try:
        file.save(filepath)
        segments, _ = model.transcribe(filepath)
        # segments is lazy in faster-whisper — materialize it while the
        # temp file still exists on disk.
        result = [
            {"start": segment.start, "end": segment.end, "text": segment.text}
            for segment in segments
        ]
    finally:
        # Always clean up: the original leaked the temp file whenever
        # transcription raised.
        os.remove(filepath)

    return jsonify({"transcription": result})
| 47 | + |
# Script entry point: run the Flask development server on port 8000.
# NOTE(review): debug=True enables auto-reload and the Werkzeug debugger,
# which permits arbitrary code execution from the browser console — confirm
# this service is never exposed beyond localhost with this flag set.
if __name__ == "__main__":
    app.run(debug=True, port=8000)
| 50 | + |
0 commit comments