diff --git a/README.md b/README.md index 474a1301da7..e4e1383b018 100644 --- a/README.md +++ b/README.md @@ -509,6 +509,14 @@ cmake --build build -j --config Release https://user-images.githubusercontent.com/1991296/194935793-76afede7-cfa8-48d8-a80f-28ba83be7d09.mp4 +If you want to stream audio from another app without SDL or a microphone device, use +[stream-pcm](examples/stream-pcm). It reads raw PCM from stdin or a pipe: + +```bash +ffmpeg -i samples/jfk.wav -f s16le -ac 1 -ar 16000 - | \ + ./build/bin/whisper-stream-pcm -m ./models/ggml-base.en.bin --format s16 --sample-rate 16000 --step 500 --length 5000 +``` + ## Confidence color-coding Adding the `--print-colors` argument will print the transcribed text using an experimental color coding strategy @@ -842,6 +850,7 @@ Some of the examples are even ported to run in the browser using WebAssembly. Ch | [whisper-cli](examples/cli) | [whisper.wasm](examples/whisper.wasm) | Tool for translating and transcribing audio using Whisper | | [whisper-bench](examples/bench) | [bench.wasm](examples/bench.wasm) | Benchmark the performance of Whisper on your machine | | [whisper-stream](examples/stream) | [stream.wasm](examples/stream.wasm) | Real-time transcription of raw microphone capture | +| [whisper-stream-pcm](examples/stream-pcm) | | Real-time transcription of raw PCM via stdin/pipe (no SDL dependency) | | [whisper-command](examples/command) | [command.wasm](examples/command.wasm) | Basic voice assistant example for receiving voice commands from the mic | | [whisper-server](examples/server) | | HTTP transcription server with OAI-like API | | [whisper-talk-llama](examples/talk-llama) | | Talk with a LLaMA bot | diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index b202ca00b77..c441115af4d 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -107,6 +107,7 @@ else() add_subdirectory(server) add_subdirectory(quantize) add_subdirectory(vad-speech-segments) + add_subdirectory(stream-pcm) if (WHISPER_SDL2) add_subdirectory(stream) add_subdirectory(command) diff --git a/examples/stream-pcm/CMakeLists.txt b/examples/stream-pcm/CMakeLists.txt new file mode 100644 index 00000000000..5da78e0d3c5 --- /dev/null +++ b/examples/stream-pcm/CMakeLists.txt @@ -0,0 +1,8 @@ +set(TARGET whisper-stream-pcm) +add_executable(${TARGET} stream-pcm.cpp) + +include(DefaultTargetOptions) + +target_link_libraries(${TARGET} PRIVATE common whisper ${CMAKE_THREAD_LIBS_INIT}) + +install(TARGETS ${TARGET} RUNTIME) diff --git a/examples/stream-pcm/README.md b/examples/stream-pcm/README.md new file mode 100644 index 00000000000..7b296e0c2ae --- /dev/null +++ b/examples/stream-pcm/README.md @@ -0,0 +1,55 @@ +# whisper.cpp/examples/stream-pcm + +This example performs real-time inference on raw PCM audio streamed via stdin, a pipe, or a file. +It is PCM-first (input is consumed once) and does not require SDL or a microphone device. 
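+
+Because the input is plain raw PCM on a stream, any program that can write 16 kHz mono samples to stdout can act as the capture front end. As a minimal sketch, live microphone audio can be fed through ffmpeg instead of SDL; this assumes a Linux host with an ALSA `default` capture device and an ffmpeg build that includes the ALSA input device:
+
+```bash
+ffmpeg -f alsa -i default -f s16le -ac 1 -ar 16000 - | \
+  ./build/bin/whisper-stream-pcm -m ./models/ggml-base.en.bin --format s16 --sample-rate 16000 --step 1000 --length 10000 --keep 500
+```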
+ +## Usage + +Stream raw PCM (16 kHz, mono) into the tool (non-VAD): + +```bash +./build/bin/whisper-stream-pcm -m ./models/ggml-base.en.bin --format s16 --sample-rate 16000 --step 1000 --length 10000 --keep 500 +``` + +Enable VAD-based segmentation (optional, recommended for speech bursts): + +```bash +./build/bin/whisper-stream-pcm -m ./models/ggml-base.en.bin --format s16 --sample-rate 16000 --vad --vad-probe-ms 200 --vad-silence-ms 800 --vad-pre-roll-ms 300 --length 8000 +``` + +You can also read from a named pipe (FIFO): + +```bash +mkfifo /tmp/whisper.pcm +./build/bin/whisper-stream-pcm -m ./models/ggml-base.en.bin --input /tmp/whisper.pcm --format s16 --sample-rate 16000 --step 1000 --length 10000 --keep 500 +``` + +Example of piping a WAV file using ffmpeg (optional, `-re` for realtime pacing): + +```bash +ffmpeg -re -i samples/jfk.wav -f s16le -ac 1 -ar 16000 - | \ + ./build/bin/whisper-stream-pcm -m ./models/ggml-base.en.bin --format s16 --sample-rate 16000 --step 1000 --length 10000 --keep 500 +``` + +Windows (PowerShell + `cmd /c`) pipe example: + +```powershell +cmd /c "ffmpeg -re -hide_banner -loglevel error -i samples\jfk.wav -f s16le -ac 1 -ar 16000 - | build-cpu\bin\Release\whisper-stream-pcm.exe -m models\ggml-base.en.bin --format s16 --sample-rate 16000 --step 1000 --length 10000 --keep 500" +``` + +## Notes + +- Input must be raw PCM, mono, 16 kHz. The tool does not resample. +- Supported formats: `f32` or `s16` (little-endian). +- Use `--input -` (default) for stdin. +- `--step` must be > 0 unless `--vad` is enabled. +- For VAD, `--vad-probe-ms` should be at least 200 ms; very small probes can fail to trigger. + +## Building + +`whisper-stream-pcm` does not depend on SDL and builds with the default examples: + +```bash +cmake -B build +cmake --build build --config Release +``` diff --git a/examples/stream-pcm/stream-pcm.cpp b/examples/stream-pcm/stream-pcm.cpp new file mode 100644 index 00000000000..6ce7b572ce2 --- /dev/null +++ b/examples/stream-pcm/stream-pcm.cpp @@ -0,0 +1,901 @@ +// Real-time speech recognition from a raw PCM stream (stdin or pipe) +// +// This mirrors the mic-based whisper-stream example but reads PCM audio +// from stdin or a file/pipe to avoid external audio device dependencies. 
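+//
+// Typical invocation (the s16 pipe example from examples/stream-pcm/README.md):
+//
+//   ffmpeg -i samples/jfk.wav -f s16le -ac 1 -ar 16000 - | ./build/bin/whisper-stream-pcm -m ./models/ggml-base.en.bin --format s16 --sample-rate 16000 --step 1000 --length 10000 --keep 500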
+//
+#include "common.h"
+#include "common-whisper.h"
+#include "whisper.h"
+
+#include <algorithm>
+#include <atomic>
+#include <chrono>
+#include <csignal>
+#include <cstdarg>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <ctime>
+#include <fstream>
+#include <mutex>
+#include <string>
+#include <thread>
+#include <vector>
+
+#if defined(_WIN32)
+#include <io.h>
+#include <fcntl.h>
+#endif
+
+// command-line parameters
+struct whisper_params {
+    int32_t n_threads  = std::min(4, (int32_t) std::thread::hardware_concurrency());
+    int32_t step_ms    = 3000;
+    int32_t length_ms  = 10000;
+    int32_t keep_ms    = 200;
+    int32_t max_tokens = 32;
+    int32_t audio_ctx  = 0;
+    int32_t beam_size  = -1;
+
+    float vad_thold  = 0.6f;
+    float freq_thold = 100.0f;
+
+    bool translate     = false;
+    bool no_fallback   = false;
+    bool print_special = false;
+    bool no_context    = true;
+    bool no_timestamps = false;
+    bool tinydiarize   = false;
+    bool save_audio    = false; // save audio to wav file
+    bool use_gpu       = true;
+    bool flash_attn    = true;
+    bool debug         = false;
+    bool use_vad       = false;
+
+    std::string language  = "en";
+    std::string model     = "models/ggml-base.en.bin";
+    std::string fname_out;
+
+    std::string input  = "-";   // "-" = stdin
+    std::string format = "f32"; // f32 or s16
+    int32_t sample_rate = WHISPER_SAMPLE_RATE;
+
+    int32_t vad_probe_ms    = 200;
+    int32_t vad_silence_ms  = 800;
+    int32_t vad_pre_roll_ms = 300;
+
+    std::string vad_model = ""; // path to silero .bin model
+};
+
+void whisper_print_usage(int argc, char ** argv, const whisper_params & params);
+
+enum class pcm_format {
+    f32,
+    s16,
+};
+
+static bool whisper_params_parse(int argc, char ** argv, whisper_params & params) {
+    for (int i = 1; i < argc; i++) {
+        std::string arg = argv[i];
+
+        if (arg == "-h" || arg == "--help") {
+            whisper_print_usage(argc, argv, params);
+            exit(0);
+        }
+        else if (arg == "-t"    || arg == "--threads")       { params.n_threads     = std::stoi(argv[++i]); }
+        else if (                  arg == "--step")          { params.step_ms       = std::stoi(argv[++i]); }
+        else if (                  arg == "--length")        { params.length_ms     = std::stoi(argv[++i]); }
+        else if (                  arg == "--keep")          { params.keep_ms       = std::stoi(argv[++i]); }
+        else if (arg == "-mt"   || arg == "--max-tokens")    { params.max_tokens    = std::stoi(argv[++i]); }
+        else if (arg == "-ac"   || arg == "--audio-ctx")     { params.audio_ctx     = std::stoi(argv[++i]); }
+        else if (arg == "-bs"   || arg == "--beam-size")     { params.beam_size     = std::stoi(argv[++i]); }
+        else if (arg == "-vth"  || arg == "--vad-thold")     { params.vad_thold     = std::stof(argv[++i]); }
+        else if (arg == "-fth"  || arg == "--freq-thold")    { params.freq_thold    = std::stof(argv[++i]); }
+        else if (arg == "-tr"   || arg == "--translate")     { params.translate     = true; }
+        else if (arg == "-nf"   || arg == "--no-fallback")   { params.no_fallback   = true; }
+        else if (arg == "-ps"   || arg == "--print-special") { params.print_special = true; }
+        else if (arg == "-kc"   || arg == "--keep-context")  { params.no_context    = false; }
+        else if (arg == "-l"    || arg == "--language")      { params.language      = argv[++i]; }
+        else if (arg == "-m"    || arg == "--model")         { params.model         = argv[++i]; }
+        else if (arg == "-f"    || arg == "--file")          { params.fname_out     = argv[++i]; }
+        else if (arg == "-tdrz" || arg == "--tinydiarize")   { params.tinydiarize   = true; }
+        else if (arg == "-sa"   || arg == "--save-audio")    { params.save_audio    = true; }
+        else if (arg == "-ng"   || arg == "--no-gpu")        { params.use_gpu       = false; }
+        else if (arg == "-fa"   || arg == "--flash-attn")    { params.flash_attn    = true; }
+        else if (arg == "-nfa"  || arg == "--no-flash-attn") { params.flash_attn    = false; }
+        else if (arg == "-i"    || arg == "--input")         { params.input         = argv[++i]; }
+        else if (arg == 
"--format") { params.format = argv[++i]; } + else if (arg == "--sample-rate") { params.sample_rate = std::stoi(argv[++i]); } + else if (arg == "--debug") { params.debug = true; } + else if (arg == "--vad") { params.use_vad = true; } + else if (arg == "--vad-probe-ms") { params.vad_probe_ms = std::stoi(argv[++i]); } + else if (arg == "--vad-silence-ms") { params.vad_silence_ms = std::stoi(argv[++i]); } + else if (arg == "--vad-pre-roll-ms") { params.vad_pre_roll_ms = std::stoi(argv[++i]); } + else if (arg == "-vm" || arg == "--vad-model") { params.vad_model = argv[++i]; } + + else { + fprintf(stderr, "error: unknown argument: %s\n", arg.c_str()); + whisper_print_usage(argc, argv, params); + exit(0); + } + } + + return true; +} + +void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & params) { + fprintf(stderr, "\n"); + fprintf(stderr, "usage: %s [options]\n", argv[0]); + fprintf(stderr, "\n"); + fprintf(stderr, "options:\n"); + fprintf(stderr, " -h, --help [default] show this help message and exit\n"); + fprintf(stderr, " -t N, --threads N [%-7d] number of threads to use during computation\n", params.n_threads); + fprintf(stderr, " --step N [%-7d] audio step size in milliseconds\n", params.step_ms); + fprintf(stderr, " --length N [%-7d] audio length in milliseconds\n", params.length_ms); + fprintf(stderr, " --keep N [%-7d] audio to keep from previous step in ms\n", params.keep_ms); + fprintf(stderr, " -mt N, --max-tokens N [%-7d] maximum number of tokens per audio chunk\n", params.max_tokens); + fprintf(stderr, " -ac N, --audio-ctx N [%-7d] audio context size (0 - all)\n", params.audio_ctx); + fprintf(stderr, " -bs N, --beam-size N [%-7d] beam size for beam search\n", params.beam_size); + fprintf(stderr, " -vth N, --vad-thold N [%-7.2f] voice activity detection threshold\n", params.vad_thold); + fprintf(stderr, " -fth N, --freq-thold N [%-7.2f] high-pass frequency cutoff\n", params.freq_thold); + fprintf(stderr, " -tr, --translate [%-7s] translate from source language to english\n", params.translate ? "true" : "false"); + fprintf(stderr, " -nf, --no-fallback [%-7s] do not use temperature fallback while decoding\n", params.no_fallback ? "true" : "false"); + fprintf(stderr, " -ps, --print-special [%-7s] print special tokens\n", params.print_special ? "true" : "false"); + fprintf(stderr, " -kc, --keep-context [%-7s] keep context between audio chunks\n", params.no_context ? "false" : "true"); + fprintf(stderr, " -l LANG, --language LANG [%-7s] spoken language\n", params.language.c_str()); + fprintf(stderr, " -m FNAME, --model FNAME [%-7s] model path\n", params.model.c_str()); + fprintf(stderr, " -f FNAME, --file FNAME [%-7s] text output file name\n", params.fname_out.c_str()); + fprintf(stderr, " -tdrz, --tinydiarize [%-7s] enable tinydiarize (requires a tdrz model)\n", params.tinydiarize ? "true" : "false"); + fprintf(stderr, " -sa, --save-audio [%-7s] save the recorded audio to a file\n", params.save_audio ? "true" : "false"); + fprintf(stderr, " -ng, --no-gpu [%-7s] disable GPU inference\n", params.use_gpu ? "false" : "true"); + fprintf(stderr, " -fa, --flash-attn [%-7s] enable flash attention during inference\n", params.flash_attn ? "true" : "false"); + fprintf(stderr, " -nfa, --no-flash-attn [%-7s] disable flash attention during inference\n", params.flash_attn ? 
"false" : "true"); + fprintf(stderr, " -i PATH, --input PATH [%-7s] input path ('-' for stdin)\n", params.input.c_str()); + fprintf(stderr, " --format FMT [%-7s] input format: f32 or s16 (little-endian)\n", params.format.c_str()); + fprintf(stderr, " --sample-rate N [%-7d] input sample rate (must be 16000)\n", params.sample_rate); + fprintf(stderr, " --debug [%-7s] enable debug logging\n", params.debug ? "true" : "false"); + fprintf(stderr, " --vad [%-7s] enable VAD-based segmentation\n", params.use_vad ? "true" : "false"); + fprintf(stderr, " --vad-probe-ms N [%-5d] VAD probe chunk size (ms)\n", params.vad_probe_ms); + fprintf(stderr, " --vad-silence-ms N [%-5d] silence duration to end segment (ms)\n", params.vad_silence_ms); + fprintf(stderr, " --vad-pre-roll-ms N[%-5d] audio prepended before VAD trigger (ms)\n", params.vad_pre_roll_ms); + fprintf(stderr, " -vm PATH, --vad-model PATH [%-5s] path to Silero VAD model (enables silero VAD)\n", params.vad_model.c_str()); + fprintf(stderr, "\n"); +} + +class pcm_async { +public: + pcm_async(int len_ms, int sample_rate, pcm_format format) + : m_len_ms(len_ms), m_sample_rate(sample_rate), m_format(format) { + m_running = false; + m_stop = false; + m_eof = false; + } + + ~pcm_async() { + pause(); + } + + bool init(const std::string & input_path) { +#if defined(_WIN32) + _setmode(_fileno(stdin), _O_BINARY); +#endif + + if (input_path == "-") { + m_in = stdin; + m_owns_input = false; + } else { + m_in = fopen(input_path.c_str(), "rb"); + m_owns_input = true; + } + + if (!m_in) { + fprintf(stderr, "%s: failed to open input '%s'\n", __func__, input_path.c_str()); + return false; + } + + m_audio.resize((m_sample_rate*m_len_ms)/1000); + m_audio_pos = 0; + m_audio_len = 0; + + return true; + } + + bool resume() { + if (!m_in) { + fprintf(stderr, "%s: no input to resume!\n", __func__); + return false; + } + + if (m_running) { + fprintf(stderr, "%s: already running!\n", __func__); + return false; + } + + m_stop = false; + m_thread = std::thread(&pcm_async::reader_loop, this); + m_running = true; + + return true; + } + + bool pause() { + if (!m_running) { + return true; + } + + m_stop = true; + + if (m_owns_input && m_in) { + fclose(m_in); + m_in = nullptr; + } + + if (m_thread.joinable()) { + if (m_owns_input) { + m_thread.join(); + } else { + m_thread.detach(); + } + } + + m_running = false; + + return true; + } + + bool clear() { + std::lock_guard lock(m_mutex); + m_audio_pos = 0; + m_audio_len = 0; + m_audio_read = 0; + return true; + } + + void get(int ms, std::vector & result) { + result.clear(); + + std::lock_guard lock(m_mutex); + + if (ms <= 0) { + ms = m_len_ms; + } + + size_t n_samples = (m_sample_rate * ms) / 1000; + if (n_samples > m_audio_len) { + n_samples = m_audio_len; + } + + result.resize(n_samples); + + int s0 = (int) m_audio_pos - (int) n_samples; + if (s0 < 0) { + s0 += (int) m_audio.size(); + } + + if (s0 + (int) n_samples > (int) m_audio.size()) { + const size_t n0 = m_audio.size() - (size_t) s0; + memcpy(result.data(), &m_audio[s0], n0 * sizeof(float)); + memcpy(&result[n0], &m_audio[0], (n_samples - n0) * sizeof(float)); + } else { + memcpy(result.data(), &m_audio[s0], n_samples * sizeof(float)); + } + } + + void pop_ms(int ms, std::vector & result) { + result.clear(); + + std::lock_guard lock(m_mutex); + + if (ms <= 0) { + ms = m_len_ms; + } + + size_t n_samples = (m_sample_rate * ms) / 1000; + if (n_samples > m_audio_len) { + n_samples = m_audio_len; + } + + if (n_samples == 0) { + return; + } + + result.resize(n_samples); + + 
size_t s0 = m_audio_read; + if (s0 + n_samples > m_audio.size()) { + const size_t n0 = m_audio.size() - s0; + memcpy(result.data(), &m_audio[s0], n0 * sizeof(float)); + memcpy(&result[n0], &m_audio[0], (n_samples - n0) * sizeof(float)); + } else { + memcpy(result.data(), &m_audio[s0], n_samples * sizeof(float)); + } + + m_audio_read = (m_audio_read + n_samples) % m_audio.size(); + m_audio_len -= n_samples; + } + + size_t available_samples() const { + std::lock_guard lock(m_mutex); + return m_audio_len; + } + + bool is_eof() const { + return m_eof.load(); + } + +private: + void reader_loop() { + const size_t bytes_per_sample = (m_format == pcm_format::f32) ? 4 : 2; + std::vector buffer(4096); + std::vector carry; + + while (!m_stop) { + size_t n_read = fread(buffer.data(), 1, buffer.size(), m_in); + + if (n_read == 0) { + if (feof(m_in)) { + m_eof = true; + } + break; + } + + std::vector data; + data.reserve(carry.size() + n_read); + if (!carry.empty()) { + data.insert(data.end(), carry.begin(), carry.end()); + carry.clear(); + } + data.insert(data.end(), buffer.begin(), buffer.begin() + n_read); + + const size_t total_bytes = data.size(); + const size_t n_samples = total_bytes / bytes_per_sample; + const size_t rem = total_bytes % bytes_per_sample; + + if (rem > 0) { + carry.insert(carry.end(), data.end() - rem, data.end()); + } + + if (n_samples == 0) { + continue; + } + + std::vector samples(n_samples); + + if (m_format == pcm_format::f32) { + for (size_t i = 0; i < n_samples; ++i) { + float v = 0.0f; + memcpy(&v, &data[i * 4], sizeof(float)); + samples[i] = v; + } + } else { + for (size_t i = 0; i < n_samples; ++i) { + int16_t v = 0; + memcpy(&v, &data[i * 2], sizeof(int16_t)); + samples[i] = v / 32768.0f; + } + } + + push_samples(samples.data(), samples.size()); + } + } + + void push_samples(const float * data, size_t n_samples) { + if (n_samples == 0) { + return; + } + + std::lock_guard lock(m_mutex); + + if (n_samples > m_audio.size()) { + data += (n_samples - m_audio.size()); + n_samples = m_audio.size(); + } + + if (n_samples > m_audio.size() - m_audio_len) { + const size_t drop = n_samples - (m_audio.size() - m_audio_len); + m_audio_read = (m_audio_read + drop) % m_audio.size(); + m_audio_len -= drop; + } + + if (m_audio_pos + n_samples > m_audio.size()) { + const size_t n0 = m_audio.size() - m_audio_pos; + memcpy(&m_audio[m_audio_pos], data, n0 * sizeof(float)); + memcpy(&m_audio[0], data + n0, (n_samples - n0) * sizeof(float)); + } else { + memcpy(&m_audio[m_audio_pos], data, n_samples * sizeof(float)); + } + + m_audio_pos = (m_audio_pos + n_samples) % m_audio.size(); + m_audio_len = std::min(m_audio_len + n_samples, m_audio.size()); + } + + FILE * m_in = nullptr; + bool m_owns_input = false; + + int m_len_ms = 0; + int m_sample_rate = 0; + pcm_format m_format = pcm_format::f32; + + std::atomic_bool m_running; + std::atomic_bool m_stop; + std::atomic_bool m_eof; + + mutable std::mutex m_mutex; + + std::vector m_audio; + size_t m_audio_pos = 0; + size_t m_audio_len = 0; + size_t m_audio_read = 0; + + std::thread m_thread; +}; + +static std::atomic_bool g_running(true); + +static void signal_handler(int) { + g_running = false; +} + +int main(int argc, char ** argv) { + ggml_backend_load_all(); + + whisper_params params; + + if (whisper_params_parse(argc, argv, params) == false) { + return 1; + } + + if (params.sample_rate != WHISPER_SAMPLE_RATE) { + fprintf(stderr, "error: only --sample-rate %d is supported (got %d). 
resample before streaming.\n", + WHISPER_SAMPLE_RATE, params.sample_rate); + return 1; + } + + pcm_format input_format = pcm_format::f32; + if (params.format == "f32") { + input_format = pcm_format::f32; + } else if (params.format == "s16") { + input_format = pcm_format::s16; + } else { + fprintf(stderr, "error: unknown --format '%s' (expected f32 or s16)\n", params.format.c_str()); + return 1; + } + + if (!params.use_vad) { + if (params.step_ms <= 0) { + fprintf(stderr, "error: --step must be > 0 unless --vad is enabled\n"); + return 1; + } + params.keep_ms = std::min(params.keep_ms, params.step_ms); + params.length_ms = std::max(params.length_ms, params.step_ms); + } else { + if (params.length_ms <= 0) { + params.length_ms = 5000; + } + params.keep_ms = 0; + } + + const bool use_vad = params.use_vad; + const int n_samples_step = use_vad ? 0 : (1e-3*params.step_ms )*WHISPER_SAMPLE_RATE; + const int n_samples_len = (1e-3*params.length_ms)*WHISPER_SAMPLE_RATE; + const int n_samples_keep = (1e-3*params.keep_ms )*WHISPER_SAMPLE_RATE; + const int n_samples_30s = (1e-3*30000.0 )*WHISPER_SAMPLE_RATE; + + const int n_new_line = !use_vad ? std::max(1, params.length_ms / params.step_ms - 1) : 1; // number of steps to print new line + + params.no_timestamps = !use_vad; + params.no_context |= use_vad; + params.max_tokens = 0; + + std::signal(SIGINT, signal_handler); + + // init audio + pcm_async audio(params.length_ms, WHISPER_SAMPLE_RATE, input_format); + if (!audio.init(params.input)) { + fprintf(stderr, "%s: audio.init() failed!\n", __func__); + return 1; + } + + auto debug_log = [&](const char * fmt, ...) { + if (!params.debug) { + return; + } + va_list args; + va_start(args, fmt); + vfprintf(stderr, fmt, args); + va_end(args); + }; + + // whisper init + if (params.language != "auto" && whisper_lang_id(params.language.c_str()) == -1){ + fprintf(stderr, "error: unknown language '%s'\n", params.language.c_str()); + whisper_print_usage(argc, argv, params); + exit(0); + } + + struct whisper_context_params cparams = whisper_context_default_params(); + + cparams.use_gpu = params.use_gpu; + cparams.flash_attn = params.flash_attn; + + struct whisper_context * ctx = whisper_init_from_file_with_params(params.model.c_str(), cparams); + if (ctx == nullptr) { + fprintf(stderr, "error: failed to initialize whisper context\n"); + return 2; + } + + // init Silero VAD context (if requested) + struct whisper_vad_context * vad_ctx = nullptr; + if (use_vad && !params.vad_model.empty()) { + struct whisper_vad_context_params vparams = whisper_vad_default_context_params(); + vparams.n_threads = params.n_threads; + vparams.use_gpu = false; // Silero VAD is tiny, always runs on CPU + vad_ctx = whisper_vad_init_from_file_with_params(params.vad_model.c_str(), vparams); + if (!vad_ctx) { + fprintf(stderr, "error: failed to init VAD model '%s'\n", params.vad_model.c_str()); + whisper_free(ctx); + return 2; + } + } + + std::vector pcmf32 (n_samples_30s, 0.0f); + std::vector pcmf32_old; + std::vector pcmf32_new(n_samples_30s, 0.0f); + + std::vector prompt_tokens; + + // print some info about the processing + { + fprintf(stderr, "\n"); + if (!whisper_is_multilingual(ctx)) { + if (params.language != "en" || params.translate) { + params.language = "en"; + params.translate = false; + fprintf(stderr, "%s: WARNING: model is not multilingual, ignoring language and translation options\n", __func__); + } + } + fprintf(stderr, "%s: processing %d samples (step = %.1f sec / len = %.1f sec / keep = %.1f sec), %d threads, lang = %s, task = 
%s, timestamps = %d ...\n", + __func__, + n_samples_step, + float(n_samples_step)/WHISPER_SAMPLE_RATE, + float(n_samples_len )/WHISPER_SAMPLE_RATE, + float(n_samples_keep)/WHISPER_SAMPLE_RATE, + params.n_threads, + params.language.c_str(), + params.translate ? "translate" : "transcribe", + params.no_timestamps ? 0 : 1); + + if (!use_vad) { + fprintf(stderr, "%s: n_new_line = %d, no_context = %d\n", __func__, n_new_line, params.no_context); + } else { + fprintf(stderr, "%s: using VAD (%s), will transcribe on speech activity\n", __func__, vad_ctx ? "silero" : "simple"); + } + + fprintf(stderr, "\n"); + } + + debug_log("debug: input='%s' format=%s sample_rate=%d step_ms=%d length_ms=%d keep_ms=%d vad=%d probe_ms=%d silence_ms=%d pre_roll_ms=%d last_ms=%d\n", + params.input.c_str(), params.format.c_str(), params.sample_rate, params.step_ms, params.length_ms, params.keep_ms, + params.use_vad ? 1 : 0, params.vad_probe_ms, params.vad_silence_ms, params.vad_pre_roll_ms, std::max(1, std::min(1000, params.vad_probe_ms / 2))); + + int n_iter = 0; + + bool is_running = true; + + std::ofstream fout; + if (params.fname_out.length() > 0) { + fout.open(params.fname_out); + if (!fout.is_open()) { + fprintf(stderr, "%s: failed to open output file '%s'!\n", __func__, params.fname_out.c_str()); + return 1; + } + } + + wav_writer wavWriter; + // save wav file + if (params.save_audio) { + // Get current date/time for filename + time_t now = time(0); + char buffer[80]; + strftime(buffer, sizeof(buffer), "%Y%m%d%H%M%S", localtime(&now)); + std::string filename = std::string(buffer) + ".wav"; + + wavWriter.open(filename, WHISPER_SAMPLE_RATE, 16, 1); + } + + audio.resume(); + printf("[Start streaming]\n"); + fflush(stdout); + + int64_t total_samples = 0; + + // main audio loop + auto run_inference = [&](const std::vector & audio_buf, int64_t seg_start_sample, int64_t seg_end_sample) -> bool { + if (audio_buf.empty()) { + return true; + } + + whisper_full_params wparams = whisper_full_default_params(params.beam_size > 1 ? WHISPER_SAMPLING_BEAM_SEARCH : WHISPER_SAMPLING_GREEDY); + + wparams.print_progress = false; + wparams.print_special = params.print_special; + wparams.print_realtime = false; + wparams.print_timestamps = !params.no_timestamps; + wparams.translate = params.translate; + wparams.single_segment = !use_vad; + wparams.max_tokens = params.max_tokens; + wparams.language = params.language.c_str(); + wparams.n_threads = params.n_threads; + wparams.beam_search.beam_size = params.beam_size; + + wparams.audio_ctx = params.audio_ctx; + + wparams.tdrz_enable = params.tinydiarize; // [TDRZ] + + // disable temperature fallback + //wparams.temperature_inc = -1.0f; + wparams.temperature_inc = params.no_fallback ? 0.0f : wparams.temperature_inc; + + wparams.prompt_tokens = params.no_context ? nullptr : prompt_tokens.data(); + wparams.prompt_n_tokens = params.no_context ? 
0 : (int) prompt_tokens.size(); + + if (whisper_full(ctx, wparams, audio_buf.data(), (int) audio_buf.size()) != 0) { + fprintf(stderr, "%s: failed to process audio\n", argv[0]); + return false; + } + + // print result; + { + if (!use_vad) { + printf("\33[2K\r"); + + // print long empty line to clear the previous line + printf("%s", std::string(100, ' ').c_str()); + + printf("\33[2K\r"); + } else { + const int64_t t0_ms = std::max(0, seg_start_sample * 1000 / WHISPER_SAMPLE_RATE); + const int64_t t1_ms = std::max(0, seg_end_sample * 1000 / WHISPER_SAMPLE_RATE); + + printf("\n"); + printf("### Transcription %d START | t0 = %d ms | t1 = %d ms\n", n_iter, (int) t0_ms, (int) t1_ms); + printf("\n"); + } + + const int n_segments = whisper_full_n_segments(ctx); + for (int i = 0; i < n_segments; ++i) { + const char * text = whisper_full_get_segment_text(ctx, i); + + if (params.no_timestamps) { + printf("%s", text); + fflush(stdout); + + if (params.fname_out.length() > 0) { + fout << text; + } + } else { + const int64_t t0 = whisper_full_get_segment_t0(ctx, i); + const int64_t t1 = whisper_full_get_segment_t1(ctx, i); + + std::string output = "[" + to_timestamp(t0, false) + " --> " + to_timestamp(t1, false) + "] " + text; + + if (whisper_full_get_segment_speaker_turn_next(ctx, i)) { + output += " [SPEAKER_TURN]"; + } + + output += "\n"; + + printf("%s", output.c_str()); + fflush(stdout); + + if (params.fname_out.length() > 0) { + fout << output; + } + } + } + + if (params.fname_out.length() > 0) { + fout << std::endl; + } + + if (use_vad) { + printf("\n"); + printf("### Transcription %d END\n", n_iter); + } + } + + ++n_iter; + fflush(stdout); + return true; + }; + + std::vector pre_roll; + std::vector speech_buf; + int64_t segment_start_sample = 0; + size_t silence_samples = 0; + bool in_speech = false; + + const int vad_probe_ms = std::max(1, params.vad_probe_ms); + const int vad_last_ms = std::max(1, std::min(1000, vad_probe_ms / 2)); + const size_t vad_pre_roll_samples = (size_t) (WHISPER_SAMPLE_RATE * std::max(0, params.vad_pre_roll_ms) / 1000); + const size_t vad_silence_samples = (size_t) (WHISPER_SAMPLE_RATE * std::max(0, params.vad_silence_ms) / 1000); + const size_t vad_max_segment_samples = (size_t) n_samples_len; + + // main audio loop + while (is_running && g_running) { + if (!use_vad) { + const size_t available = audio.available_samples(); + if (available < (size_t) n_samples_step) { + if (audio.is_eof()) { + if (available == 0) { + break; + } + } else { + std::this_thread::sleep_for(std::chrono::milliseconds(5)); + continue; + } + } + + audio.pop_ms(params.step_ms, pcmf32_new); + debug_log("debug: step audio.pop -> %zu samples (available=%zu eof=%d)\n", + pcmf32_new.size(), audio.available_samples(), audio.is_eof() ? 
1 : 0); + + if (pcmf32_new.empty()) { + if (audio.is_eof()) { + break; + } + continue; + } + + total_samples += pcmf32_new.size(); + + if (params.save_audio && !pcmf32_new.empty()) { + wavWriter.write(pcmf32_new.data(), pcmf32_new.size()); + debug_log("debug: save_audio wrote %zu samples (step)\n", pcmf32_new.size()); + } + + const int n_samples_new = (int) pcmf32_new.size(); + const int n_samples_take = std::min((int) pcmf32_old.size(), std::max(0, n_samples_keep + n_samples_len - n_samples_new)); + + pcmf32.resize(n_samples_new + n_samples_take); + + for (int i = 0; i < n_samples_take; i++) { + pcmf32[i] = pcmf32_old[pcmf32_old.size() - n_samples_take + i]; + } + + memcpy(pcmf32.data() + n_samples_take, pcmf32_new.data(), n_samples_new*sizeof(float)); + + pcmf32_old = pcmf32; + + if (!run_inference(pcmf32, -1, -1)) { + return 6; + } + + if (n_iter % n_new_line == 0) { + printf("\n"); + + // keep part of the audio for next iteration to try to mitigate word boundary issues + if (n_samples_keep > 0 && (int) pcmf32.size() >= n_samples_keep) { + pcmf32_old = std::vector(pcmf32.end() - n_samples_keep, pcmf32.end()); + } + + // Add tokens of the last full length segment as the prompt + if (!params.no_context) { + prompt_tokens.clear(); + + const int n_segments = whisper_full_n_segments(ctx); + for (int i = 0; i < n_segments; ++i) { + const int token_count = whisper_full_n_tokens(ctx, i); + for (int j = 0; j < token_count; ++j) { + prompt_tokens.push_back(whisper_full_get_token_id(ctx, i, j)); + } + } + } + } + } else { + const size_t available = audio.available_samples(); + if (available == 0) { + if (audio.is_eof()) { + if (in_speech && !speech_buf.empty()) { + const int64_t seg_end_sample = segment_start_sample + (int64_t) speech_buf.size(); + if (!run_inference(speech_buf, segment_start_sample, seg_end_sample)) { + return 6; + } + speech_buf.clear(); + in_speech = false; + } + break; + } + std::this_thread::sleep_for(std::chrono::milliseconds(5)); + continue; + } + + audio.pop_ms(vad_probe_ms, pcmf32_new); + debug_log("debug: vad probe audio.pop -> %zu samples (available=%zu eof=%d)\n", + pcmf32_new.size(), audio.available_samples(), audio.is_eof() ? 1 : 0); + + if (pcmf32_new.empty()) { + continue; + } + + total_samples += pcmf32_new.size(); + + if (params.save_audio && !pcmf32_new.empty()) { + wavWriter.write(pcmf32_new.data(), pcmf32_new.size()); + debug_log("debug: save_audio wrote %zu samples (probe)\n", pcmf32_new.size()); + } + + bool silence; + if (vad_ctx) { + // Silero VAD: run model on probe chunk, average probs + if (whisper_vad_detect_speech(vad_ctx, pcmf32_new.data(), (int) pcmf32_new.size())) { + const int n_probs = whisper_vad_n_probs(vad_ctx); + const float * probs = whisper_vad_probs(vad_ctx); + float avg = 0.0f; + for (int i = 0; i < n_probs; ++i) { + avg += probs[i]; + } + avg = (n_probs > 0) ? avg / n_probs : 0.0f; + silence = (avg < params.vad_thold); + debug_log("debug: silero vad avg=%.3f thold=%.2f silence=%d\n", avg, params.vad_thold, silence ? 1 : 0); + } else { + // detect_speech failed — treat as silence to avoid false triggers + silence = true; + debug_log("debug: silero vad detect_speech failed, treating as silence\n"); + } + } else { + silence = ::vad_simple(pcmf32_new, WHISPER_SAMPLE_RATE, vad_last_ms, params.vad_thold, params.freq_thold, false); + debug_log("debug: vad silence=%d (last_ms=%d)\n", silence ? 
1 : 0, vad_last_ms);
+            }
+
+            // state machine: enter speech on the first non-silent probe, leave after
+            // enough trailing silence or when the segment reaches its maximum length
+            if (!in_speech) {
+                if (!silence) {
+                    in_speech = true;
+                    silence_samples = 0;
+                    speech_buf.clear();
+                    if (!pre_roll.empty()) {
+                        speech_buf.insert(speech_buf.end(), pre_roll.begin(), pre_roll.end());
+                    }
+                    speech_buf.insert(speech_buf.end(), pcmf32_new.begin(), pcmf32_new.end());
+                    segment_start_sample = total_samples - (int64_t) speech_buf.size();
+                    debug_log("debug: vad speech start (segment_start_sample=%lld)\n", (long long) segment_start_sample);
+                }
+            } else {
+                speech_buf.insert(speech_buf.end(), pcmf32_new.begin(), pcmf32_new.end());
+                if (!silence) {
+                    silence_samples = 0;
+                } else {
+                    silence_samples += pcmf32_new.size();
+                }
+
+                if (speech_buf.size() >= vad_max_segment_samples || silence_samples >= vad_silence_samples) {
+                    const int64_t seg_end_sample = segment_start_sample + (int64_t) speech_buf.size();
+                    if (!run_inference(speech_buf, segment_start_sample, seg_end_sample)) {
+                        return 6;
+                    }
+                    speech_buf.clear();
+                    in_speech = false;
+                    silence_samples = 0;
+                    debug_log("debug: vad speech end (segment_end_sample=%lld)\n", (long long) seg_end_sample);
+                }
+            }
+
+            // keep a rolling pre-roll buffer so the start of an utterance is not clipped
+            if (vad_pre_roll_samples > 0) {
+                pre_roll.insert(pre_roll.end(), pcmf32_new.begin(), pcmf32_new.end());
+                if (pre_roll.size() > vad_pre_roll_samples) {
+                    pre_roll.erase(pre_roll.begin(), pre_roll.end() - vad_pre_roll_samples);
+                }
+            }
+        }
+    }
+
+    audio.pause();
+
+    whisper_print_timings(ctx);
+    whisper_free(ctx);
+
+    if (vad_ctx) {
+        whisper_vad_free(vad_ctx);
+    }
+
+    return 0;
+}
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 09e77ea89c2..5503435fee5 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -110,3 +110,13 @@ target_compile_definitions(${VAD_TEST} PRIVATE
     SAMPLE_PATH="${PROJECT_SOURCE_DIR}/samples/jfk.wav")
 add_test(NAME ${VAD_TEST} COMMAND ${VAD_TEST})
 set_tests_properties(${VAD_TEST} PROPERTIES LABELS "base;en")
+
+if (WHISPER_BUILD_EXAMPLES)
+    set(TEST_TARGET test-stream-pcm)
+    add_executable(${TEST_TARGET} ${TEST_TARGET}.cpp)
+    target_compile_definitions(${TEST_TARGET} PRIVATE
+        WHISPER_STREAM_PCM_PATH="$<TARGET_FILE:whisper-stream-pcm>"
+        WHISPER_TEST_MODEL_PATH="${PROJECT_SOURCE_DIR}/models/for-tests-ggml-tiny.en.bin")
+    add_test(NAME ${TEST_TARGET} COMMAND ${TEST_TARGET})
+    set_tests_properties(${TEST_TARGET} PROPERTIES LABELS "tiny;stream")
+endif()
diff --git a/tests/test-stream-pcm.cpp b/tests/test-stream-pcm.cpp
new file mode 100644
index 00000000000..59c0f6b2492
--- /dev/null
+++ b/tests/test-stream-pcm.cpp
@@ -0,0 +1,100 @@
+#include <cerrno>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <filesystem>
+#include <fstream>
+#include <string>
+#include <vector>
+
+#if defined(_WIN32)
+#include <process.h>
+#endif
+#ifndef WHISPER_STREAM_PCM_PATH
+#error "WHISPER_STREAM_PCM_PATH is not defined"
+#endif
+
+#ifndef WHISPER_TEST_MODEL_PATH
+#error "WHISPER_TEST_MODEL_PATH is not defined"
+#endif
+
+static std::filesystem::path temp_pcm_path() {
+    std::error_code ec;
+    auto dir = std::filesystem::temp_directory_path(ec);
+    if (ec) {
+        return std::filesystem::path("whisper_stream_pcm_test.raw");
+    }
+    return dir / "whisper_stream_pcm_test.raw";
+}
+
+int main() {
+    const int sample_rate = 16000;
+    const int seconds = 2;
+    const size_t n_samples = (size_t) sample_rate * seconds;
+
+    // two seconds of f32 silence, enough to exercise the non-VAD streaming path
+    std::vector<float> zeros(n_samples, 0.0f);
+
+    auto pcm_path = temp_pcm_path();
+    pcm_path.make_preferred();
+    std::ofstream out(pcm_path, std::ios::binary);
+    if (!out.is_open()) {
+        fprintf(stderr, "failed to open temp PCM path: %s\n", pcm_path.string().c_str());
+        return 1;
+    }
+
+    out.write(reinterpret_cast<const char *>(zeros.data()), zeros.size() * sizeof(float));
+    out.close();
+
+    const std::string stream_bin = std::filesystem::path(WHISPER_STREAM_PCM_PATH).make_preferred().string();
+    const std::string model_path = std::filesystem::path(WHISPER_TEST_MODEL_PATH).make_preferred().string();
+
+    std::vector<std::string> args = {
+        stream_bin,
+        "-m", model_path,
+        "--input", pcm_path.string(),
+        "--format", "f32",
+        "--sample-rate", "16000",
+        "--step", "500",
+        "--length", "2000",
+        "-t", "1",
+        "-ng",
+    };
+
+    int rc = 1;
+#if defined(_WIN32)
+    // on Windows, spawn the binary directly; elsewhere fall back to std::system with quoted arguments
+    std::vector<const char *> argv;
+    argv.reserve(args.size() + 1);
+    for (const auto & arg : args) {
+        argv.push_back(arg.c_str());
+    }
+    argv.push_back(nullptr);
+
+    rc = (int) _spawnv(_P_WAIT, stream_bin.c_str(), argv.data());
+    if (rc == -1) {
+        fprintf(stderr, "failed to spawn whisper-stream-pcm: %s\n", std::strerror(errno));
+        rc = 1;
+    }
+#else
+    std::string cmd;
+    cmd.reserve(1024);
+    for (const auto & arg : args) {
+        if (!cmd.empty()) {
+            cmd += " ";
+        }
+        cmd += "\"";
+        cmd += arg;
+        cmd += "\"";
+    }
+    rc = std::system(cmd.c_str());
+#endif
+
+    std::error_code ec;
+    std::filesystem::remove(pcm_path, ec);
+
+    if (rc != 0) {
+        fprintf(stderr, "whisper-stream-pcm exited with code %d\n", rc);
+        return 1;
+    }
+
+    return 0;
+}