#ifdef HAVE_DIRECTSHOW

#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN 1
#endif

#include <windows.h>
#include <mmsystem.h>
#include <cstring>
#include <sstream>

// The WinMM (waveIn) implementation of MicrophoneAudio.
class MicrophoneAudioDS : public MicrophoneAudio {
public:
  static void find_all_microphones_ds();
  friend void find_all_microphones_ds();

  virtual PT(MovieAudioCursor) open();

  // One capture buffer: a moveable storage block plus its WAVEHDR.
  struct AudioBuf {
    HGLOBAL _storage_gh;  HGLOBAL _header_gh;
    LPSTR _storage;       LPWAVEHDR _header;
  };
  typedef pvector<AudioBuf> AudioBuffers;
  static void delete_buffers(AudioBuffers &buffers);
  friend class MicrophoneAudioCursorDS;

  int _device_id, _manufacturer_id, _product_id;

  static TypeHandle get_class_type() { return _type_handle; }
  static void init_type() {
    MicrophoneAudio::init_type();
    register_type(_type_handle, "MicrophoneAudioDS",
                  MicrophoneAudio::get_class_type());
  }
  virtual TypeHandle get_type() const { return get_class_type(); }
  virtual TypeHandle force_init_type() { init_type(); return get_class_type(); }

private:
  static TypeHandle _type_handle;
};

TypeHandle MicrophoneAudioDS::_type_handle;
// The cursor that pulls samples out of an open WinMM capture device.
class MicrophoneAudioCursorDS : public MovieAudioCursor {
public:
  typedef MicrophoneAudioDS::AudioBuffers AudioBuffers;
  MicrophoneAudioCursorDS(MicrophoneAudioDS *src, AudioBuffers &bufs, HWAVEIN hwav);
  virtual ~MicrophoneAudioCursorDS();
  void cleanup();

  AudioBuffers _buffers;
  HWAVEIN _handle;
  int _samples_per_buffer;
  int _next;    // index of the next buffer to read from
  int _offset;  // samples already consumed from that buffer

  virtual void read_samples(int n, int16_t *data);
  virtual int ready() const;

  static TypeHandle get_class_type() { return _type_handle; }
  static void init_type() {
    MovieAudioCursor::init_type();
    register_type(_type_handle, "MicrophoneAudioCursorDS",
                  MovieAudioCursor::get_class_type());
  }
  virtual TypeHandle get_type() const { return get_class_type(); }
  virtual TypeHandle force_init_type() { init_type(); return get_class_type(); }

private:
  static TypeHandle _type_handle;
};

TypeHandle MicrophoneAudioCursorDS::_type_handle;
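// Typical use (illustrative only, not part of the original file): client
// code reaches these devices through the generic MicrophoneAudio interface.
// The static accessors get_num_options()/get_option() are assumed from the
// MicrophoneAudio base class; open() returns the cursor implemented below.
//
//   for (int i = 0; i < MicrophoneAudio::get_num_options(); ++i) {
//     PT(MicrophoneAudio) mic = MicrophoneAudio::get_option(i);
//     PT(MovieAudioCursor) cursor = mic->open();
//     // poll cursor->ready() and cursor->read_samples() as shown below
//   }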
/**
 * Finds all WinMM microphones and adds them to the global list.
 */
void MicrophoneAudioDS::
find_all_microphones_ds() {
  MMRESULT stat;
  WAVEINCAPS caps;
  WAVEFORMATEX format;
  static int freqs[] = { 11025, 22050, 44100, 48000, 0 };

  int ndevs = waveInGetNumDevs();
  for (int i=0; i<ndevs; i++) {
    stat = waveInGetDevCaps(i, &caps, sizeof(caps));
    if (stat != MMSYSERR_NOERROR) continue;
    for (int chan=1; chan<=2; chan++) {
      for (int fselect=0; freqs[fselect]; fselect++) {
        int freq = freqs[fselect];
        format.wFormatTag = WAVE_FORMAT_PCM;
        format.nChannels = chan;
        format.nSamplesPerSec = freq;
        format.nAvgBytesPerSec = freq * chan * 2;
        format.nBlockAlign = 2 * chan;
        format.wBitsPerSample = 16;
        format.cbSize = 0;
        // WAVE_FORMAT_QUERY only asks whether the format is supported;
        // no device handle is actually opened here.
        stat = waveInOpen(nullptr, i, &format, 0, 0, WAVE_FORMAT_QUERY);
        if (stat == MMSYSERR_NOERROR) {
          PT(MicrophoneAudioDS) p = new MicrophoneAudioDS();
          std::ostringstream name;
          name << "WaveIn: " << caps.szPname
               << " Chan:" << chan << " HZ:" << freq;
          p->set_name(name.str());
          // Record the probed device and format so open() can use them.
          p->_device_id = i;
          p->_manufacturer_id = caps.wMid;
          p->_product_id = caps.wPid;
          p->_rate = freq;
          p->_channels = chan;
          _all_microphones.push_back((MicrophoneAudioDS*)p);
        }
      }
    }
  }
}
void find_all_microphones_ds() {
  MicrophoneAudioDS::init_type();
  MicrophoneAudioCursorDS::init_type();
  MicrophoneAudioDS::find_all_microphones_ds();
}
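// Illustrative only (not part of the original file): the probe above relies
// on waveInOpen() with WAVE_FORMAT_QUERY, which validates a format without
// opening a handle.  A minimal standalone check for 16-bit PCM might look
// like this (device_supports is a hypothetical helper name):
//
//   static bool device_supports(UINT dev, int channels, int rate) {
//     WAVEFORMATEX fmt = {};
//     fmt.wFormatTag = WAVE_FORMAT_PCM;
//     fmt.nChannels = (WORD)channels;
//     fmt.nSamplesPerSec = rate;
//     fmt.wBitsPerSample = 16;
//     fmt.nBlockAlign = (WORD)(2 * channels);
//     fmt.nAvgBytesPerSec = rate * fmt.nBlockAlign;
//     return waveInOpen(nullptr, dev, &fmt, 0, 0, WAVE_FORMAT_QUERY)
//            == MMSYSERR_NOERROR;
//   }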
/**
 * Releases the global memory blocks backing a set of capture buffers.
 */
void MicrophoneAudioDS::
delete_buffers(AudioBuffers &buffers) {
  for (int i=0; i<(int)buffers.size(); i++) {
    AudioBuf &buf = buffers[i];
    if (buf._header_gh) {
      GlobalUnlock(buf._header_gh);
      GlobalFree(buf._header_gh);
    }
    if (buf._storage_gh) {
      GlobalUnlock(buf._storage_gh);
      GlobalFree(buf._storage_gh);
    }
  }
}
/**
 * Opens the device and starts recording, returning a cursor that reads the
 * captured samples.
 */
PT(MovieAudioCursor) MicrophoneAudioDS::
open() {
  // Choose a per-buffer sample count appropriate to the sampling rate.
  int samples;
  switch (_rate) {
  case 11025: samples=512;  break;
  case 22050: samples=1024; break;
  case 44100: samples=2048; break;
  default:    samples=2048; break;  // assumed fallback for rates not listed above
  }
  int bytes = _channels * samples * 2;

  // Allocate a ring of capture buffers.
  bool failed = false;
  AudioBuffers buffers;
  for (int i=0; i<64; i++) {
    AudioBuf buf;
    buf._storage = nullptr;
    buf._header = nullptr;
    buf._storage_gh = GlobalAlloc(GMEM_MOVEABLE | GMEM_SHARE, bytes);
    buf._header_gh  = GlobalAlloc(GMEM_MOVEABLE | GMEM_SHARE, sizeof(WAVEHDR));
    if (buf._storage_gh != 0) {
      buf._storage = (LPSTR)GlobalLock(buf._storage_gh);
    }
    if (buf._header_gh != 0) {
      buf._header = (LPWAVEHDR)GlobalLock(buf._header_gh);
    }
    if (buf._storage && buf._header) {
      ZeroMemory(buf._header, sizeof(WAVEHDR));
      buf._header->lpData = buf._storage;
      buf._header->dwBufferLength = bytes;
    } else {
      failed = true;
    }
    buffers.push_back(buf);
    if (failed) break;
  }
  if (failed) {
    delete_buffers(buffers);
    nassert_raise("Could not allocate audio input buffers.");
    return nullptr;
  }
  // Describe the 16-bit PCM capture format.
  WAVEFORMATEX format;
  format.wFormatTag = WAVE_FORMAT_PCM;
  format.nChannels = _channels;
  format.nSamplesPerSec = _rate;
  format.nAvgBytesPerSec = _rate * _channels * 2;
  format.nBlockAlign = 2 * _channels;
  format.wBitsPerSample = 16;
  format.cbSize = 0;

  // Open the input device.
  HWAVEIN hwav;
  MMRESULT stat = waveInOpen(&hwav, _device_id, &format, 0, 0, CALLBACK_NULL);
  if (stat != MMSYSERR_NOERROR) {
    delete_buffers(buffers);
    nassert_raise("Could not open audio input device.");
    return nullptr;
  }

  // Prepare and queue every buffer.
  for (int i=0; i<(int)buffers.size(); i++) {
    stat = waveInPrepareHeader(hwav, buffers[i]._header, sizeof(WAVEHDR));
    if (stat == MMSYSERR_NOERROR) {
      stat = waveInAddBuffer(hwav, buffers[i]._header, sizeof(WAVEHDR));
    }
    if (stat != MMSYSERR_NOERROR) {
      waveInClose(hwav);
      delete_buffers(buffers);
      nassert_raise("Could not queue buffers for audio input device.");
      return nullptr;
    }
  }

  // Start recording.
  stat = waveInStart(hwav);
  if (stat != MMSYSERR_NOERROR) {
    waveInClose(hwav);
    delete_buffers(buffers);
    nassert_raise("Could not start recording on input device.");
    return nullptr;
  }

  return new MicrophoneAudioCursorDS(this, buffers, hwav);
}
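// Lifecycle of the buffer ring: open() prepares and queues all 64 buffers,
// the driver fills them in order and sets WHDR_DONE on each completed
// WAVEHDR, and the cursor below drains a finished buffer before unpreparing,
// re-preparing and re-adding it so capture continues indefinitely.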
/**
 * Constructor: takes ownership of the prepared buffers and the open handle.
 */
MicrophoneAudioCursorDS::
MicrophoneAudioCursorDS(MicrophoneAudioDS *src, AudioBuffers &bufs, HWAVEIN hwav) :
  MovieAudioCursor(src),
  _buffers(bufs),
  _handle(hwav),
  _next(0),
  _offset(0)
{
  _audio_rate = src->get_rate();
  _audio_channels = src->get_channels();
  _can_seek_fast = false;
  _samples_per_buffer = bufs[0]._header->dwBufferLength / (2 * _audio_channels);
}
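// Sample bookkeeping throughout this file is in frames: dwBufferLength is a
// byte count, so _samples_per_buffer is that count divided by 2 bytes per
// sample and by the channel count, while _next and _offset track the current
// buffer and how many frames of it have already been handed out.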
/**
 * Stops capture, closes the device handle and frees the buffers.
 */
void MicrophoneAudioCursorDS::
cleanup() {
  if (_handle) {
    waveInStop(_handle);
    waveInClose(_handle);
    _handle = 0;
  }
  MicrophoneAudioDS::delete_buffers(_buffers);
}
/**
 * Destructor.
 */
MicrophoneAudioCursorDS::
~MicrophoneAudioCursorDS() {
  cleanup();
}
/**
 * Copies up to n samples into data, zero-filling whatever is not yet
 * available from the driver.
 */
void MicrophoneAudioCursorDS::
read_samples(int n, int16_t *data) {
  if (_handle) {
    while (n > 0) {
      // Stop as soon as the next buffer in the ring is not finished yet.
      int index = _next % _buffers.size();
      if ((_buffers[index]._header->dwFlags & WHDR_DONE)==0) {
        break;
      }

      // Locate the unread portion of the buffer.
      int16_t *src = (int16_t*)(_buffers[index]._storage);
      src += (_offset * _audio_channels);

      // Decide how many samples to take from this buffer.
      int samples = _samples_per_buffer - _offset;
      if (samples > n) samples = n;

      // Copy them out and advance the bookkeeping.
      memcpy(data, src, samples * 2 * _audio_channels);
      data += samples * _audio_channels;
      n -= samples;
      _offset += samples;
      _samples_read += samples;

      // Once a buffer is fully drained, hand it back to the driver.
      if (_offset == _samples_per_buffer) {
        _offset = 0;
        _next += 1;
        _buffers[index]._header->dwFlags &= ~(WHDR_DONE);
        MMRESULT stat = waveInUnprepareHeader(_handle, _buffers[index]._header, sizeof(WAVEHDR));
        if (stat == MMSYSERR_NOERROR) {
          stat = waveInPrepareHeader(_handle, _buffers[index]._header, sizeof(WAVEHDR));
        }
        if (stat == MMSYSERR_NOERROR) {
          stat = waveInAddBuffer(_handle, _buffers[index]._header, sizeof(WAVEHDR));
        }
        if (stat != MMSYSERR_NOERROR) {
          movies_cat.error()
            << "Could not requeue audio buffers, closing microphone.\n";
          cleanup();
          break;
        }
      }
    }
  }

  // Anything we could not supply is returned as silence.
  if (n > 0) {
    memset(data, 0, n * 2 * _audio_channels);
    _samples_read += n;
  }
}
/**
 * Returns the number of audio samples that are ready to read.
 */
int MicrophoneAudioCursorDS::
ready() const {
  if (_handle == 0) return 0;
  int total = 0;
  for (int i=0; i<(int)_buffers.size(); i++) {
    int index = (_next + i) % (_buffers.size());
    if ((_buffers[index]._header->dwFlags & WHDR_DONE)==0) {
      break;
    }
    total += _samples_per_buffer;
  }
  // Part of the first finished buffer may already have been consumed.
  return total - _offset;
}
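// Example polling loop (illustrative only): read whatever has accumulated
// since the last call.  audio_channels() is assumed from MovieAudioCursor.
//
//   int n = cursor->ready();
//   if (n > 0) {
//     std::vector<int16_t> pcm(n * cursor->audio_channels());
//     cursor->read_samples(n, &pcm[0]);
//   }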
#endif  // HAVE_DIRECTSHOW