svapp  1.9
AudioGenerator.cpp
Go to the documentation of this file.
00001 /* -*- c-basic-offset: 4 indent-tabs-mode: nil -*-  vi:set ts=8 sts=4 sw=4: */
00002 
00003 /*
00004     Sonic Visualiser
00005     An audio file viewer and annotation editor.
00006     Centre for Digital Music, Queen Mary, University of London.
00007     This file copyright 2006 Chris Cannam.
00008     
00009     This program is free software; you can redistribute it and/or
00010     modify it under the terms of the GNU General Public License as
00011     published by the Free Software Foundation; either version 2 of the
00012     License, or (at your option) any later version.  See the file
00013     COPYING included with this distribution for more information.
00014 */
00015 
00016 #include "AudioGenerator.h"
00017 
00018 #include "base/TempDirectory.h"
00019 #include "base/PlayParameters.h"
00020 #include "base/PlayParameterRepository.h"
00021 #include "base/Pitch.h"
00022 #include "base/Exceptions.h"
00023 
00024 #include "data/model/NoteModel.h"
00025 #include "data/model/FlexiNoteModel.h"
00026 #include "data/model/DenseTimeValueModel.h"
00027 #include "data/model/SparseTimeValueModel.h"
00028 #include "data/model/SparseOneDimensionalModel.h"
00029 #include "data/model/NoteData.h"
00030 
00031 #include "ClipMixer.h"
00032 #include "ContinuousSynth.h"
00033 
00034 #include <iostream>
00035 #include <cmath>
00036 
00037 #include <QDir>
00038 #include <QFile>
00039 
// Fixed rendering quantum (in frames) used by the clip mixers and
// continuous synths; exposed to callers via getBlockSize().
const int
AudioGenerator::m_processingBlockSize = 1024;

// Temporary directory the bundled sample clips are copied into.
// Set lazily by initialiseSampleDir(); empty until then or on failure.
QString
AudioGenerator::m_sampleDir = "";
00045 
00046 //#define DEBUG_AUDIO_GENERATOR 1
00047 
AudioGenerator::AudioGenerator() :
    m_sourceSampleRate(0),
    m_targetChannelCount(1),
    m_waveType(0),
    m_soloing(false),
    m_channelBuffer(0),
    m_channelBufSiz(0),
    m_channelBufCount(0)
{
    // Copy the built-in sample clips into a temp directory; this is a
    // static no-op if another instance has already done it.
    initialiseSampleDir();

    // Rebuild the relevant clip mixer whenever the user assigns a
    // different playback clip to a model (see playClipIdChanged()).
    connect(PlayParameterRepository::getInstance(),
            SIGNAL(playClipIdChanged(const Playable *, QString)),
            this,
            SLOT(playClipIdChanged(const Playable *, QString)));
}
00064 
AudioGenerator::~AudioGenerator()
{
    // NOTE(review): any ClipMixer/ContinuousSynth instances still held
    // in the maps are not deleted here -- presumably clearModels() is
    // expected to have been called first; confirm against callers.
#ifdef DEBUG_AUDIO_GENERATOR
    SVDEBUG << "AudioGenerator::~AudioGenerator" << endl;
#endif
}
00071 
00072 void
00073 AudioGenerator::initialiseSampleDir()
00074 {
00075     if (m_sampleDir != "") return;
00076 
00077     try {
00078         m_sampleDir = TempDirectory::getInstance()->getSubDirectoryPath("samples");
00079     } catch (DirectoryCreationFailed f) {
00080         cerr << "WARNING: AudioGenerator::initialiseSampleDir:"
00081                   << " Failed to create temporary sample directory"
00082                   << endl;
00083         m_sampleDir = "";
00084         return;
00085     }
00086 
00087     QDir sampleResourceDir(":/samples", "*.wav");
00088 
00089     for (unsigned int i = 0; i < sampleResourceDir.count(); ++i) {
00090 
00091         QString fileName(sampleResourceDir[i]);
00092         QFile file(sampleResourceDir.filePath(fileName));
00093         QString target = QDir(m_sampleDir).filePath(fileName);
00094 
00095         if (!file.copy(target)) {
00096             cerr << "WARNING: AudioGenerator::getSampleDir: "
00097                       << "Unable to copy " << fileName
00098                       << " into temporary directory \""
00099                       << m_sampleDir << "\"" << endl;
00100         } else {
00101             QFile tf(target);
00102             tf.setPermissions(tf.permissions() |
00103                               QFile::WriteOwner |
00104                               QFile::WriteUser);
00105         }
00106     }
00107 }
00108 
bool
AudioGenerator::addModel(Model *model)
{
    // Register a model for playback.  Returns true if we will be able
    // to play it (directly, via a clip mixer, or via a synth).

    // Adopt the first model's sample rate as our source rate; after
    // that, a dense (audio) model still overrides it -- audio data
    // takes priority over synthesised material.
    if (m_sourceSampleRate == 0) {

        m_sourceSampleRate = model->getSampleRate();

    } else {

        DenseTimeValueModel *dtvm =
            dynamic_cast<DenseTimeValueModel *>(model);

        if (dtvm) {
            m_sourceSampleRate = model->getSampleRate();
            return true;
        }
    }

    // NOTE(review): if the same model is added twice, the map
    // assignments below overwrite (and leak) the previous mixer/synth
    // -- confirm whether callers can re-add a model.
    if (usesClipMixer(model)) {
        ClipMixer *mixer = makeClipMixerFor(model);
        if (mixer) {
            QMutexLocker locker(&m_mutex);
            m_clipMixerMap[model] = mixer;
            return true;
        }
    }

    if (usesContinuousSynth(model)) {
        ContinuousSynth *synth = makeSynthFor(model);
        if (synth) {
            QMutexLocker locker(&m_mutex);
            m_continuousSynthMap[model] = synth;
            return true;
        }
    }

    return false;
}
00147 
00148 void
00149 AudioGenerator::playClipIdChanged(const Playable *playable, QString)
00150 {
00151     const Model *model = dynamic_cast<const Model *>(playable);
00152     if (!model) {
00153         cerr << "WARNING: AudioGenerator::playClipIdChanged: playable "
00154                   << playable << " is not a supported model type"
00155                   << endl;
00156         return;
00157     }
00158 
00159     if (m_clipMixerMap.find(model) == m_clipMixerMap.end()) return;
00160 
00161     ClipMixer *mixer = makeClipMixerFor(model);
00162     if (mixer) {
00163         QMutexLocker locker(&m_mutex);
00164         m_clipMixerMap[model] = mixer;
00165     }
00166 }
00167 
00168 bool
00169 AudioGenerator::usesClipMixer(const Model *model)
00170 {
00171     bool clip = 
00172         (qobject_cast<const SparseOneDimensionalModel *>(model) ||
00173          qobject_cast<const NoteModel *>(model) ||
00174          qobject_cast<const FlexiNoteModel *>(model));
00175     return clip;
00176 }
00177 
00178 bool
00179 AudioGenerator::wantsQuieterClips(const Model *model)
00180 {
00181     // basically, anything that usually has sustain (like notes) or
00182     // often has multiple sounds at once (like notes) wants to use a
00183     // quieter level than simple click tracks
00184     bool does = 
00185         (qobject_cast<const NoteModel *>(model) ||
00186          qobject_cast<const FlexiNoteModel *>(model));
00187     return does;
00188 }
00189 
00190 bool
00191 AudioGenerator::usesContinuousSynth(const Model *model)
00192 {
00193     bool cont = 
00194         (qobject_cast<const SparseTimeValueModel *>(model));
00195     return cont;
00196 }
00197 
00198 ClipMixer *
00199 AudioGenerator::makeClipMixerFor(const Model *model)
00200 {
00201     QString clipId;
00202 
00203     const Playable *playable = model;
00204     if (!playable || !playable->canPlay()) return 0;
00205 
00206     PlayParameters *parameters =
00207         PlayParameterRepository::getInstance()->getPlayParameters(playable);
00208     if (parameters) {
00209         clipId = parameters->getPlayClipId();
00210     }
00211 
00212     std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): sample id = " << clipId << std::endl;
00213 
00214     if (clipId == "") {
00215         SVDEBUG << "AudioGenerator::makeClipMixerFor(" << model << "): no sample, skipping" << endl;
00216         return 0;
00217     }
00218 
00219     ClipMixer *mixer = new ClipMixer(m_targetChannelCount,
00220                                      m_sourceSampleRate,
00221                                      m_processingBlockSize);
00222 
00223     float clipF0 = Pitch::getFrequencyForPitch(60, 0, 440.0f); // required
00224 
00225     QString clipPath = QString("%1/%2.wav").arg(m_sampleDir).arg(clipId);
00226 
00227     float level = wantsQuieterClips(model) ? 0.5 : 1.0;
00228     if (!mixer->loadClipData(clipPath, clipF0, level)) {
00229         delete mixer;
00230         return 0;
00231     }
00232 
00233     std::cerr << "AudioGenerator::makeClipMixerFor(" << model << "): loaded clip " << clipId << std::endl;
00234 
00235     return mixer;
00236 }
00237 
00238 ContinuousSynth *
00239 AudioGenerator::makeSynthFor(const Model *model)
00240 {
00241     const Playable *playable = model;
00242     if (!playable || !playable->canPlay()) return 0;
00243 
00244     ContinuousSynth *synth = new ContinuousSynth(m_targetChannelCount,
00245                                                  m_sourceSampleRate,
00246                                                  m_processingBlockSize,
00247                                                  m_waveType);
00248 
00249     std::cerr << "AudioGenerator::makeSynthFor(" << model << "): created synth" << std::endl;
00250 
00251     return synth;
00252 }
00253 
00254 void
00255 AudioGenerator::removeModel(Model *model)
00256 {
00257     SparseOneDimensionalModel *sodm =
00258         dynamic_cast<SparseOneDimensionalModel *>(model);
00259     if (!sodm) return; // nothing to do
00260 
00261     QMutexLocker locker(&m_mutex);
00262 
00263     if (m_clipMixerMap.find(sodm) == m_clipMixerMap.end()) return;
00264 
00265     ClipMixer *mixer = m_clipMixerMap[sodm];
00266     m_clipMixerMap.erase(sodm);
00267     delete mixer;
00268 }
00269 
00270 void
00271 AudioGenerator::clearModels()
00272 {
00273     QMutexLocker locker(&m_mutex);
00274 
00275     while (!m_clipMixerMap.empty()) {
00276         ClipMixer *mixer = m_clipMixerMap.begin()->second;
00277         m_clipMixerMap.erase(m_clipMixerMap.begin());
00278         delete mixer;
00279     }
00280 }    
00281 
void
AudioGenerator::reset()
{
    // Reset playback state in every clip mixer and forget any pending
    // note-off events, so playback can restart cleanly.
    QMutexLocker locker(&m_mutex);

    cerr << "AudioGenerator::reset()" << endl;

    // NOTE(review): the continuous synths are not reset here -- confirm
    // whether ContinuousSynth carries per-run state that needs it.
    for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
        if (i->second) {
            i->second->reset();
        }
    }

    m_noteOffs.clear();
}
00297 
void
AudioGenerator::setTargetChannelCount(int targetChannelCount)
{
    // Set the number of output channels we mix into, propagating the
    // change to every clip mixer so it can re-distribute its output.
    if (m_targetChannelCount == targetChannelCount) return;

//    SVDEBUG << "AudioGenerator::setTargetChannelCount(" << targetChannelCount << ")" << endl;

    QMutexLocker locker(&m_mutex);
    m_targetChannelCount = targetChannelCount;

    // NOTE(review): the continuous synths were constructed with the old
    // channel count and are not updated here -- confirm whether they
    // need an equivalent setChannelCount call.
    for (ClipMixerMap::iterator i = m_clipMixerMap.begin(); i != m_clipMixerMap.end(); ++i) {
        if (i->second) i->second->setChannelCount(targetChannelCount);
    }
}
00312 
int
AudioGenerator::getBlockSize() const
{
    // The rendering quantum: mixClipModel/mixContinuousSynthModel only
    // render whole multiples of this many frames, so the play source
    // should issue mixModel calls in multiples of it.
    return m_processingBlockSize;
}
00318 
void
AudioGenerator::setSoloModelSet(std::set<Model *> s)
{
    // Restrict audible playback to the given models; every other model
    // is skipped in mixModel() until clearSoloModelSet() is called.
    QMutexLocker locker(&m_mutex);

    m_soloModelSet = s;
    m_soloing = true;
}
00327 
void
AudioGenerator::clearSoloModelSet()
{
    // Leave solo mode: all unmuted models become audible again.
    QMutexLocker locker(&m_mutex);

    m_soloModelSet.clear();
    m_soloing = false;
}
00336 
int
AudioGenerator::mixModel(Model *model, int startFrame, int frameCount,
                         float **buffer, int fadeIn, int fadeOut)
{
    // Mix up to frameCount frames of the given model into buffer (one
    // float array per target channel), starting at startFrame and
    // honouring the model's play parameters (mute/solo/gain/pan).
    // Returns the number of frames produced; note that the "nothing to
    // do" paths also return frameCount, so the caller cannot
    // distinguish "mixed" from "skipped" by the return value alone.
    if (m_sourceSampleRate == 0) {
        cerr << "WARNING: AudioGenerator::mixModel: No base source sample rate available" << endl;
        return frameCount;
    }

    QMutexLocker locker(&m_mutex);

    Playable *playable = model;
    if (!playable || !playable->canPlay()) return frameCount;

    PlayParameters *parameters =
        PlayParameterRepository::getInstance()->getPlayParameters(playable);
    if (!parameters) return frameCount;

    bool playing = !parameters->isPlayMuted();
    if (!playing) {
#ifdef DEBUG_AUDIO_GENERATOR
        cout << "AudioGenerator::mixModel(" << model << "): muted" << endl;
#endif
        return frameCount;
    }

    // In solo mode, only models in the solo set are audible
    if (m_soloing) {
        if (m_soloModelSet.find(model) == m_soloModelSet.end()) {
#ifdef DEBUG_AUDIO_GENERATOR
            cout << "AudioGenerator::mixModel(" << model << "): not one of the solo'd models" << endl;
#endif
            return frameCount;
        }
    }

    float gain = parameters->getPlayGain();
    float pan = parameters->getPlayPan();

    // Dispatch on model type: dense audio is mixed directly (with
    // fades); event models go through the clip mixer; curve models
    // through the continuous synth.
    DenseTimeValueModel *dtvm = dynamic_cast<DenseTimeValueModel *>(model);
    if (dtvm) {
        return mixDenseTimeValueModel(dtvm, startFrame, frameCount,
                                      buffer, gain, pan, fadeIn, fadeOut);
    }

    if (usesClipMixer(model)) {
        return mixClipModel(model, startFrame, frameCount,
                            buffer, gain, pan);
    }

    if (usesContinuousSynth(model)) {
        return mixContinuousSynthModel(model, startFrame, frameCount,
                                       buffer, gain, pan);
    }

    std::cerr << "AudioGenerator::mixModel: WARNING: Model " << model << " of type " << model->getTypeName() << " is marked as playable, but I have no mechanism to play it" << std::endl;

    return frameCount;
}
00395 
00396 int
00397 AudioGenerator::mixDenseTimeValueModel(DenseTimeValueModel *dtvm,
00398                                        int startFrame, int frames,
00399                                        float **buffer, float gain, float pan,
00400                                        int fadeIn, int fadeOut)
00401 {
00402     int maxFrames = frames + std::max(fadeIn, fadeOut);
00403 
00404     int modelChannels = dtvm->getChannelCount();
00405 
00406     if (m_channelBufSiz < maxFrames || m_channelBufCount < modelChannels) {
00407 
00408         for (int c = 0; c < m_channelBufCount; ++c) {
00409             delete[] m_channelBuffer[c];
00410         }
00411 
00412         delete[] m_channelBuffer;
00413         m_channelBuffer = new float *[modelChannels];
00414 
00415         for (int c = 0; c < modelChannels; ++c) {
00416             m_channelBuffer[c] = new float[maxFrames];
00417         }
00418 
00419         m_channelBufCount = modelChannels;
00420         m_channelBufSiz = maxFrames;
00421     }
00422 
00423     int got = 0;
00424 
00425     if (startFrame >= fadeIn/2) {
00426         got = dtvm->getData(0, modelChannels - 1,
00427                             startFrame - fadeIn/2,
00428                             frames + fadeOut/2 + fadeIn/2,
00429                             m_channelBuffer);
00430     } else {
00431         int missing = fadeIn/2 - startFrame;
00432 
00433         for (int c = 0; c < modelChannels; ++c) {
00434             m_channelBuffer[c] += missing;
00435         }
00436 
00437         if (missing > 0) {
00438             cerr << "note: channelBufSiz = " << m_channelBufSiz
00439                  << ", frames + fadeOut/2 = " << frames + fadeOut/2 
00440                  << ", startFrame = " << startFrame 
00441                  << ", missing = " << missing << endl;
00442         }
00443 
00444         got = dtvm->getData(0, modelChannels - 1,
00445                             startFrame,
00446                             frames + fadeOut/2,
00447                             m_channelBuffer);
00448 
00449         for (int c = 0; c < modelChannels; ++c) {
00450             m_channelBuffer[c] -= missing;
00451         }
00452 
00453         got += missing;
00454     }       
00455 
00456     for (int c = 0; c < m_targetChannelCount; ++c) {
00457 
00458         int sourceChannel = (c % modelChannels);
00459 
00460 //      SVDEBUG << "mixing channel " << c << " from source channel " << sourceChannel << endl;
00461 
00462         float channelGain = gain;
00463         if (pan != 0.0) {
00464             if (c == 0) {
00465                 if (pan > 0.0) channelGain *= 1.0 - pan;
00466             } else {
00467                 if (pan < 0.0) channelGain *= pan + 1.0;
00468             }
00469         }
00470 
00471         for (int i = 0; i < fadeIn/2; ++i) {
00472             float *back = buffer[c];
00473             back -= fadeIn/2;
00474             back[i] += (channelGain * m_channelBuffer[sourceChannel][i] * i) / fadeIn;
00475         }
00476 
00477         for (int i = 0; i < frames + fadeOut/2; ++i) {
00478             float mult = channelGain;
00479             if (i < fadeIn/2) {
00480                 mult = (mult * i) / fadeIn;
00481             }
00482             if (i > frames - fadeOut/2) {
00483                 mult = (mult * ((frames + fadeOut/2) - i)) / fadeOut;
00484             }
00485             float val = m_channelBuffer[sourceChannel][i];
00486             if (i >= got) val = 0.f;
00487             buffer[c][i] += mult * val;
00488         }
00489     }
00490 
00491     return got;
00492 }
00493   
int
AudioGenerator::mixClipModel(Model *model,
                             int startFrame, int frames,
                             float **buffer, float gain, float pan)
{
    // Render a discrete-event model (clicks or notes) through its
    // ClipMixer, one processing block at a time.  Returns the number of
    // frames rendered -- always a whole multiple of
    // m_processingBlockSize; any remainder of "frames" is dropped.
    ClipMixer *clipMixer = m_clipMixerMap[model];
    if (!clipMixer) return 0;

    int blocks = frames / m_processingBlockSize;
    

    // (Pre-existing comment, truncated at its start by extraction:)
    //buffer is a multiple of the plugin's buffer size doesn't mean
    //that we always get called for a multiple of it here (because it
    //also depends on the JACK block size).  how should we ensure that
    //all models write the same amount in to the mix, and that we
    //always have a multiple of the plugin buffer size?  I guess this
    //class has to be queryable for the plugin buffer size & the
    //callback play source has to use that as a multiple for all the
    //calls to mixModel

    int got = blocks * m_processingBlockSize;

#ifdef DEBUG_AUDIO_GENERATOR
    cout << "mixModel [clip]: frames " << frames
              << ", blocks " << blocks << endl;
#endif

    ClipMixer::NoteStart on;
    ClipMixer::NoteEnd off;

    // Pending note-off events for this model, carried across blocks
    // (and across calls) until their frame time is reached.
    NoteOffSet &noteOffs = m_noteOffs[model];

    // Per-channel write pointers, advanced one block per iteration
    float **bufferIndexes = new float *[m_targetChannelCount];

    for (int i = 0; i < blocks; ++i) {

        int reqStart = startFrame + i * m_processingBlockSize;

        NoteList notes;
        NoteExportable *exportable = dynamic_cast<NoteExportable *>(model);
        if (exportable) {
            notes = exportable->getNotesWithin(reqStart,
                                               reqStart + m_processingBlockSize);
        }

        std::vector<ClipMixer::NoteStart> starts;
        std::vector<ClipMixer::NoteEnd> ends;

        for (NoteList::const_iterator ni = notes.begin();
             ni != notes.end(); ++ni) {

            int noteFrame = ni->start;

            if (noteFrame < reqStart ||
                noteFrame >= reqStart + m_processingBlockSize) continue;

            // Flush any note-offs that fall due before this note starts,
            // clamping events from the past to the start of the block.
            while (noteOffs.begin() != noteOffs.end() &&
                   noteOffs.begin()->frame <= noteFrame) {

                int eventFrame = noteOffs.begin()->frame;
                if (eventFrame < reqStart) eventFrame = reqStart;

                off.frameOffset = eventFrame - reqStart;
                off.frequency = noteOffs.begin()->frequency;

#ifdef DEBUG_AUDIO_GENERATOR
                cerr << "mixModel [clip]: adding note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
#endif

                ends.push_back(off);
                noteOffs.erase(noteOffs.begin());
            }

            // Queue this note's start, and schedule its matching
            // note-off for noteFrame + duration.
            on.frameOffset = noteFrame - reqStart;
            on.frequency = ni->getFrequency();
            on.level = float(ni->velocity) / 127.0;
            on.pan = pan;

#ifdef DEBUG_AUDIO_GENERATOR
            cout << "mixModel [clip]: adding note at frame " << noteFrame << ", frame offset " << on.frameOffset << " frequency " << on.frequency << ", level " << on.level << endl;
#endif
            
            starts.push_back(on);
            noteOffs.insert
                (NoteOff(on.frequency, noteFrame + ni->duration));
        }

        // Flush any remaining note-offs that fall within this block
        while (noteOffs.begin() != noteOffs.end() &&
               noteOffs.begin()->frame <= reqStart + m_processingBlockSize) {

            int eventFrame = noteOffs.begin()->frame;
            if (eventFrame < reqStart) eventFrame = reqStart;

            off.frameOffset = eventFrame - reqStart;
            off.frequency = noteOffs.begin()->frequency;

#ifdef DEBUG_AUDIO_GENERATOR
            cerr << "mixModel [clip]: adding leftover note-off at frame " << eventFrame << " frame offset " << off.frameOffset << " frequency " << off.frequency << endl;
#endif

            ends.push_back(off);
            noteOffs.erase(noteOffs.begin());
        }

        for (int c = 0; c < m_targetChannelCount; ++c) {
            bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
        }

        clipMixer->mix(bufferIndexes, gain, starts, ends);
    }

    delete[] bufferIndexes;

    return got;
}
00611 
00612 int
00613 AudioGenerator::mixContinuousSynthModel(Model *model,
00614                                         int startFrame,
00615                                         int frames,
00616                                         float **buffer,
00617                                         float gain, 
00618                                         float pan)
00619 {
00620     ContinuousSynth *synth = m_continuousSynthMap[model];
00621     if (!synth) return 0;
00622 
00623     // only type we support here at the moment
00624     SparseTimeValueModel *stvm = qobject_cast<SparseTimeValueModel *>(model);
00625     if (stvm->getScaleUnits() != "Hz") return 0;
00626 
00627     int blocks = frames / m_processingBlockSize;
00628 
00630 
00631     int got = blocks * m_processingBlockSize;
00632 
00633 #ifdef DEBUG_AUDIO_GENERATOR
00634     cout << "mixModel [synth]: frames " << frames
00635               << ", blocks " << blocks << endl;
00636 #endif
00637     
00638     float **bufferIndexes = new float *[m_targetChannelCount];
00639 
00640     for (int i = 0; i < blocks; ++i) {
00641 
00642         int reqStart = startFrame + i * m_processingBlockSize;
00643 
00644         for (int c = 0; c < m_targetChannelCount; ++c) {
00645             bufferIndexes[c] = buffer[c] + i * m_processingBlockSize;
00646         }
00647 
00648         SparseTimeValueModel::PointList points = 
00649             stvm->getPoints(reqStart, reqStart + m_processingBlockSize);
00650 
00651         // by default, repeat last frequency
00652         float f0 = 0.f;
00653 
00654         // go straight to the last freq that is genuinely in this range
00655         for (SparseTimeValueModel::PointList::const_iterator itr = points.end();
00656              itr != points.begin(); ) {
00657             --itr;
00658             if (itr->frame >= reqStart &&
00659                 itr->frame < reqStart + m_processingBlockSize) {
00660                 f0 = itr->value;
00661                 break;
00662             }
00663         }
00664 
00665         // if we found no such frequency and the next point is further
00666         // away than twice the model resolution, go silent (same
00667         // criterion TimeValueLayer uses for ending a discrete curve
00668         // segment)
00669         if (f0 == 0.f) {
00670             SparseTimeValueModel::PointList nextPoints = 
00671                 stvm->getNextPoints(reqStart + m_processingBlockSize);
00672             if (nextPoints.empty() ||
00673                 nextPoints.begin()->frame > reqStart + 2 * stvm->getResolution()) {
00674                 f0 = -1.f;
00675             }
00676         }
00677 
00678 //        cerr << "f0 = " << f0 << endl;
00679 
00680         synth->mix(bufferIndexes,
00681                    gain,
00682                    pan,
00683                    f0);
00684     }
00685 
00686     delete[] bufferIndexes;
00687 
00688     return got;
00689 }
00690