App-MHFS


lib/App/MHFS.pm

        if($ipos != -1) {
            if(substr($self->{'client'}{'inbuf'}, 0, $ipos+2, '') =~ /^(([^\s]+)\s+([^\s]+)\s+(?:HTTP\/1\.([0-1])))\r\n/) {
                my $rl = $1;
                $self->{'method'}    = $2;
                $self->{'uri'}       = $3;
                $self->{'httpproto'} = $4;
                my $rid = int(clock_gettime(CLOCK_MONOTONIC) * rand()); # insecure uid
                $self->{'outheaders'}{'X-MHFS-REQUEST-ID'} = sprintf("%X", $rid);
                say "X-MHFS-CONN-ID: " . $self->{'outheaders'}{'X-MHFS-CONN-ID'} . " X-MHFS-REQUEST-ID: " . $self->{'outheaders'}{'X-MHFS-REQUEST-ID'};
                say "RECV: $rl";
                if(($self->{'method'} ne 'GET') && ($self->{'method'} ne 'HEAD') && ($self->{'method'} ne 'PUT')) {
                    say "X-MHFS-CONN-ID: " . $self->{'outheaders'}{'X-MHFS-CONN-ID'} . 'Invalid method: ' . $self->{'method'}. ', closing conn';
                    return undef;
                }
                my ($path, $querystring) = ($self->{'uri'} =~ /^([^\?]+)(?:\?)?(.*)$/g);
                say("raw path: $path\nraw querystring: $querystring");

                # transformations
                ## Path
                $path = uri_unescape($path);
                my %pathStruct = ( 'unescapepath' => $path );

lib/App/MHFS.pm

            }
            # redirect to slash path
            else {
                $self->SendRedirect(301, basename($requestfile).'/');
                return;
            }
        }
        $self->Send404;
    }

    sub PUTBuf_old {
        my ($self, $handler) = @_;
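        # Legacy body reader: accumulate the request body into $sdata across reads until Content-Length bytes have arrived.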
        if(length($self->{'client'}{'inbuf'}) < $self->{'header'}{'Content-Length'}) {
            $self->{'client'}->SetEvents(POLLIN | MHFS::EventLoop::Poll->ALWAYSMASK );
        }
        my $sdata;
        $self->{'on_read_ready'} = sub {
            my $contentlength = $self->{'header'}{'Content-Length'};
            $sdata .= $self->{'client'}{'inbuf'};
            my $dlength = length($sdata);
            if($dlength >= $contentlength) {
                say 'PUTBuf datalength ' . $dlength;
                my $data;
                if($dlength > $contentlength) {
                    $data = substr($sdata, 0, $contentlength);
                    $self->{'client'}{'inbuf'} = substr($sdata, $contentlength);
                    $dlength = length($data)
                }
                else {
                    $data = $sdata;
                    $self->{'client'}{'inbuf'} = '';
                }

lib/App/MHFS.pm

            }
            else {
                $self->{'client'}{'inbuf'} = '';
            }
            #return '';
            return 1;
        };
        $self->{'on_read_ready'}->();
    }

    sub PUTBuf {
        my ($self, $handler) = @_;
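        # Read the request body directly out of the client input buffer; refuse bodies larger than ~20 MB.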
        if($self->{'header'}{'Content-Length'} > 20000000) {
            say "PUTBuf too big";
            $self->{'client'}->SetEvents(POLLIN | MHFS::EventLoop::Poll->ALWAYSMASK );
            $self->{'on_read_ready'} = sub { return undef };
            return;
        }
        if(length($self->{'client'}{'inbuf'}) < $self->{'header'}{'Content-Length'}) {
            $self->{'client'}->SetEvents(POLLIN | MHFS::EventLoop::Poll->ALWAYSMASK );
        }
        $self->{'on_read_ready'} = sub {
            my $contentlength = $self->{'header'}{'Content-Length'};
            my $dlength = length($self->{'client'}{'inbuf'});
            if($dlength >= $contentlength) {
                say 'PUTBuf datalength ' . $dlength;
                my $data;
                if($dlength > $contentlength) {
                    $data = substr($self->{'client'}{'inbuf'}, 0, $contentlength, '');
                }
                else {
                    $data = $self->{'client'}{'inbuf'};
                    $self->{'client'}{'inbuf'} = '';
                }
                $self->{'on_read_ready'} = undef;
                $handler->($data);

share/public_html/static/music_inc/src/miniaudio.h

    +----------------------+----------------------------------------------------------------------------------------------------------------------------------+
    | MA_LOG_LEVEL [level] | Sets the logging level. Set level to one of the following:                                                                       |
    |                      |                                                                                                                                  |
    |                      |     ```                                                                                                                          |
    |                      |     MA_LOG_LEVEL_VERBOSE                                                                                                         |
    |                      |     MA_LOG_LEVEL_INFO                                                                                                            |
    |                      |     MA_LOG_LEVEL_WARNING                                                                                                         |
    |                      |     MA_LOG_LEVEL_ERROR                                                                                                           |
    |                      |     ```                                                                                                                          |
    +----------------------+----------------------------------------------------------------------------------------------------------------------------------+
    | MA_DEBUG_OUTPUT      | Enable printf() debug output.                                                                                                    |
    +----------------------+----------------------------------------------------------------------------------------------------------------------------------+
    | MA_COINIT_VALUE      | Windows only. The value to pass to internal calls to `CoInitializeEx()`. Defaults to `COINIT_MULTITHREADED`.                     |
    +----------------------+----------------------------------------------------------------------------------------------------------------------------------+
    | MA_API               | Controls how public APIs should be decorated. Defaults to `extern`.                                                              |
    +----------------------+----------------------------------------------------------------------------------------------------------------------------------+
    | MA_DLL               | If set, configures MA_API to either import or export APIs depending on whether or not the implementation is being defined. If    |
    |                      | defining the implementation, MA_API will be configured to export. Otherwise it will be configured to import. This has no effect  |
    |                      | if MA_API is defined externally.                                                                                                 |
    +----------------------+----------------------------------------------------------------------------------------------------------------------------------+
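
    A minimal sketch of how these options might be combined, assuming the usual single-header pattern where exactly one
    translation unit defines the implementation before including the header (the chosen values are only illustrative):

    ```
    #define MA_LOG_LEVEL      MA_LOG_LEVEL_WARNING       /* log warnings and errors only */
    #define MA_DEBUG_OUTPUT                              /* additionally printf() every log message */
    #define MA_COINIT_VALUE   COINIT_APARTMENTTHREADED   /* Windows only; default is COINIT_MULTITHREADED */
    #define MINIAUDIO_IMPLEMENTATION
    #include "miniaudio.h"
    ```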

share/public_html/static/music_inc/src/miniaudio.h

#endif
#ifdef MA_SUPPORT_OPENSL
        struct
        {
            ma_handle libOpenSLES;
            ma_handle SL_IID_ENGINE;
            ma_handle SL_IID_AUDIOIODEVICECAPABILITIES;
            ma_handle SL_IID_ANDROIDSIMPLEBUFFERQUEUE;
            ma_handle SL_IID_RECORD;
            ma_handle SL_IID_PLAY;
            ma_handle SL_IID_OUTPUTMIX;
            ma_proc   slCreateEngine;
        } opensl;
#endif
#ifdef MA_SUPPORT_WEBAUDIO
        struct
        {
            int _unused;
        } webaudio;
#endif
#ifdef MA_SUPPORT_NULL

share/public_html/static/music_inc/src/miniaudio.h

/* Posts a log message. */
static void ma_post_log_message(ma_context* pContext, ma_device* pDevice, ma_uint32 logLevel, const char* message)
{
    if (pContext == NULL) {
        if (pDevice != NULL) {
            pContext = pDevice->pContext;
        }
    }

    /* All logs must be output when debug output is enabled. */
#if defined(MA_DEBUG_OUTPUT)
    printf("%s: %s\n", ma_log_level_to_string(logLevel), message);
#endif

    if (pContext == NULL) {
        return;
    }
    
#if defined(MA_LOG_LEVEL)
    if (logLevel <= MA_LOG_LEVEL) {
        ma_log_proc onLog;

share/public_html/static/music_inc/src/miniaudio.h

        return 0;   /* We don't free anything here because we never allocate the object on the heap. */
    }

    return (ULONG)newRefCount;
}

static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceStateChanged(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, DWORD dwNewState)
{
    ma_bool32 isThisDevice = MA_FALSE;

#ifdef MA_DEBUG_OUTPUT
    /*printf("IMMNotificationClient_OnDeviceStateChanged(pDeviceID=%S, dwNewState=%u)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)", (unsigned int)dwNewState);*/
#endif

    if ((dwNewState & MA_MM_DEVICE_STATE_ACTIVE) != 0) {
        return S_OK;
    }

    /*
    There have been reports of a hang when a playback device is disconnected. The idea with this code is to explicitly stop the device if we detect
    that the device is disabled or has been unplugged.

share/public_html/static/music_inc/src/miniaudio.h


    if (isThisDevice) {
        ma_device_stop(pThis->pDevice);
    }
    
    return S_OK;
}

static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceAdded(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID)
{
#ifdef MA_DEBUG_OUTPUT
    /*printf("IMMNotificationClient_OnDeviceAdded(pDeviceID=%S)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)");*/
#endif

    /* We don't need to worry about this event for our purposes. */
    (void)pThis;
    (void)pDeviceID;
    return S_OK;
}

static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceRemoved(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID)
{
#ifdef MA_DEBUG_OUTPUT
    /*printf("IMMNotificationClient_OnDeviceRemoved(pDeviceID=%S)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)");*/
#endif

    /* We don't need to worry about this event for our purposes. */
    (void)pThis;
    (void)pDeviceID;
    return S_OK;
}

static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDefaultDeviceChanged(ma_IMMNotificationClient* pThis, ma_EDataFlow dataFlow, ma_ERole role, LPCWSTR pDefaultDeviceID)
{
#ifdef MA_DEBUG_OUTPUT
    /*printf("IMMNotificationClient_OnDefaultDeviceChanged(dataFlow=%d, role=%d, pDefaultDeviceID=%S)\n", dataFlow, role, (pDefaultDeviceID != NULL) ? pDefaultDeviceID : L"(NULL)");*/
#endif

    /* We only ever use the eConsole role in miniaudio. */
    if (role != ma_eConsole) {
        return S_OK;
    }

    /* We only care about devices with the same data flow and role as the current device. */
    if ((pThis->pDevice->type == ma_device_type_playback && dataFlow != ma_eRender) ||

share/public_html/static/music_inc/src/miniaudio.h

    if (dataFlow == ma_eCapture || pThis->pDevice->type == ma_device_type_loopback) {
        c89atomic_exchange_32(&pThis->pDevice->wasapi.hasDefaultCaptureDeviceChanged,  MA_TRUE);
    }

    (void)pDefaultDeviceID;
    return S_OK;
}

static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnPropertyValueChanged(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, const PROPERTYKEY key)
{
#ifdef MA_DEBUG_OUTPUT
    /*printf("IMMNotificationClient_OnPropertyValueChanged(pDeviceID=%S)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)");*/
#endif

    (void)pThis;
    (void)pDeviceID;
    (void)key;
    return S_OK;
}

static ma_IMMNotificationClientVtbl g_maNotificationCientVtbl = {

share/public_html/static/music_inc/src/miniaudio.h

                    UINT32 desiredPeriodInFrames = pData->periodSizeInFramesOut;
                    UINT32 actualPeriodInFrames  = desiredPeriodInFrames;

                    /* Make sure the period size is a multiple of fundamentalPeriodInFrames. */
                    actualPeriodInFrames = actualPeriodInFrames / fundamentalPeriodInFrames;
                    actualPeriodInFrames = actualPeriodInFrames * fundamentalPeriodInFrames;

                    /* The period needs to be clamped between minPeriodInFrames and maxPeriodInFrames. */
                    actualPeriodInFrames = ma_clamp(actualPeriodInFrames, minPeriodInFrames, maxPeriodInFrames);
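                    /* Illustrative numbers: desiredPeriodInFrames=480 with fundamentalPeriodInFrames=128 rounds down to
                       (480/128)*128 = 384 frames, which is then clamped to [minPeriodInFrames, maxPeriodInFrames]. */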

                #if defined(MA_DEBUG_OUTPUT)
                    printf("[WASAPI] Trying IAudioClient3_InitializeSharedAudioStream(actualPeriodInFrames=%d)\n", actualPeriodInFrames);
                    printf("    defaultPeriodInFrames=%d\n", defaultPeriodInFrames);
                    printf("    fundamentalPeriodInFrames=%d\n", fundamentalPeriodInFrames);
                    printf("    minPeriodInFrames=%d\n", minPeriodInFrames);
                    printf("    maxPeriodInFrames=%d\n", maxPeriodInFrames);
                #endif

                    /* If the client requested a largish buffer then we don't actually want to use low latency shared mode because it forces small buffers. */
                    if (actualPeriodInFrames >= desiredPeriodInFrames) {
                        /*
                        MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM | MA_AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY must not be in the stream flags. If either of these are specified,
                        IAudioClient3_InitializeSharedAudioStream() will fail.
                        */
                        hr = ma_IAudioClient3_InitializeSharedAudioStream(pAudioClient3, streamFlags & ~(MA_AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM | MA_AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY), actualPeriodInFrames, (WAVEFORMATEX*)&wf, NULL);
                        if (SUCCEEDED(hr)) {
                            wasInitializedUsingIAudioClient3 = MA_TRUE;
                            pData->periodSizeInFramesOut = actualPeriodInFrames;
                        #if defined(MA_DEBUG_OUTPUT)
                            printf("[WASAPI] Using IAudioClient3\n");
                            printf("    periodSizeInFramesOut=%d\n", pData->periodSizeInFramesOut);
                        #endif
                        } else {
                        #if defined(MA_DEBUG_OUTPUT)
                            printf("[WASAPI] IAudioClient3_InitializeSharedAudioStream failed. Falling back to IAudioClient.\n");
                        #endif    
                        }
                    } else {
                    #if defined(MA_DEBUG_OUTPUT)
                        printf("[WASAPI] Not using IAudioClient3 because the desired period size is larger than the maximum supported by IAudioClient3.\n");
                    #endif
                    }
                } else {
                #if defined(MA_DEBUG_OUTPUT)
                    printf("[WASAPI] IAudioClient3_GetSharedModeEnginePeriod failed. Falling back to IAudioClient.\n");
                #endif
                }

                ma_IAudioClient3_Release(pAudioClient3);
                pAudioClient3 = NULL;
            }
        }
#else
    #if defined(MA_DEBUG_OUTPUT)
        printf("[WASAPI] Not using IAudioClient3 because MA_WASAPI_NO_LOW_LATENCY_SHARED_MODE is enabled.\n");
    #endif
#endif

        /* If we don't have an IAudioClient3 then we need to use the normal initialization routine. */
        if (!wasInitializedUsingIAudioClient3) {
            MA_REFERENCE_TIME bufferDuration = periodDurationInMicroseconds * pData->periodsOut * 10;   /* <-- Multiply by 10 to convert microseconds to 100-nanosecond units. */
            hr = ma_IAudioClient_Initialize((ma_IAudioClient*)pData->pAudioClient, shareMode, streamFlags, bufferDuration, 0, (WAVEFORMATEX*)&wf, NULL);
            if (FAILED(hr)) {
                if (hr == E_ACCESSDENIED) {

share/public_html/static/music_inc/src/miniaudio.h

    }

    if (deviceType == ma_device_type_playback) {
        c89atomic_exchange_32(&pDevice->wasapi.hasDefaultPlaybackDeviceChanged, MA_FALSE);
    }
    if (deviceType == ma_device_type_capture || deviceType == ma_device_type_loopback) {
        c89atomic_exchange_32(&pDevice->wasapi.hasDefaultCaptureDeviceChanged,  MA_FALSE);
    }
    

    #ifdef MA_DEBUG_OUTPUT
        printf("=== CHANGING DEVICE ===\n");
    #endif

    result = ma_device_reinit__wasapi(pDevice, deviceType);
    if (result != MA_SUCCESS) {
        return result;
    }

    ma_device__post_init_setup(pDevice, deviceType);

share/public_html/static/music_inc/src/miniaudio.h

                        if (FAILED(hr)) {
                            ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from capture device in preparation for writing to the device.", ma_result_from_HRESULT(hr));
                            exitLoop = MA_TRUE;
                            break;
                        }


                        /* Overrun detection. */
                        if ((flagsCapture & MA_AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) != 0) {
                            /* Glitched. Probably due to an overrun. */
                        #ifdef MA_DEBUG_OUTPUT
                            printf("[WASAPI] Data discontinuity (possible overrun). framesAvailableCapture=%d, mappedBufferSizeInFramesCapture=%d\n", framesAvailableCapture, mappedDeviceBufferSizeInFramesCapture);
                        #endif

                            /*
                            Experiment: If we get an overrun it probably means we're straddling the end of the buffer. In order to prevent a never-ending sequence of glitches let's experiment
                            by dropping every frame until we're left with only a single period. To do this we just keep retrieving and immediately releasing buffers until we're down to the
                            last period.
                            */
                            if (framesAvailableCapture >= pDevice->wasapi.actualPeriodSizeInFramesCapture) {
                            #ifdef MA_DEBUG_OUTPUT
                                printf("[WASAPI] Synchronizing capture stream. ");
                            #endif
                                do
                                {
                                    hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, mappedDeviceBufferSizeInFramesCapture);
                                    if (FAILED(hr)) {
                                        break;
                                    }

                                    framesAvailableCapture -= mappedDeviceBufferSizeInFramesCapture;

share/public_html/static/music_inc/src/miniaudio.h

                                        if (FAILED(hr)) {
                                            ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from capture device in preparation for writing to the device.", ma_result_from_HRESULT(hr));
                                            exitLoop = MA_TRUE;
                                            break;
                                        }
                                    } else {
                                        pMappedDeviceBufferCapture = NULL;
                                        mappedDeviceBufferSizeInFramesCapture = 0;
                                    }
                                } while (framesAvailableCapture > periodSizeInFramesCapture);
                            #ifdef MA_DEBUG_OUTPUT
                                printf("framesAvailableCapture=%d, mappedBufferSizeInFramesCapture=%d\n", framesAvailableCapture, mappedDeviceBufferSizeInFramesCapture);
                            #endif
                            }
                        } else {
                        #ifdef MA_DEBUG_OUTPUT
                            if (flagsCapture != 0) {
                                printf("[WASAPI] Capture Flags: %ld\n", flagsCapture);
                            }
                        #endif
                        }

                        mappedDeviceBufferFramesRemainingCapture = mappedDeviceBufferSizeInFramesCapture;
                    }


share/public_html/static/music_inc/src/miniaudio.h

                hr = ma_IAudioCaptureClient_GetBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, (BYTE**)&pMappedDeviceBufferCapture, &mappedDeviceBufferSizeInFramesCapture, &flagsCapture, NULL, NULL);
                if (FAILED(hr)) {
                    ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from capture device in preparation for writing to the device.", ma_result_from_HRESULT(hr));
                    exitLoop = MA_TRUE;
                    break;
                }

                /* Overrun detection. */
                if ((flagsCapture & MA_AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) != 0) {
                    /* Glitched. Probably due to an overrun. */
                #ifdef MA_DEBUG_OUTPUT
                    printf("[WASAPI] Data discontinuity (possible overrun). framesAvailableCapture=%d, mappedBufferSizeInFramesCapture=%d\n", framesAvailableCapture, mappedDeviceBufferSizeInFramesCapture);
                #endif

                    /*
                    Experiment: If we get an overrun it probably means we're straddling the end of the buffer. In order to prevent a never-ending sequence of glitches let's experiment
                    by dropping every frame until we're left with only a single period. To do this we just keep retrieving and immediately releasing buffers until we're down to the
                    last period.
                    */
                    if (framesAvailableCapture >= pDevice->wasapi.actualPeriodSizeInFramesCapture) {
                    #ifdef MA_DEBUG_OUTPUT
                        printf("[WASAPI] Synchronizing capture stream. ");
                    #endif
                        do
                        {
                            hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, mappedDeviceBufferSizeInFramesCapture);
                            if (FAILED(hr)) {
                                break;
                            }

                            framesAvailableCapture -= mappedDeviceBufferSizeInFramesCapture;

share/public_html/static/music_inc/src/miniaudio.h

                                if (FAILED(hr)) {
                                    ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from capture device in preparation for writing to the device.", ma_result_from_HRESULT(hr));
                                    exitLoop = MA_TRUE;
                                    break;
                                }
                            } else {
                                pMappedDeviceBufferCapture = NULL;
                                mappedDeviceBufferSizeInFramesCapture = 0;
                            }
                        } while (framesAvailableCapture > periodSizeInFramesCapture);
                    #ifdef MA_DEBUG_OUTPUT
                        printf("framesAvailableCapture=%d, mappedBufferSizeInFramesCapture=%d\n", framesAvailableCapture, mappedDeviceBufferSizeInFramesCapture);
                    #endif
                    }
                } else {
                #ifdef MA_DEBUG_OUTPUT
                    if (flagsCapture != 0) {
                        printf("[WASAPI] Capture Flags: %ld\n", flagsCapture);
                    }
                #endif
                }

                /* We should have a buffer at this point, but let's just do a sanity check anyway. */
                if (mappedDeviceBufferSizeInFramesCapture > 0 && pMappedDeviceBufferCapture != NULL) {
                    ma_device__send_frames_to_client(pDevice, mappedDeviceBufferSizeInFramesCapture, pMappedDeviceBufferCapture);

share/public_html/static/music_inc/src/miniaudio.h

                        prevPlayCursorInBytesPlayback  = physicalPlayCursorInBytes;

                        /* If there are any bytes available for writing we can do that now. This is the space between the virtual write cursor position and the play cursor. */
                        if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) {
                            /* Same loop iteration. The available space wraps all the way around from the virtual write cursor to the physical play cursor. */
                            if (physicalPlayCursorInBytes <= virtualWriteCursorInBytesPlayback) {
                                availableBytesPlayback  = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback;
                                availableBytesPlayback += physicalPlayCursorInBytes;    /* Wrap around. */
                            } else {
                                /* This is an error. */
                            #ifdef MA_DEBUG_OUTPUT
                                printf("[DirectSound] (Duplex/Playback) WARNING: Play cursor has moved in front of the write cursor (same loop iterations). physicalPlayCursorInBytes=%ld, virtualWriteCursorInBytes=%ld.\n", physicalPlayCursorInBytes, v...
                            #endif
                                availableBytesPlayback = 0;
                            }
                        } else {
                            /* Different loop iterations. The available space only runs from the virtual write cursor to the physical play cursor. */
                            if (physicalPlayCursorInBytes >= virtualWriteCursorInBytesPlayback) {
                                availableBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback;
                            } else {
                                /* This is an error. */
                            #ifdef MA_DEBUG_OUTPUT
                                printf("[DirectSound] (Duplex/Playback) WARNING: Write cursor has moved behind the play cursor (different loop iterations). physicalPlayCursorInBytes=%ld, virtualWriteCursorInBytes=%ld.\n", physicalPlayCursorInBytes, v...
                            #endif
                                availableBytesPlayback = 0;
                            }
                        }
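                        /* Illustrative numbers: with a 65536-byte playback ring, physicalPlayCursorInBytes=1000 and
                           virtualWriteCursorInBytesPlayback=5000 on the same loop iteration gives
                           availableBytesPlayback = (65536 - 5000) + 1000 = 61536. */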

                    #ifdef MA_DEBUG_OUTPUT
                        /*printf("[DirectSound] (Duplex/Playback) physicalPlayCursorInBytes=%d, availableBytesPlayback=%d\n", physicalPlayCursorInBytes, availableBytesPlayback);*/
                    #endif

                        /* If there's no room available for writing we need to wait for more. */
                        if (availableBytesPlayback == 0) {
                            /* If we haven't started the device yet, this will never get beyond 0. In this case we need to get the device started. */
                            if (!isPlaybackDeviceStarted) {
                                hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING);
                                if (FAILED(hr)) {
                                    ma_IDirectSoundCaptureBuffer_Stop((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer);

share/public_html/static/music_inc/src/miniaudio.h

                        endless glitching due to it constantly running out of data.
                        */
                        if (isPlaybackDeviceStarted) {
                            DWORD bytesQueuedForPlayback = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - availableBytesPlayback;
                            if (bytesQueuedForPlayback < (pDevice->playback.internalPeriodSizeInFrames*bpfDevicePlayback)) {
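                                /* Less than one period is queued: pad with silence so roughly two periods' worth ends up
                                   queued (capped below to the size of the locked region). */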
                                silentPaddingInBytes   = (pDevice->playback.internalPeriodSizeInFrames*2*bpfDevicePlayback) - bytesQueuedForPlayback;
                                if (silentPaddingInBytes > lockSizeInBytesPlayback) {
                                    silentPaddingInBytes = lockSizeInBytesPlayback;
                                }

                        #ifdef MA_DEBUG_OUTPUT
                                printf("[DirectSound] (Duplex/Playback) Playback buffer starved. availableBytesPlayback=%ld, silentPaddingInBytes=%ld\n", availableBytesPlayback, silentPaddingInBytes);
                        #endif
                            }
                        }

                        /* At this point we have a buffer for output. */
                        if (silentPaddingInBytes > 0) {
                            MA_ZERO_MEMORY(pMappedDeviceBufferPlayback, silentPaddingInBytes);
                            framesWrittenThisIteration = silentPaddingInBytes/bpfDevicePlayback;
                        } else {

share/public_html/static/music_inc/src/miniaudio.h

                        /* Lock up to the end of the buffer. */
                        lockOffsetInBytesCapture = prevReadCursorInBytesCapture;
                        lockSizeInBytesCapture   = (pDevice->capture.internalPeriodSizeInFrames*pDevice->capture.internalPeriods*bpfDeviceCapture) - prevReadCursorInBytesCapture;
                    } else {
                        /* Lock starting from the start of the buffer. */
                        lockOffsetInBytesCapture = 0;
                        lockSizeInBytesCapture   = physicalReadCursorInBytes;
                    }
                }

            #ifdef MA_DEBUG_OUTPUT
                /*printf("[DirectSound] (Capture) physicalCaptureCursorInBytes=%d, physicalReadCursorInBytes=%d\n", physicalCaptureCursorInBytes, physicalReadCursorInBytes);*/
                /*printf("[DirectSound] (Capture) lockOffsetInBytesCapture=%d, lockSizeInBytesCapture=%d\n", lockOffsetInBytesCapture, lockSizeInBytesCapture);*/
            #endif

                if (lockSizeInBytesCapture < pDevice->capture.internalPeriodSizeInFrames) {
                    ma_sleep(waitTimeInMilliseconds);
                    continue; /* Nothing is available in the capture buffer. */
                }

                hr = ma_IDirectSoundCaptureBuffer_Lock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, lockOffsetInBytesCapture, lockSizeInBytesCapture, &pMappedDeviceBufferCapture, &mappedSizeInBytesCapture, NULL, NULL, 0);
                if (FAILED(hr)) {
                    return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from capture device in preparation for writing to the device.", ma_result_from_HRESULT(hr));
                }

            #ifdef MA_DEBUG_OUTPUT
                if (lockSizeInBytesCapture != mappedSizeInBytesCapture) {
                    printf("[DirectSound] (Capture) lockSizeInBytesCapture=%ld != mappedSizeInBytesCapture=%ld\n", lockSizeInBytesCapture, mappedSizeInBytesCapture);
                }
            #endif

                ma_device__send_frames_to_client(pDevice, mappedSizeInBytesCapture/bpfDeviceCapture, pMappedDeviceBufferCapture);

                hr = ma_IDirectSoundCaptureBuffer_Unlock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, pMappedDeviceBufferCapture, mappedSizeInBytesCapture, NULL, 0);
                if (FAILED(hr)) {
                    return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from capture device after reading from the device.", ma_result_from_HRESULT(hr));

share/public_html/static/music_inc/src/miniaudio.h

                prevPlayCursorInBytesPlayback  = physicalPlayCursorInBytes;

                /* If there are any bytes available for writing we can do that now. This is the space between the virtual write cursor position and the play cursor. */
                if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) {
                    /* Same loop iteration. The available space wraps all the way around from the virtual write cursor to the physical play cursor. */
                    if (physicalPlayCursorInBytes <= virtualWriteCursorInBytesPlayback) {
                        availableBytesPlayback  = (pDevice->playback.internalPeriodSizeInFrames*pDevice->playback.internalPeriods*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback;
                        availableBytesPlayback += physicalPlayCursorInBytes;    /* Wrap around. */
                    } else {
                        /* This is an error. */
                    #ifdef MA_DEBUG_OUTPUT
                        printf("[DirectSound] (Playback) WARNING: Play cursor has moved in front of the write cursor (same loop iterations). physicalPlayCursorInBytes=%ld, virtualWriteCursorInBytes=%ld.\n", physicalPlayCursorInBytes, virtualWriteCurs...
                    #endif
                        availableBytesPlayback = 0;
                    }
                } else {
                    /* Different loop iterations. The available space only runs from the virtual write cursor to the physical play cursor. */
                    if (physicalPlayCursorInBytes >= virtualWriteCursorInBytesPlayback) {
                        availableBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback;
                    } else {
                        /* This is an error. */
                    #ifdef MA_DEBUG_OUTPUT
                        printf("[DirectSound] (Playback) WARNING: Write cursor has moved behind the play cursor (different loop iterations). physicalPlayCursorInBytes=%ld, virtualWriteCursorInBytes=%ld.\n", physicalPlayCursorInBytes, virtualWriteCurs...
                    #endif
                        availableBytesPlayback = 0;
                    }
                }

            #ifdef MA_DEBUG_OUTPUT
                /*printf("[DirectSound] (Playback) physicalPlayCursorInBytes=%d, availableBytesPlayback=%d\n", physicalPlayCursorInBytes, availableBytesPlayback);*/
            #endif

                /* If there's no room available for writing we need to wait for more. */
                if (availableBytesPlayback < pDevice->playback.internalPeriodSizeInFrames) {
                    /* If we haven't started the device yet, this will never get beyond 0. In this case we need to get the device started. */
                    if (availableBytesPlayback == 0 && !isPlaybackDeviceStarted) {
                        hr = ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING);
                        if (FAILED(hr)) {
                            return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed.", ma_result_from_HRESULT(hr));

share/public_html/static/music_inc/src/miniaudio.h


    for (;;) {
        resultALSA = ((ma_snd_pcm_readi_proc)pDevice->pContext->alsa.snd_pcm_readi)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture, pFramesOut, frameCount);
        if (resultALSA >= 0) {
            break;  /* Success. */
        } else {
            if (resultALSA == -EAGAIN) {
                /*printf("TRACE: EGAIN (read)\n");*/
                continue;   /* Try again. */
            } else if (resultALSA == -EPIPE) {
            #if defined(MA_DEBUG_OUTPUT)
                printf("TRACE: EPIPE (read)\n");
            #endif

                /* Overrun. Recover and try again. If this fails we need to return an error. */
                resultALSA = ((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture, resultALSA, MA_TRUE);
                if (resultALSA < 0) {
                    return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to recover device after overrun.", ma_result_from_errno((int)-resultALSA));
                }

                resultALSA = ((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture);

share/public_html/static/music_inc/src/miniaudio.h


    for (;;) {
        resultALSA = ((ma_snd_pcm_writei_proc)pDevice->pContext->alsa.snd_pcm_writei)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback, pFrames, frameCount);
        if (resultALSA >= 0) {
            break;  /* Success. */
        } else {
            if (resultALSA == -EAGAIN) {
                /*printf("TRACE: EGAIN (write)\n");*/
                continue;   /* Try again. */
            } else if (resultALSA == -EPIPE) {
            #if defined(MA_DEBUG_OUTPUT)
                printf("TRACE: EPIPE (write)\n");
            #endif

                /* Underrun. Recover and try again. If this fails we need to return an error. */
                resultALSA = ((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback, resultALSA, MA_TRUE);
                if (resultALSA < 0) { /* MA_TRUE=silent (don't print anything on error). */
                    return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to recover device after underrun.", ma_result_from_errno((int)-resultALSA));
                }

                /*

share/public_html/static/music_inc/src/miniaudio.h

            default: break;
        } 
    }

    /* Here is where the device needs to be stopped. */
    if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) {
        ((ma_snd_pcm_drain_proc)pDevice->pContext->alsa.snd_pcm_drain)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture);

        /* We need to prepare the device again, otherwise we won't be able to restart the device. */
        if (((ma_snd_pcm_prepare_proc)pDevice->pContext->alsa.snd_pcm_prepare)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture) < 0) {
    #ifdef MA_DEBUG_OUTPUT
            printf("[ALSA] Failed to prepare capture device after stopping.\n");
    #endif
        }
    }

    if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) {
        ((ma_snd_pcm_drain_proc)pDevice->pContext->alsa.snd_pcm_drain)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback);

        /* We need to prepare the device again, otherwise we won't be able to restart the device. */
        if (((ma_snd_pcm_prepare_proc)pDevice->pContext->alsa.snd_pcm_prepare)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback) < 0) {
    #ifdef MA_DEBUG_OUTPUT
            printf("[ALSA] Failed to prepare playback device after stopping.\n");
    #endif
        }
    }

    return result;
}

static ma_result ma_context_uninit__alsa(ma_context* pContext)
{

share/public_html/static/music_inc/src/miniaudio.h

    size_t i;

    for (i = 0; i < ma_countof(libasoundNames); ++i) {
        pContext->alsa.asoundSO = ma_dlopen(pContext, libasoundNames[i]);
        if (pContext->alsa.asoundSO != NULL) {
            break;
        }
    }

    if (pContext->alsa.asoundSO == NULL) {
#ifdef MA_DEBUG_OUTPUT
        printf("[ALSA] Failed to open shared object.\n");
#endif
        return MA_NO_BACKEND;
    }

    pContext->alsa.snd_pcm_open                           = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_open");
    pContext->alsa.snd_pcm_close                          = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_close");
    pContext->alsa.snd_pcm_hw_params_sizeof               = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_sizeof");
    pContext->alsa.snd_pcm_hw_params_any                  = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_any");
    pContext->alsa.snd_pcm_hw_params_set_format           = (ma_proc)ma_dlsym(pContext, pContext->alsa.asoundSO, "snd_pcm_hw_params_set_format");

share/public_html/static/music_inc/src/miniaudio.h

        if (state == MA_PA_CONTEXT_READY) {
            break;  /* Success. */
        }
        if (state == MA_PA_CONTEXT_CONNECTING || state == MA_PA_CONTEXT_AUTHORIZING || state == MA_PA_CONTEXT_SETTING_NAME) {
            error = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)(pMainLoop, 1, NULL);
            if (error < 0) {
                result = ma_result_from_pulse(error);
                goto done;
            }

#ifdef MA_DEBUG_OUTPUT
            printf("[PulseAudio] pa_context_get_state() returned %d. Waiting.\n", state);
#endif
            continue;   /* Keep trying. */
        }
        if (state == MA_PA_CONTEXT_UNCONNECTED || state == MA_PA_CONTEXT_FAILED || state == MA_PA_CONTEXT_TERMINATED) {
#ifdef MA_DEBUG_OUTPUT
            printf("[PulseAudio] pa_context_get_state() returned %d. Failed.\n", state);
#endif
            goto done;  /* Failed. */
        }
    }


    /* Playback. */
    if (!callbackData.isTerminated) {
        pOP = ((ma_pa_context_get_sink_info_list_proc)pContext->pulse.pa_context_get_sink_info_list)(pPulseContext, ma_context_enumerate_devices_sink_callback__pulse, &callbackData);

share/public_html/static/music_inc/src/miniaudio.h

        if (state == MA_PA_CONTEXT_READY) {
            break;  /* Success. */
        }
        if (state == MA_PA_CONTEXT_CONNECTING || state == MA_PA_CONTEXT_AUTHORIZING || state == MA_PA_CONTEXT_SETTING_NAME) {
            error = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)(pMainLoop, 1, NULL);
            if (error < 0) {
                result = ma_result_from_pulse(error);
                goto done;
            }

#ifdef MA_DEBUG_OUTPUT
            printf("[PulseAudio] pa_context_get_state() returned %d. Waiting.\n", state);
#endif
            continue;   /* Keep trying. */
        }
        if (state == MA_PA_CONTEXT_UNCONNECTED || state == MA_PA_CONTEXT_FAILED || state == MA_PA_CONTEXT_TERMINATED) {
#ifdef MA_DEBUG_OUTPUT
            printf("[PulseAudio] pa_context_get_state() returned %d. Failed.\n", state);
#endif
            goto done;  /* Failed. */
        }
    }

    if (deviceType == ma_device_type_playback) {
        pOP = ((ma_pa_context_get_sink_info_by_name_proc)pContext->pulse.pa_context_get_sink_info_by_name)(pPulseContext, pDeviceID->pulse, ma_context_get_device_info_sink_callback__pulse, &callbackData);
    } else {
        pOP = ((ma_pa_context_get_source_info_by_name_proc)pContext->pulse.pa_context_get_source_info_by_name)(pPulseContext, pDeviceID->pulse, ma_context_get_device_info_source_callback__pulse, &callbackData);

share/public_html/static/music_inc/src/miniaudio.h

            goto on_error3;
        }

        ss = sourceInfo.sample_spec;
        cmap = sourceInfo.channel_map;

        pDevice->capture.internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(periodSizeInMilliseconds, ss.rate);
        pDevice->capture.internalPeriods            = pConfig->periods;

        attr = ma_device__pa_buffer_attr_new(pDevice->capture.internalPeriodSizeInFrames, pConfig->periods, &ss);
    #ifdef MA_DEBUG_OUTPUT
        printf("[PulseAudio] Capture attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; internalPeriodSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDevice->capture.internalPeriodSizeInFram...
    #endif

        pDevice->pulse.pStreamCapture = ma_device__pa_stream_new__pulse(pDevice, pConfig->pulse.pStreamNameCapture, &ss, &cmap);
        if (pDevice->pulse.pStreamCapture == NULL) {
            result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create PulseAudio capture stream.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
            goto on_error3;
        }

        streamFlags = MA_PA_STREAM_START_CORKED | MA_PA_STREAM_FIX_FORMAT | MA_PA_STREAM_FIX_RATE | MA_PA_STREAM_FIX_CHANNELS;

share/public_html/static/music_inc/src/miniaudio.h

            pDevice->capture.internalChannelMap[iChannel] = ma_channel_position_from_pulse(cmap.map[iChannel]);
        }

        /* Buffer. */
        pActualAttr = ((ma_pa_stream_get_buffer_attr_proc)pContext->pulse.pa_stream_get_buffer_attr)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
        if (pActualAttr != NULL) {
            attr = *pActualAttr;
        }
        pDevice->capture.internalPeriods            = attr.maxlength / attr.fragsize;
        pDevice->capture.internalPeriodSizeInFrames = attr.maxlength / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels) / pDevice->capture.internalPeriods;
    #ifdef MA_DEBUG_OUTPUT
        printf("[PulseAudio] Capture actual attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; internalPeriodSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDevice->capture.internalPeriodSiz...
    #endif

        /* Name. */
        devCapture = ((ma_pa_stream_get_device_name_proc)pContext->pulse.pa_stream_get_device_name)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
        if (devCapture != NULL) {
            ma_pa_operation* pOP = ((ma_pa_context_get_source_info_by_name_proc)pContext->pulse.pa_context_get_source_info_by_name)((ma_pa_context*)pDevice->pulse.pPulseContext, devCapture, ma_device_source_name_callback, pDevice);
            if (pOP != NULL) {
                ma_device__wait_for_operation__pulse(pDevice, pOP);
                ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP);

share/public_html/static/music_inc/src/miniaudio.h

            goto on_error3;
        }

        ss = sinkInfo.sample_spec;
        cmap = sinkInfo.channel_map;

        pDevice->playback.internalPeriodSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(periodSizeInMilliseconds, ss.rate);
        pDevice->playback.internalPeriods            = pConfig->periods;

        attr = ma_device__pa_buffer_attr_new(pDevice->playback.internalPeriodSizeInFrames, pConfig->periods, &ss);
    #ifdef MA_DEBUG_OUTPUT
        printf("[PulseAudio] Playback attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; internalPeriodSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDevice->playback.internalPeriodSizeInFr...
    #endif

        pDevice->pulse.pStreamPlayback = ma_device__pa_stream_new__pulse(pDevice, pConfig->pulse.pStreamNamePlayback, &ss, &cmap);
        if (pDevice->pulse.pStreamPlayback == NULL) {
            result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to create PulseAudio playback stream.", MA_FAILED_TO_OPEN_BACKEND_DEVICE);
            goto on_error3;
        }

        streamFlags = MA_PA_STREAM_START_CORKED | MA_PA_STREAM_FIX_FORMAT | MA_PA_STREAM_FIX_RATE | MA_PA_STREAM_FIX_CHANNELS;

share/public_html/static/music_inc/src/miniaudio.h

            pDevice->playback.internalChannelMap[iChannel] = ma_channel_position_from_pulse(cmap.map[iChannel]);
        }

        /* Buffer. */
        pActualAttr = ((ma_pa_stream_get_buffer_attr_proc)pContext->pulse.pa_stream_get_buffer_attr)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
        if (pActualAttr != NULL) {
            attr = *pActualAttr;
        }
        pDevice->playback.internalPeriods            = attr.maxlength / attr.tlength;
        pDevice->playback.internalPeriodSizeInFrames = attr.maxlength / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels) / pDevice->playback.internalPeriods;
    #ifdef MA_DEBUG_OUTPUT
        printf("[PulseAudio] Playback actual attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; internalPeriodSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDevice->playback.internalPeriodS...
    #endif

        /* Name. */
        devPlayback = ((ma_pa_stream_get_device_name_proc)pContext->pulse.pa_stream_get_device_name)((ma_pa_stream*)pDevice->pulse.pStreamPlayback);
        if (devPlayback != NULL) {
            ma_pa_operation* pOP = ((ma_pa_context_get_sink_info_by_name_proc)pContext->pulse.pa_context_get_sink_info_by_name)((ma_pa_context*)pDevice->pulse.pPulseContext, devPlayback, ma_device_sink_name_callback, pDevice);
            if (pOP != NULL) {
                ma_device__wait_for_operation__pulse(pDevice, pOP);
                ((ma_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP);

share/public_html/static/music_inc/src/miniaudio.h


            /*
            This little bit of logic here is specifically for PulseAudio and its hole management. The buffer pointer will be set to NULL
            when the current fragment is a hole. For a hole we just output silence.
            */
            if (pDevice->pulse.pMappedBufferCapture != NULL) {
                const void* pSrc = (const ma_uint8*)pDevice->pulse.pMappedBufferCapture + (mappedBufferFramesConsumed * bpf);
                MA_COPY_MEMORY(pDst, pSrc, framesToCopy * bpf);
            } else {
                MA_ZERO_MEMORY(pDst, framesToCopy * bpf);
            #if defined(MA_DEBUG_OUTPUT)
                printf("[PulseAudio] ma_device_read__pulse: Filling hole with silence.\n");
            #endif
            }

            pDevice->pulse.mappedBufferFramesRemainingCapture -= framesToCopy;
            totalFramesRead += framesToCopy;
        }

        /*
        Getting here means we've run out of data in the currently mapped chunk. We need to drop this from the device and then try
        mapping another chunk. If this fails we need to wait for data to become available.
        */
        if (pDevice->pulse.mappedBufferFramesCapacityCapture > 0 && pDevice->pulse.mappedBufferFramesRemainingCapture == 0) {
            int error;

        #if defined(MA_DEBUG_OUTPUT)
            printf("[PulseAudio] ma_device_read__pulse: Call pa_stream_drop()\n");
        #endif

            error = ((ma_pa_stream_drop_proc)pDevice->pContext->pulse.pa_stream_drop)((ma_pa_stream*)pDevice->pulse.pStreamCapture);
            if (error != 0) {
                return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to drop fragment.", ma_result_from_pulse(error));
            }

            pDevice->pulse.pMappedBufferCapture = NULL;
            pDevice->pulse.mappedBufferFramesRemainingCapture = 0;

share/public_html/static/music_inc/src/miniaudio.h

        for (;;) {
            int error;
            size_t bytesMapped;

            if (ma_device__get_state(pDevice) != MA_STATE_STARTED) {
                break;
            }

            /* If the device has been corked, don't try to continue. */
            if (((ma_pa_stream_is_corked_proc)pDevice->pContext->pulse.pa_stream_is_corked)((ma_pa_stream*)pDevice->pulse.pStreamCapture)) {
            #if defined(MA_DEBUG_OUTPUT)
                printf("[PulseAudio] ma_device_read__pulse: Corked.\n");
            #endif
                break;
            }

            MA_ASSERT(pDevice->pulse.pMappedBufferCapture == NULL); /* <-- We're about to map a buffer which means we shouldn't have an existing mapping. */

            error = ((ma_pa_stream_peek_proc)pDevice->pContext->pulse.pa_stream_peek)((ma_pa_stream*)pDevice->pulse.pStreamCapture, &pDevice->pulse.pMappedBufferCapture, &bytesMapped);
            if (error < 0) {
                return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to peek capture buffer.", ma_result_from_pulse(error));
            }

            if (bytesMapped > 0) {
                pDevice->pulse.mappedBufferFramesCapacityCapture  = bytesMapped / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);
                pDevice->pulse.mappedBufferFramesRemainingCapture = pDevice->pulse.mappedBufferFramesCapacityCapture;

                #if defined(MA_DEBUG_OUTPUT)
                    printf("[PulseAudio] ma_device_read__pulse: Mapped. mappedBufferFramesCapacityCapture=%d, mappedBufferFramesRemainingCapture=%d\n", pDevice->pulse.mappedBufferFramesCapacityCapture, pDevice->pulse.mappedBufferFramesRemainingCaptur...
                #endif

                if (pDevice->pulse.pMappedBufferCapture == NULL) {
                    /* It's a hole. */
                    #if defined(MA_DEBUG_OUTPUT)
                        printf("[PulseAudio] ma_device_read__pulse: Call pa_stream_peek(). Hole.\n");
                    #endif
                }

                break;
            } else {
                if (pDevice->pulse.pMappedBufferCapture == NULL) {
                    /* Nothing available yet. Need to wait for more. */

                    /*

share/public_html/static/music_inc/src/miniaudio.h

                    error = ((ma_pa_mainloop_iterate_proc)pDevice->pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pDevice->pulse.pMainLoop, 0, NULL);
                    if (error < 0) {
                        return ma_result_from_pulse(error);
                    }

                    /* Sleep for a bit if nothing was dispatched. */
                    if (error == 0) {
                        ma_sleep(1);
                    }

                #if defined(MA_DEBUG_OUTPUT)
                    printf("[PulseAudio] ma_device_read__pulse: No data available. Waiting. mappedBufferFramesCapacityCapture=%d, mappedBufferFramesRemainingCapture=%d\n", pDevice->pulse.mappedBufferFramesCapacityCapture, pDevice->pulse.mappedBufferF...
                #endif
                } else {
                    /* Getting here means we mapped 0 bytes, but have a non-NULL buffer. I don't think this should ever happen. */
                    MA_ASSERT(MA_FALSE);
                }
            }
        }
    }

share/public_html/static/music_inc/src/miniaudio.h

typedef OSStatus (* ma_AudioOutputUnitStart_proc)(AudioUnit inUnit);
typedef OSStatus (* ma_AudioOutputUnitStop_proc)(AudioUnit inUnit);
typedef OSStatus (* ma_AudioUnitAddPropertyListener_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitPropertyListenerProc inProc, void* inProcUserData);
typedef OSStatus (* ma_AudioUnitGetPropertyInfo_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, UInt32* outDataSize, Boolean* outWriteable);
typedef OSStatus (* ma_AudioUnitGetProperty_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, void* outData, UInt32* ioDataSize);
typedef OSStatus (* ma_AudioUnitSetProperty_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, const void* inData, UInt32 inDataSize);
typedef OSStatus (* ma_AudioUnitInitialize_proc)(AudioUnit inUnit);
typedef OSStatus (* ma_AudioUnitRender_proc)(AudioUnit inUnit, AudioUnitRenderActionFlags* ioActionFlags, const AudioTimeStamp* inTimeStamp, UInt32 inOutputBusNumber, UInt32 inNumberFrames, AudioBufferList* ioData);


#define MA_COREAUDIO_OUTPUT_BUS    0
#define MA_COREAUDIO_INPUT_BUS     1

#if defined(MA_APPLE_DESKTOP)
static ma_result ma_device_reinit_internal__coreaudio(ma_device* pDevice, ma_device_type deviceType, ma_bool32 disposePreviousAudioUnit);
#endif

/*
Core Audio

So far, Core Audio has been the worst backend to work with due to being both unintuitive and having almost no documentation
apart from comments in the headers (which admittedly are quite good). For my own purposes, and for anybody out there whose

share/public_html/static/music_inc/src/miniaudio.h

    AudioUnitElement deviceBus;
    UInt32 channelLayoutSize;
    OSStatus status;
    AudioChannelLayout* pChannelLayout;
    ma_result result;

    MA_ASSERT(pContext != NULL);
    
    if (deviceType == ma_device_type_playback) {
        deviceScope = kAudioUnitScope_Output;
        deviceBus = MA_COREAUDIO_OUTPUT_BUS;
    } else {
        deviceScope = kAudioUnitScope_Input;
        deviceBus = MA_COREAUDIO_INPUT_BUS;
    }
    
    status = ((ma_AudioUnitGetPropertyInfo_proc)pContext->coreaudio.AudioUnitGetPropertyInfo)(audioUnit, kAudioUnitProperty_AudioChannelLayout, deviceScope, deviceBus, &channelLayoutSize, NULL);
    if (status != noErr) {
        return ma_result_from_OSStatus(status);
    }
    
    pChannelLayout = (AudioChannelLayout*)ma__malloc_from_callbacks(channelLayoutSize, &pContext->allocationCallbacks);
    if (pChannelLayout == NULL) {
        return MA_OUT_OF_MEMORY;

share/public_html/static/music_inc/src/miniaudio.h

        if (component == NULL) {
            return MA_FAILED_TO_INIT_BACKEND;
        }
    
        status = ((ma_AudioComponentInstanceNew_proc)pContext->coreaudio.AudioComponentInstanceNew)(component, &audioUnit);
        if (status != noErr) {
            return ma_result_from_OSStatus(status);
        }
    
        formatScope   = (deviceType == ma_device_type_playback) ? kAudioUnitScope_Input : kAudioUnitScope_Output;
        formatElement = (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_INPUT_BUS;
    
        propSize = sizeof(bestFormat);
        status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, &propSize);
        if (status != noErr) {
            ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(audioUnit);
            return ma_result_from_OSStatus(status);
        }
    
        ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(audioUnit);
        audioUnit = NULL;

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

}


static OSStatus ma_on_output__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pActionFlags, const AudioTimeStamp* pTimeStamp, UInt32 busNumber, UInt32 frameCount, AudioBufferList* pBufferList)
{
    ma_device* pDevice = (ma_device*)pUserData;
    ma_stream_layout layout;

    MA_ASSERT(pDevice != NULL);

#if defined(MA_DEBUG_OUTPUT)
    printf("INFO: Output Callback: busNumber=%d, frameCount=%d, mNumberBuffers=%d\n", busNumber, frameCount, pBufferList->mNumberBuffers);
#endif

    /* We need to check whether or not we are outputting interleaved or non-interleaved samples. The way we do this is slightly different for each type. */
    layout = ma_stream_layout_interleaved;
    if (pBufferList->mBuffers[0].mNumberChannels != pDevice->playback.internalChannels) {
        layout = ma_stream_layout_deinterleaved;
    }
    
    if (layout == ma_stream_layout_interleaved) {
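
A side note on the check above: the excerpt decides between interleaved and deinterleaved delivery purely by comparing the channel count of the first buffer against the device's internal channel count. A minimal standalone sketch of that idea, using toy stand-ins rather than the real Core Audio structures:

/* Sketch only: toy stand-ins for AudioBufferList, not the real Core Audio types. */
#include <stddef.h>

typedef struct { unsigned int numberChannels; size_t dataByteSize; void* pData; } toy_buffer;
typedef struct { unsigned int numberBuffers; toy_buffer buffers[8]; } toy_buffer_list;

typedef enum { LAYOUT_INTERLEAVED, LAYOUT_DEINTERLEAVED } toy_layout;

/* Interleaved: one buffer carries all channels. Deinterleaved: one buffer per channel. */
static toy_layout detect_layout(const toy_buffer_list* pList, unsigned int internalChannels)
{
    if (pList->buffers[0].numberChannels != internalChannels) {
        return LAYOUT_DEINTERLEAVED;
    }
    return LAYOUT_INTERLEAVED;
}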

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

            if (pBufferList->mBuffers[iBuffer].mNumberChannels == pDevice->playback.internalChannels) {
                ma_uint32 frameCountForThisBuffer = pBufferList->mBuffers[iBuffer].mDataByteSize / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels);
                if (frameCountForThisBuffer > 0) {
                    if (pDevice->type == ma_device_type_duplex) {
                        ma_device__handle_duplex_callback_playback(pDevice, frameCountForThisBuffer, pBufferList->mBuffers[iBuffer].mData, &pDevice->coreaudio.duplexRB);
                    } else {
                        ma_device__read_frames_from_client(pDevice, frameCountForThisBuffer, pBufferList->mBuffers[iBuffer].mData);
                    }
                }
                
            #if defined(MA_DEBUG_OUTPUT)
                printf("  frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", frameCount, pBufferList->mBuffers[iBuffer].mNumberChannels, pBufferList->mBuffers[iBuffer].mDataByteSize);
            #endif
            } else {
                /*
                This case is where the number of channels in the output buffer does not match our internal channel count. It could mean that
                the stream is not interleaved, which we can't handle right now since miniaudio does not yet support non-interleaved streams.
                We just output silence here.
                */
                MA_ZERO_MEMORY(pBufferList->mBuffers[iBuffer].mData, pBufferList->mBuffers[iBuffer].mDataByteSize);

            #if defined(MA_DEBUG_OUTPUT)
                printf("  WARNING: Outputting silence. frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", frameCount, pBufferList->mBuffers[iBuffer].mNumberChannels, pBufferList->mBuffers[iBuffer].mDataByteSize);
            #endif
            }
        }
    } else {
        /* This is the deinterleaved case. We need to update each buffer in groups of internalChannels. This assumes each buffer is the same size. */
        MA_ASSERT(pDevice->playback.internalChannels <= MA_MAX_CHANNELS);   /* This should have been validated at initialization time. */
        
        /*
        For safety we'll check that the internal channel count is a multiple of the buffer count. If it's not, it means something
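
The per-buffer frame count above is derived from the buffer's byte size. A minimal sketch of that arithmetic, with a plain bytes-per-sample parameter standing in for miniaudio's ma_get_bytes_per_frame():

#include <stddef.h>

/* Sketch: frames = bytes / (bytes per sample * channels). */
static size_t frames_in_buffer(size_t dataByteSize, size_t bytesPerSample, size_t channels)
{
    size_t bytesPerFrame = bytesPerSample * channels;
    if (bytesPerFrame == 0) {
        return 0;
    }
    return dataByteSize / bytesPerFrame;   /* e.g. 4096 bytes of f32 stereo -> 4096 / (4*2) = 512 frames */
}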

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

    
    pRenderedBufferList = (AudioBufferList*)pDevice->coreaudio.pAudioBufferList;
    MA_ASSERT(pRenderedBufferList);
    
    /* We need to check whether or not we are outputting interleaved or non-interleaved samples. The way we do this is slightly different for each type. */
    layout = ma_stream_layout_interleaved;
    if (pRenderedBufferList->mBuffers[0].mNumberChannels != pDevice->capture.internalChannels) {
        layout = ma_stream_layout_deinterleaved;
    }
    
#if defined(MA_DEBUG_OUTPUT)
    printf("INFO: Input Callback: busNumber=%d, frameCount=%d, mNumberBuffers=%d\n", busNumber, frameCount, pRenderedBufferList->mNumberBuffers);
#endif
    
    status = ((ma_AudioUnitRender_proc)pDevice->pContext->coreaudio.AudioUnitRender)((AudioUnit)pDevice->coreaudio.audioUnitCapture, pActionFlags, pTimeStamp, busNumber, frameCount, pRenderedBufferList);
    if (status != noErr) {
    #if defined(MA_DEBUG_OUTPUT)
        printf("  ERROR: AudioUnitRender() failed with %d\n", status);
    #endif
        return status;
    }
    
    if (layout == ma_stream_layout_interleaved) {
        UInt32 iBuffer;
        for (iBuffer = 0; iBuffer < pRenderedBufferList->mNumberBuffers; ++iBuffer) {
            if (pRenderedBufferList->mBuffers[iBuffer].mNumberChannels == pDevice->capture.internalChannels) {
                if (pDevice->type == ma_device_type_duplex) {
                    ma_device__handle_duplex_callback_capture(pDevice, frameCount, pRenderedBufferList->mBuffers[iBuffer].mData, &pDevice->coreaudio.duplexRB);
                } else {
                    ma_device__send_frames_to_client(pDevice, frameCount, pRenderedBufferList->mBuffers[iBuffer].mData);
                }
            #if defined(MA_DEBUG_OUTPUT)
                printf("  mDataByteSize=%d\n", pRenderedBufferList->mBuffers[iBuffer].mDataByteSize);
            #endif
            } else {
                /*
                This case is where the number of channels in the output buffer does not match our internal channel count. It could mean that
                the stream is not interleaved, which we can't handle right now since miniaudio does not yet support non-interleaved streams.
                */
                ma_uint8 silentBuffer[4096];
                ma_uint32 framesRemaining;
                

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

                    
                    if (pDevice->type == ma_device_type_duplex) {
                        ma_device__handle_duplex_callback_capture(pDevice, framesToSend, silentBuffer, &pDevice->coreaudio.duplexRB);
                    } else {
                        ma_device__send_frames_to_client(pDevice, framesToSend, silentBuffer);
                    }
                    
                    framesRemaining -= framesToSend;
                }
                
            #if defined(MA_DEBUG_OUTPUT)
                printf("  WARNING: Outputting silence. frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", frameCount, pRenderedBufferList->mBuffers[iBuffer].mNumberChannels, pRenderedBufferList->mBuffers[iBuffer].mDataByteSize);
            #endif
            }
        }
    } else {
        /* This is the deinterleaved case. We need to interleave the audio data before sending it to the client. This assumes each buffer is the same size. */
        MA_ASSERT(pDevice->capture.internalChannels <= MA_MAX_CHANNELS);    /* This should have been validated at initialization time. */
        
        /*
        For safety we'll check that the internal channel count is a multiple of the buffer count. If it's not, it means something
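
When the channel counts don't match, the capture path above pushes silence in fixed-size chunks because the stack buffer holds fewer frames than may have been requested. A minimal sketch of that chunking pattern, with a hypothetical deliver() callback standing in for miniaudio's send/duplex helpers:

#include <string.h>
#include <stdint.h>

typedef void (*deliver_fn)(void* pUserData, const void* pFrames, uint32_t frameCount);

/* Sketch: push `frameCount` frames of silence through a sink that accepts arbitrary chunk sizes.
   Assumes bytesPerFrame is non-zero and no larger than the scratch buffer. */
static void send_silence_chunked(deliver_fn deliver, void* pUserData, uint32_t frameCount, uint32_t bytesPerFrame)
{
    uint8_t silentBuffer[4096];
    uint32_t framesPerChunk = (uint32_t)(sizeof(silentBuffer) / bytesPerFrame);
    memset(silentBuffer, 0, sizeof(silentBuffer));

    while (frameCount > 0) {
        uint32_t framesToSend = (frameCount < framesPerChunk) ? frameCount : framesPerChunk;
        deliver(pUserData, silentBuffer, framesToSend);
        frameCount -= framesToSend;
    }
}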

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN


-(void)handle_route_change:(NSNotification*)pNotification
{
    AVAudioSession* pSession = [AVAudioSession sharedInstance];

    NSInteger reason = [[[pNotification userInfo] objectForKey:AVAudioSessionRouteChangeReasonKey] integerValue];
    switch (reason)
    {
        case AVAudioSessionRouteChangeReasonOldDeviceUnavailable:
        {
        #if defined(MA_DEBUG_OUTPUT)
            printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonOldDeviceUnavailable\n");
        #endif
        } break;

        case AVAudioSessionRouteChangeReasonNewDeviceAvailable:
        {
        #if defined(MA_DEBUG_OUTPUT)
            printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonNewDeviceAvailable\n");
        #endif
        } break;

        case AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory:
        {
        #if defined(MA_DEBUG_OUTPUT)
            printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory\n");
        #endif
        } break;

        case AVAudioSessionRouteChangeReasonWakeFromSleep:
        {
        #if defined(MA_DEBUG_OUTPUT)
            printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonWakeFromSleep\n");
        #endif
        } break;

        case AVAudioSessionRouteChangeReasonOverride:
        {
        #if defined(MA_DEBUG_OUTPUT)
            printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonOverride\n");
        #endif
        } break;

        case AVAudioSessionRouteChangeReasonCategoryChange:
        {
        #if defined(MA_DEBUG_OUTPUT)
            printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonCategoryChange\n");
        #endif
        } break;

        case AVAudioSessionRouteChangeReasonUnknown:
        default:
        {
        #if defined(MA_DEBUG_OUTPUT)
            printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonUnknown\n");
        #endif
        } break;
    }

    m_pDevice->sampleRate = (ma_uint32)pSession.sampleRate;

    if (m_pDevice->type == ma_device_type_capture || m_pDevice->type == ma_device_type_duplex) {
        m_pDevice->capture.channels = (ma_uint32)pSession.inputNumberOfChannels;
        ma_device__post_init_setup(m_pDevice, ma_device_type_capture);

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

        return ma_result_from_OSStatus(status);
    }
    
    
    /* The input/output buses need to be explicitly enabled and disabled. We set the flag based on the output unit first, then we just swap it for input. */
    enableIOFlag = 1;
    if (deviceType == ma_device_type_capture) {
        enableIOFlag = 0;
    }
    
    status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, MA_COREAUDIO_OUTPUT_BUS, &enableIOFlag, sizeof(enableIOFlag));
    if (status != noErr) {
        ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
        return ma_result_from_OSStatus(status);
    }
    
    enableIOFlag = (enableIOFlag == 0) ? 1 : 0;
    status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, MA_COREAUDIO_INPUT_BUS, &enableIOFlag, sizeof(enableIOFlag));
    if (status != noErr) {
        ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
        return ma_result_from_OSStatus(status);
    }
    
    
    /* Set the device to use with this audio unit. This is only used on desktop since we are using defaults on mobile. */
#if defined(MA_APPLE_DESKTOP)
    status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_I...
    if (status != noErr) {
        ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
        return ma_result_from_OSStatus(status);
    }
#else
    /*
    For some reason it looks like Apple is only allowing selection of the input device. There does not appear to be any way to change
    the default output route. I have no idea why this is like this, but for now we'll only be able to configure capture devices.
    */
    if (pDeviceID != NULL) {

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

      3) There's a priority that miniaudio prefers.
    
    Ideally we would like to use a format that's as close to the hardware as possible so we can get as close to a passthrough as possible. The
    most important property is the sample rate. miniaudio can do format conversion for any sample rate and channel count, but cannot do the same
    for the sample data format. If the sample data format is not supported by miniaudio it must be ignored completely.
    
    On mobile platforms this is a bit different. We just force the use of whatever the audio unit's current format is set to.
    */
    {
        AudioUnitScope   formatScope   = (deviceType == ma_device_type_playback) ? kAudioUnitScope_Input : kAudioUnitScope_Output;
        AudioUnitElement formatElement = (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_INPUT_BUS;

    #if defined(MA_APPLE_DESKTOP)
        AudioStreamBasicDescription origFormat;
        UInt32 origFormatSize;
        
        origFormatSize = sizeof(origFormat);
        status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &origFormat, &origFormatSize);
        if (status != noErr) {
            ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
            return ma_result_from_OSStatus(status);
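
The comment above boils down to a preference order: keep the hardware's sample rate and channel count, but reject sample data formats that miniaudio cannot convert. A minimal sketch of picking the first workable entry from a preference-ordered list (the format names and the isSupported callback are illustrative, not miniaudio's actual priority table):

#include <stddef.h>

typedef enum { FMT_UNKNOWN = 0, FMT_F32, FMT_S32, FMT_S24, FMT_S16, FMT_U8 } toy_format;

/* Sketch: return the first candidate the device reports as supported, else FMT_UNKNOWN. */
static toy_format pick_format(const toy_format* pCandidatesInPriorityOrder, size_t count,
                              int (*isSupported)(toy_format fmt, void* pUserData), void* pUserData)
{
    size_t i;
    for (i = 0; i < count; i += 1) {
        if (isSupported(pCandidatesInPriorityOrder[i], pUserData)) {
            return pCandidatesInPriorityOrder[i];
        }
    }
    return FMT_UNKNOWN;   /* nothing usable: the caller has to abort or convert upstream */
}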

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

    /*
    During testing I discovered that the buffer size can be too big. You'll get an error like this:
    
      kAudioUnitErr_TooManyFramesToProcess : inFramesToProcess=4096, mMaxFramesPerSlice=512
    
    Note how inFramesToProcess is smaller than mMaxFramesPerSlice. To fix, we need to set kAudioUnitProperty_MaximumFramesPerSlice to that
    of the size of our buffer, or do it the other way around and set our buffer size to the kAudioUnitProperty_MaximumFramesPerSlice.
    */
    {
        /*AudioUnitScope propScope = (deviceType == ma_device_type_playback) ? kAudioUnitScope_Input : kAudioUnitScope_Output;
        AudioUnitElement propBus = (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_INPUT_BUS;
    
        status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, propScope, propBus, &actualBufferSizeInFrames, sizeof(actualBufferSizeInFrames));
        if (status != noErr) {
            ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
            return ma_result_from_OSStatus(status);
        }*/
        
        status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &actualPeriodSizeInFrames, sizeof(actualPeriodSizeInFrames));
        if (status != noErr) {
            ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

            }
        }
        
        pData->pAudioBufferList = pBufferList;
    }
    
    /* Callbacks. */
    callbackInfo.inputProcRefCon = pDevice_DoNotReference;
    if (deviceType == ma_device_type_playback) {
        callbackInfo.inputProc = ma_on_output__coreaudio;
        status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, MA_COREAUDIO_OUTPUT_BUS, &callbackInfo, sizeof(callbackInfo));
        if (status != noErr) {
            ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
            return ma_result_from_OSStatus(status);
        }
    } else {
        callbackInfo.inputProc = ma_on_input__coreaudio;
        status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, MA_COREAUDIO_INPUT_BUS, &callbackInfo, sizeof(callbackInfo));
        if (status != noErr) {
            ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
            return ma_result_from_OSStatus(status);
        }
    }
    
    /* We need to listen for stop events. */
    if (pData->registerStopEvent) {
        status = ((ma_AudioUnitAddPropertyListener_proc)pContext->coreaudio.AudioUnitAddPropertyListener)(pData->audioUnit, kAudioOutputUnitProperty_IsRunning, on_start_stop__coreaudio, pDevice_DoNotReference);
        if (status != noErr) {

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

    } else {
        pDevice->sndio.handlePlayback                = handle;
        pDevice->playback.internalFormat             = internalFormat;
        pDevice->playback.internalChannels           = internalChannels;
        pDevice->playback.internalSampleRate         = internalSampleRate;
        ma_get_standard_channel_map(ma_standard_channel_map_sndio, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap);
        pDevice->playback.internalPeriodSizeInFrames = internalPeriodSizeInFrames;
        pDevice->playback.internalPeriods            = internalPeriods;
    }

#ifdef MA_DEBUG_OUTPUT
    printf("DEVICE INFO\n");
    printf("    Format:      %s\n", ma_get_format_name(internalFormat));
    printf("    Channels:    %d\n", internalChannels);
    printf("    Sample Rate: %d\n", internalSampleRate);
    printf("    Period Size: %d\n", internalPeriodSizeInFrames);
    printf("    Periods:     %d\n", internalPeriods);
    printf("    appbufsz:    %d\n", par.appbufsz);
    printf("    round:       %d\n", par.round);
#endif

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

                    sometimes be empty in which case we just fall back to "ai.name" which is less user
                    friendly, but usually has a value.
                    */
                    if (ai.handle[0] != '\0') {
                        ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), ai.handle, (size_t)-1);
                    } else {
                        ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), ai.name, (size_t)-1);
                    }

                    /* The device can be both playback and capture. */
                    if (!isTerminating && (ai.caps & PCM_CAP_OUTPUT) != 0) {
                        isTerminating = !callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
                    }
                    if (!isTerminating && (ai.caps & PCM_CAP_INPUT) != 0) {
                        isTerminating = !callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
                    }

                    if (isTerminating) {
                        break;
                    }
                }
            }
        }
    } else {
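
The enumeration above treats one OSS device as potentially both playback and capture by testing capability bits and invoking the callback once per role. A minimal sketch of that pattern, using hypothetical flag values rather than the real PCM_CAP_* constants from the OSS headers:

/* Sketch: hypothetical capability bits; the real values come from <sys/soundcard.h>. */
#define TOY_CAP_OUTPUT (1u << 0)
#define TOY_CAP_INPUT  (1u << 1)

typedef void (*enum_cb)(const char* pName, int isPlayback, void* pUserData);

static void report_device(const char* pName, unsigned int caps, enum_cb callback, void* pUserData)
{
    if ((caps & TOY_CAP_OUTPUT) != 0) {
        callback(pName, 1, pUserData);   /* playback role */
    }
    if ((caps & TOY_CAP_INPUT) != 0) {
        callback(pName, 0, pUserData);   /* capture role */
    }
}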

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

    result = ioctl(fdTemp, SNDCTL_SYSINFO, &si);
    if (result != -1) {
        int iAudioDevice;
        for (iAudioDevice = 0; iAudioDevice < si.numaudios; ++iAudioDevice) {
            oss_audioinfo ai;
            ai.dev = iAudioDevice;
            result = ioctl(fdTemp, SNDCTL_AUDIOINFO, &ai);
            if (result != -1) {
                if (ma_strcmp(ai.devnode, pDeviceID->oss) == 0) {
                    /* It has the same name, so now just confirm the type. */
                    if ((deviceType == ma_device_type_playback && ((ai.caps & PCM_CAP_OUTPUT) != 0)) ||
                        (deviceType == ma_device_type_capture  && ((ai.caps & PCM_CAP_INPUT)  != 0))) {
                        unsigned int formatMask;

                        /* ID */
                        ma_strncpy_s(pDeviceInfo->id.oss, sizeof(pDeviceInfo->id.oss), ai.devnode, (size_t)-1);

                        /*
                        The human readable device name should be in the "ai.handle" variable, but it can
                        sometimes be empty in which case we just fall back to "ai.name" which is less user
                        friendly, but usually has a value.
                        */

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

typedef int32_t ma_aaudio_sharing_mode_t;
typedef int32_t ma_aaudio_format_t;
typedef int32_t ma_aaudio_stream_state_t;
typedef int32_t ma_aaudio_performance_mode_t;
typedef int32_t ma_aaudio_data_callback_result_t;

/* Result codes. miniaudio only cares about the success code. */
#define MA_AAUDIO_OK                               0

/* Directions. */
#define MA_AAUDIO_DIRECTION_OUTPUT                 0
#define MA_AAUDIO_DIRECTION_INPUT                  1

/* Sharing modes. */
#define MA_AAUDIO_SHARING_MODE_EXCLUSIVE           0
#define MA_AAUDIO_SHARING_MODE_SHARED              1

/* Formats. */
#define MA_AAUDIO_FORMAT_PCM_I16                   1
#define MA_AAUDIO_FORMAT_PCM_FLOAT                 2

/* Stream states. */

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

    return MA_ERROR;
}

static void ma_stream_error_callback__aaudio(ma_AAudioStream* pStream, void* pUserData, ma_aaudio_result_t error)
{
    ma_device* pDevice = (ma_device*)pUserData;
    MA_ASSERT(pDevice != NULL);

    (void)error;

#if defined(MA_DEBUG_OUTPUT)
    printf("[AAudio] ERROR CALLBACK: error=%d, AAudioStream_getState()=%d\n", error, ((MA_PFN_AAudioStream_getState)pDevice->pContext->aaudio.AAudioStream_getState)(pStream));
#endif

    /*
    From the documentation for AAudio, when a device is disconnected all we can do is stop it. However, we cannot stop it from the callback - we need
    to do it from another thread. Therefore we are going to use an event thread for the AAudio backend to do this cleanly and safely.
    */
    if (((MA_PFN_AAudioStream_getState)pDevice->pContext->aaudio.AAudioStream_getState)(pStream) == MA_AAUDIO_STREAM_STATE_DISCONNECTED) {
#if defined(MA_DEBUG_OUTPUT)
        printf("[AAudio] Device Disconnected.\n");
#endif
    }
}

static ma_aaudio_data_callback_result_t ma_stream_data_callback_capture__aaudio(ma_AAudioStream* pStream, void* pUserData, void* pAudioData, int32_t frameCount)
{
    ma_device* pDevice = (ma_device*)pUserData;
    MA_ASSERT(pDevice != NULL);
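
The error-callback comment above captures a common constraint: the stream cannot be stopped from inside its own callback, so the callback only records the event and a separate thread performs the stop. A minimal sketch of that hand-off using POSIX threads; stop_stream() is a hypothetical placeholder for whatever actually tears the device down:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  g_cond = PTHREAD_COND_INITIALIZER;
static bool g_deviceDisconnected = false;

/* Called from the audio callback: record the event and wake the worker. Do not stop here. */
static void on_error_from_callback(void)
{
    pthread_mutex_lock(&g_lock);
    g_deviceDisconnected = true;
    pthread_cond_signal(&g_cond);
    pthread_mutex_unlock(&g_lock);
}

/* Event thread: waits for the flag, then stops the stream outside of the callback. */
static void* event_thread(void* pUserData)
{
    (void)pUserData;
    pthread_mutex_lock(&g_lock);
    while (!g_deviceDisconnected) {
        pthread_cond_wait(&g_cond, &g_lock);
    }
    pthread_mutex_unlock(&g_lock);
    /* stop_stream();  <-- hypothetical: stop/uninit the device from this thread. */
    return NULL;
}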

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN


    resultAA = ((MA_PFN_AAudio_createStreamBuilder)pContext->aaudio.AAudio_createStreamBuilder)(&pBuilder);
    if (resultAA != MA_AAUDIO_OK) {
        return ma_result_from_aaudio(resultAA);
    }

    if (pDeviceID != NULL) {
        ((MA_PFN_AAudioStreamBuilder_setDeviceId)pContext->aaudio.AAudioStreamBuilder_setDeviceId)(pBuilder, pDeviceID->aaudio);
    }

    ((MA_PFN_AAudioStreamBuilder_setDirection)pContext->aaudio.AAudioStreamBuilder_setDirection)(pBuilder, (deviceType == ma_device_type_playback) ? MA_AAUDIO_DIRECTION_OUTPUT : MA_AAUDIO_DIRECTION_INPUT);
    ((MA_PFN_AAudioStreamBuilder_setSharingMode)pContext->aaudio.AAudioStreamBuilder_setSharingMode)(pBuilder, (shareMode == ma_share_mode_shared) ? MA_AAUDIO_SHARING_MODE_SHARED : MA_AAUDIO_SHARING_MODE_EXCLUSIVE);

    if (pConfig != NULL) {
        ma_uint32 bufferCapacityInFrames;

        if (pDevice == NULL || !pDevice->usingDefaultSampleRate) {
            ((MA_PFN_AAudioStreamBuilder_setSampleRate)pContext->aaudio.AAudioStreamBuilder_setSampleRate)(pBuilder, pConfig->sampleRate);
        }

        if (deviceType == ma_device_type_capture) {

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN


typedef SLresult (SLAPIENTRY * ma_slCreateEngine_proc)(SLObjectItf* pEngine, SLuint32 numOptions, SLEngineOption* pEngineOptions, SLuint32 numInterfaces, SLInterfaceID* pInterfaceIds, SLboolean* pInterfaceRequired);

/* OpenSL|ES has one-per-application objects :( */
static SLObjectItf g_maEngineObjectSL    = NULL;
static SLEngineItf g_maEngineSL          = NULL;
static ma_uint32   g_maOpenSLInitCounter = 0;
static ma_spinlock g_maOpenSLSpinlock    = 0;   /* For init/uninit. */

#define MA_OPENSL_OBJ(p)         (*((SLObjectItf)(p)))
#define MA_OPENSL_OUTPUTMIX(p)   (*((SLOutputMixItf)(p)))
#define MA_OPENSL_PLAY(p)        (*((SLPlayItf)(p)))
#define MA_OPENSL_RECORD(p)      (*((SLRecordItf)(p)))

#ifdef MA_ANDROID
#define MA_OPENSL_BUFFERQUEUE(p) (*((SLAndroidSimpleBufferQueueItf)(p)))
#else
#define MA_OPENSL_BUFFERQUEUE(p) (*((SLBufferQueueItf)(p)))
#endif

static ma_result ma_result_from_OpenSL(SLuint32 result)
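
The MA_OPENSL_* macros above hide OpenSL's COM-like calling convention: an interface handle is a pointer to a pointer to a table of function pointers, and every method takes the handle as its first argument. A minimal sketch of that pattern in plain C, with toy types instead of the real SL interfaces:

#include <stdio.h>

/* Sketch: a COM-style interface. The handle is `toy_itf*`, i.e. a pointer to the vtable pointer. */
typedef struct toy_vtable toy_vtable;
typedef const toy_vtable* toy_itf;

struct toy_vtable {
    int (*GetValue)(toy_itf* self);
};

static int toy_get_value(toy_itf* self) { (void)self; return 42; }
static const toy_vtable g_toyVtable = { toy_get_value };

#define TOY_OBJ(p) (*((toy_itf*)(p)))   /* same shape as MA_OPENSL_OBJ(p) */

int main(void)
{
    toy_itf  itf    = &g_toyVtable;
    toy_itf* handle = &itf;
    printf("%d\n", TOY_OBJ(handle)->GetValue(handle));   /* dereference once to reach the vtable */
    return 0;
}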

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

        ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), (const char*)desc.deviceName, (size_t)-1);
    }

    goto return_detailed_info;
#else
    goto return_default_device;
#endif

return_default_device:
    if (pDeviceID != NULL) {
        if ((deviceType == ma_device_type_playback && pDeviceID->opensl != SL_DEFAULTDEVICEID_AUDIOOUTPUT) ||
            (deviceType == ma_device_type_capture  && pDeviceID->opensl != SL_DEFAULTDEVICEID_AUDIOINPUT)) {
            return MA_NO_DEVICE;   /* Don't know the device. */
        }
    }

    /* Name / Description */
    if (deviceType == ma_device_type_playback) {
        ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
    } else {
        ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
    }

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN


    if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
        ma_SLDataFormat_PCM pcm;
        SLDataLocator_IODevice locatorDevice;
        SLDataSource source;
        SLDataSink sink;

        ma_SLDataFormat_PCM_init__opensl(pConfig->capture.format, pConfig->capture.channels, pConfig->sampleRate, pConfig->capture.channelMap, &pcm);

        locatorDevice.locatorType = SL_DATALOCATOR_IODEVICE;
        locatorDevice.deviceType  = SL_IODEVICE_AUDIOINPUT;
        locatorDevice.deviceID    = (pConfig->capture.pDeviceID == NULL) ? SL_DEFAULTDEVICEID_AUDIOINPUT : pConfig->capture.pDeviceID->opensl;
        locatorDevice.device      = NULL;

        source.pLocator = &locatorDevice;
        source.pFormat  = NULL;

        sink.pLocator = &queue;
        sink.pFormat  = (SLDataFormat_PCM*)&pcm;

        resultSL = (*g_maEngineSL)->CreateAudioRecorder(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioRecorderObj, &source, &sink, 1, itfIDs1, itfIDsRequired1);
        if (resultSL == SL_RESULT_CONTENT_UNSUPPORTED) {

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

            ma_device_uninit__opensl(pDevice);
            return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to create output mix.", ma_result_from_OpenSL(resultSL));
        }

        resultSL = MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->Realize((SLObjectItf)pDevice->opensl.pOutputMixObj, SL_BOOLEAN_FALSE);
        if (resultSL != SL_RESULT_SUCCESS) {
            ma_device_uninit__opensl(pDevice);
            return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to realize output mix object.", ma_result_from_OpenSL(resultSL));
        }

        resultSL = MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->GetInterface((SLObjectItf)pDevice->opensl.pOutputMixObj, (SLInterfaceID)pContext->opensl.SL_IID_OUTPUTMIX, &pDevice->opensl.pOutputMix);
        if (resultSL != SL_RESULT_SUCCESS) {
            ma_device_uninit__opensl(pDevice);
            return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_OUTPUTMIX interface.", ma_result_from_OpenSL(resultSL));
        }

        /* Set the output device. */
        if (pConfig->playback.pDeviceID != NULL) {
            SLuint32 deviceID_OpenSL = pConfig->playback.pDeviceID->opensl;
            MA_OPENSL_OUTPUTMIX(pDevice->opensl.pOutputMix)->ReRoute((SLOutputMixItf)pDevice->opensl.pOutputMix, 1, &deviceID_OpenSL);
        }
        
        source.pLocator = &queue;
        source.pFormat  = (SLDataFormat_PCM*)&pcm;

        outmixLocator.locatorType = SL_DATALOCATOR_OUTPUTMIX;
        outmixLocator.outputMix   = (SLObjectItf)pDevice->opensl.pOutputMixObj;

        sink.pLocator = &outmixLocator;
        sink.pFormat  = NULL;

        resultSL = (*g_maEngineSL)->CreateAudioPlayer(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioPlayerObj, &source, &sink, 1, itfIDs1, itfIDsRequired1);
        if (resultSL == SL_RESULT_CONTENT_UNSUPPORTED) {
            /* Unsupported format. Fall back to something safer and try again. If this fails, just abort. */
            pcm.formatType = SL_DATAFORMAT_PCM;
            pcm.numChannels = 2;

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

    result = ma_dlsym_SLInterfaceID__opensl(pContext, "SL_IID_RECORD", &pContext->opensl.SL_IID_RECORD);
    if (result != MA_SUCCESS) {
        return result;
    }

    result = ma_dlsym_SLInterfaceID__opensl(pContext, "SL_IID_PLAY", &pContext->opensl.SL_IID_PLAY);
    if (result != MA_SUCCESS) {
        return result;
    }

    result = ma_dlsym_SLInterfaceID__opensl(pContext, "SL_IID_OUTPUTMIX", &pContext->opensl.SL_IID_OUTPUTMIX);
    if (result != MA_SUCCESS) {
        return result;
    }

    pContext->opensl.slCreateEngine = (ma_proc)ma_dlsym(pContext, pContext->opensl.libOpenSLES, "slCreateEngine");
    if (pContext->opensl.slCreateEngine == NULL) {
        ma_post_log_message(pContext, NULL, MA_LOG_LEVEL_INFO, "[OpenSL|ES] Cannot find symbol slCreateEngine.");
        return MA_NO_BACKEND;
    }

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

        if (result == MA_SUCCESS) {
            result = ma_mutex_init(&pContext->deviceEnumLock);
            if (result != MA_SUCCESS) {
                ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_WARNING, "Failed to initialize mutex for device enumeration. ma_context_get_devices() is not thread safe.", result);
            }
            result = ma_mutex_init(&pContext->deviceInfoLock);
            if (result != MA_SUCCESS) {
                ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_WARNING, "Failed to initialize mutex for device info retrieval. ma_context_get_device_info() is not thread safe.", result);
            }

#ifdef MA_DEBUG_OUTPUT
            printf("[miniaudio] Endian:  %s\n", ma_is_little_endian() ? "LE" : "BE");
            printf("[miniaudio] SSE2:    %s\n", ma_has_sse2()    ? "YES" : "NO");
            printf("[miniaudio] AVX2:    %s\n", ma_has_avx2()    ? "YES" : "NO");
            printf("[miniaudio] AVX512F: %s\n", ma_has_avx512f() ? "YES" : "NO");
            printf("[miniaudio] NEON:    %s\n", ma_has_neon()    ? "YES" : "NO");
#endif

            pContext->backend = backend;
            return result;
        }

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

static ma_uint64 ma_decoder_internal_on_read_pcm_frames__mp3(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount)
{
    drmp3* pMP3;

    MA_ASSERT(pDecoder   != NULL);
    MA_ASSERT(pFramesOut != NULL);

    pMP3 = (drmp3*)pDecoder->pInternalDecoder;
    MA_ASSERT(pMP3 != NULL);

#if defined(DR_MP3_FLOAT_OUTPUT)
    MA_ASSERT(pDecoder->internalFormat == ma_format_f32);
    return drmp3_read_pcm_frames_f32(pMP3, frameCount, (float*)pFramesOut);
#else
    MA_ASSERT(pDecoder->internalFormat == ma_format_s16);
    return drmp3_read_pcm_frames_s16(pMP3, frameCount, (drmp3_int16*)pFramesOut);
#endif
}

static ma_result ma_decoder_internal_on_seek_to_pcm_frame__mp3(ma_decoder* pDecoder, ma_uint64 frameIndex)
{

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

        return MA_OUT_OF_MEMORY;
    }

    allocationCallbacks.pUserData = pDecoder->allocationCallbacks.pUserData;
    allocationCallbacks.onMalloc  = pDecoder->allocationCallbacks.onMalloc;
    allocationCallbacks.onRealloc = pDecoder->allocationCallbacks.onRealloc;
    allocationCallbacks.onFree    = pDecoder->allocationCallbacks.onFree;

    /*
    Try opening the decoder first. We always use whatever dr_mp3 reports for channel count and sample rate. The format is determined by
    the presence of DR_MP3_FLOAT_OUTPUT.
    */
    if (!drmp3_init(pMP3, ma_decoder_internal_on_read__mp3, ma_decoder_internal_on_seek__mp3, pDecoder, &allocationCallbacks)) {
        ma__free_from_callbacks(pMP3, &pDecoder->allocationCallbacks);
        return MA_ERROR;
    }

    /* If we get here it means we successfully initialized the MP3 decoder. We can now initialize the rest of the ma_decoder. */
    pDecoder->onReadPCMFrames        = ma_decoder_internal_on_read_pcm_frames__mp3;
    pDecoder->onSeekToPCMFrame       = ma_decoder_internal_on_seek_to_pcm_frame__mp3;
    pDecoder->onUninit               = ma_decoder_internal_on_uninit__mp3;
    pDecoder->onGetLengthInPCMFrames = ma_decoder_internal_on_get_length_in_pcm_frames__mp3;
    pDecoder->pInternalDecoder       = pMP3;

    /* Internal format. */
#if defined(DR_MP3_FLOAT_OUTPUT)
    pDecoder->internalFormat     = ma_format_f32;
#else
    pDecoder->internalFormat     = ma_format_s16;
#endif
    pDecoder->internalChannels   = pMP3->channels;
    pDecoder->internalSampleRate = pMP3->sampleRate;
    ma_get_standard_channel_map(ma_standard_channel_map_default, pDecoder->internalChannels, pDecoder->internalChannelMap);

    return MA_SUCCESS;
}
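
Both the decoder glue above and the dr_mp3 code further down key the native sample type off a single compile-time define: with DR_MP3_FLOAT_OUTPUT it is f32, otherwise s16. A minimal standalone sketch of that pattern with a hypothetical MY_FLOAT_OUTPUT switch:

#include <stdint.h>
#include <stdio.h>

/* Sketch: one define selects the native sample type for the whole translation unit. */
#if defined(MY_FLOAT_OUTPUT)
    typedef float   my_sample_t;
    #define MY_FORMAT_NAME "f32"
#else
    typedef int16_t my_sample_t;
    #define MY_FORMAT_NAME "s16"
#endif

int main(void)
{
    printf("native sample: %s, %u bytes\n", MY_FORMAT_NAME, (unsigned)sizeof(my_sample_t));
    return 0;
}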

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

            y[2*18] = t[1][i] + t[1][i + 1];
            y[3*18] = t[2][i + 1] + t[3][i] + t[3][i + 1];
        }
        y[0*18] = t[0][7];
        y[1*18] = t[2][7] + t[3][7];
        y[2*18] = t[1][7];
        y[3*18] = t[3][7];
    }
#endif
}
#ifndef DR_MP3_FLOAT_OUTPUT
typedef drmp3_int16 drmp3d_sample_t;
static drmp3_int16 drmp3d_scale_pcm(float sample)
{
    drmp3_int16 s;
#if DRMP3_HAVE_ARMV6
    drmp3_int32 s32 = (drmp3_int32)(sample + .5f);
    s32 -= (s32 < 0);
    s = (drmp3_int16)drmp3_clip_int16_arm(s32);
#else
    if (sample >=  32766.5) return (drmp3_int16) 32767;
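
The scaling function above rounds a float sample to the nearest 16-bit value and clamps at the extremes. A minimal portable sketch of the same idea without the ARM fast path, assuming the input is already on the -32768..32767 scale:

#include <stdint.h>

/* Sketch: convert one float PCM sample to signed 16-bit with clamping and round-half-away-from-zero. */
static int16_t clamp_to_s16(float sample)
{
    if (sample >=  32767.0f) return  32767;
    if (sample <= -32768.0f) return -32768;
    return (int16_t)(sample + (sample >= 0.0f ? 0.5f : -0.5f));
}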

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

        zlin[4*i]     = xl[18*(31 - i)];
        zlin[4*i + 1] = xr[18*(31 - i)];
        zlin[4*i + 2] = xl[1 + 18*(31 - i)];
        zlin[4*i + 3] = xr[1 + 18*(31 - i)];
        zlin[4*i + 64] = xl[1 + 18*(1 + i)];
        zlin[4*i + 64 + 1] = xr[1 + 18*(1 + i)];
        zlin[4*i - 64 + 2] = xl[18*(1 + i)];
        zlin[4*i - 64 + 3] = xr[18*(1 + i)];
        DRMP3_V0(0) DRMP3_V2(1) DRMP3_V1(2) DRMP3_V2(3) DRMP3_V1(4) DRMP3_V2(5) DRMP3_V1(6) DRMP3_V2(7)
        {
#ifndef DR_MP3_FLOAT_OUTPUT
#if DRMP3_HAVE_SSE
            static const drmp3_f4 g_max = { 32767.0f, 32767.0f, 32767.0f, 32767.0f };
            static const drmp3_f4 g_min = { -32768.0f, -32768.0f, -32768.0f, -32768.0f };
            __m128i pcm8 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_max_ps(_mm_min_ps(a, g_max), g_min)),
                                           _mm_cvtps_epi32(_mm_max_ps(_mm_min_ps(b, g_max), g_min)));
            dstr[(15 - i)*nch] = (drmp3_int16)_mm_extract_epi16(pcm8, 1);
            dstr[(17 + i)*nch] = (drmp3_int16)_mm_extract_epi16(pcm8, 5);
            dstl[(15 - i)*nch] = (drmp3_int16)_mm_extract_epi16(pcm8, 0);
            dstl[(17 + i)*nch] = (drmp3_int16)_mm_extract_epi16(pcm8, 4);
            dstr[(47 - i)*nch] = (drmp3_int16)_mm_extract_epi16(pcm8, 3);

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

    if (pMP3 == NULL) {
        return;
    }
#ifndef DR_MP3_NO_STDIO
    if (pMP3->onRead == drmp3__on_read_stdio) {
        fclose((FILE*)pMP3->pUserData);
    }
#endif
    drmp3__free_from_callbacks(pMP3->pData, &pMP3->allocationCallbacks);
}
#if defined(DR_MP3_FLOAT_OUTPUT)
static void drmp3_f32_to_s16(drmp3_int16* dst, const float* src, drmp3_uint64 sampleCount)
{
    drmp3_uint64 i;
    drmp3_uint64 i4;
    drmp3_uint64 sampleCount4;
    i = 0;
    sampleCount4 = sampleCount >> 2;
    for (i4 = 0; i4 < sampleCount4; i4 += 1) {
        float x0 = src[i+0];
        float x1 = src[i+1];

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

        i += 4;
    }
    for (; i < sampleCount; i += 1) {
        float x = src[i];
        x = ((x < -1) ? -1 : ((x > 1) ? 1 : x));
        x = x * 32767.0f;
        dst[i] = (drmp3_int16)x;
    }
}
#endif
#if !defined(DR_MP3_FLOAT_OUTPUT)
static void drmp3_s16_to_f32(float* dst, const drmp3_int16* src, drmp3_uint64 sampleCount)
{
    drmp3_uint64 i;
    for (i = 0; i < sampleCount; i += 1) {
        float x = (float)src[i];
        x = x * 0.000030517578125f;
        dst[i] = x;
    }
}
#endif
static drmp3_uint64 drmp3_read_pcm_frames_raw(drmp3* pMP3, drmp3_uint64 framesToRead, void* pBufferOut)
{
    drmp3_uint64 totalFramesRead = 0;
    DRMP3_ASSERT(pMP3 != NULL);
    DRMP3_ASSERT(pMP3->onRead != NULL);
    while (framesToRead > 0) {
        drmp3_uint32 framesToConsume = (drmp3_uint32)DRMP3_MIN(pMP3->pcmFramesRemainingInMP3Frame, framesToRead);
        if (pBufferOut != NULL) {
        #if defined(DR_MP3_FLOAT_OUTPUT)
            float* pFramesOutF32 = (float*)DRMP3_OFFSET_PTR(pBufferOut,          sizeof(float) * totalFramesRead                   * pMP3->channels);
            float* pFramesInF32  = (float*)DRMP3_OFFSET_PTR(&pMP3->pcmFrames[0], sizeof(float) * pMP3->pcmFramesConsumedInMP3Frame * pMP3->mp3FrameChannels);
            DRMP3_COPY_MEMORY(pFramesOutF32, pFramesInF32, sizeof(float) * framesToConsume * pMP3->channels);
        #else
            drmp3_int16* pFramesOutS16 = (drmp3_int16*)DRMP3_OFFSET_PTR(pBufferOut,          sizeof(drmp3_int16) * totalFramesRead                   * pMP3->channels);
            drmp3_int16* pFramesInS16  = (drmp3_int16*)DRMP3_OFFSET_PTR(&pMP3->pcmFrames[0], sizeof(drmp3_int16) * pMP3->pcmFramesConsumedInMP3Frame * pMP3->mp3FrameChannels);
            DRMP3_COPY_MEMORY(pFramesOutS16, pFramesInS16, sizeof(drmp3_int16) * framesToConsume * pMP3->channels);
        #endif
        }
        pMP3->currentPCMFrame              += framesToConsume;
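
The copy above locates its read and write positions with byte offsets of the form frames * channels * sizeof(sample). A minimal sketch of that addressing for interleaved PCM, with a plain helper in place of dr_mp3's DRMP3_OFFSET_PTR:

#include <stddef.h>
#include <string.h>

/* Sketch: copy `frameCount` interleaved frames starting at frame `srcFrameOffset` into dst. */
static void copy_frames(void* dst, const void* src, size_t srcFrameOffset,
                        size_t frameCount, size_t channels, size_t bytesPerSample)
{
    size_t bytesPerFrame = channels * bytesPerSample;
    const unsigned char* pSrc = (const unsigned char*)src + srcFrameOffset * bytesPerFrame;
    memcpy(dst, pSrc, frameCount * bytesPerFrame);
}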

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

            break;
        }
    }
    return totalFramesRead;
}
DRMP3_API drmp3_uint64 drmp3_read_pcm_frames_f32(drmp3* pMP3, drmp3_uint64 framesToRead, float* pBufferOut)
{
    if (pMP3 == NULL || pMP3->onRead == NULL) {
        return 0;
    }
#if defined(DR_MP3_FLOAT_OUTPUT)
    return drmp3_read_pcm_frames_raw(pMP3, framesToRead, pBufferOut);
#else
    {
        drmp3_int16 pTempS16[8192];
        drmp3_uint64 totalPCMFramesRead = 0;
        while (totalPCMFramesRead < framesToRead) {
            drmp3_uint64 framesJustRead;
            drmp3_uint64 framesRemaining = framesToRead - totalPCMFramesRead;
            drmp3_uint64 framesToReadNow = DRMP3_COUNTOF(pTempS16) / pMP3->channels;
            if (framesToReadNow > framesRemaining) {

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

        }
        return totalPCMFramesRead;
    }
#endif
}
DRMP3_API drmp3_uint64 drmp3_read_pcm_frames_s16(drmp3* pMP3, drmp3_uint64 framesToRead, drmp3_int16* pBufferOut)
{
    if (pMP3 == NULL || pMP3->onRead == NULL) {
        return 0;
    }
#if !defined(DR_MP3_FLOAT_OUTPUT)
    return drmp3_read_pcm_frames_raw(pMP3, framesToRead, pBufferOut);
#else
    {
        float pTempF32[4096];
        drmp3_uint64 totalPCMFramesRead = 0;
        while (totalPCMFramesRead < framesToRead) {
            drmp3_uint64 framesJustRead;
            drmp3_uint64 framesRemaining = framesToRead - totalPCMFramesRead;
            drmp3_uint64 framesToReadNow = DRMP3_COUNTOF(pTempF32) / pMP3->channels;
            if (framesToReadNow > framesRemaining) {
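
When the requested output format is not the compiled-in native format, the two readers above decode into a fixed temporary buffer and convert chunk by chunk; the chunk size in frames is the scratch buffer's sample capacity divided by the channel count. A minimal sketch of that loop with a hypothetical read_native() callback and normalized float samples:

#include <stddef.h>
#include <stdint.h>

#define TEMP_SAMPLE_CAPACITY 4096

typedef size_t (*read_native_fn)(void* pCtx, size_t frameCount, float* pOut);   /* returns frames read */

/* Sketch: read normalized float frames natively and convert them to s16, chunk by chunk. Assumes channels > 0. */
static size_t read_s16_via_temp(void* pCtx, read_native_fn readNative,
                                size_t framesToRead, size_t channels, int16_t* pOut)
{
    float temp[TEMP_SAMPLE_CAPACITY];
    size_t totalFramesRead = 0;

    while (totalFramesRead < framesToRead) {
        size_t framesRemaining = framesToRead - totalFramesRead;
        size_t framesNow = TEMP_SAMPLE_CAPACITY / channels;
        size_t framesJustRead, i;
        if (framesNow > framesRemaining) {
            framesNow = framesRemaining;
        }

        framesJustRead = readNative(pCtx, framesNow, temp);
        for (i = 0; i < framesJustRead * channels; i += 1) {
            float x = temp[i];
            x = (x < -1.0f) ? -1.0f : ((x > 1.0f) ? 1.0f : x);
            pOut[totalFramesRead * channels + i] = (int16_t)(x * 32767.0f);
        }

        totalFramesRead += framesJustRead;
        if (framesJustRead < framesNow) {
            break;   /* end of stream */
        }
    }
    return totalFramesRead;
}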

share/public_html/static/music_inc/src/miniaudio.h  view on Meta::CPAN

    DRMP3_ASSERT(pMP3->onSeek != NULL);
    if (!drmp3__on_seek(pMP3, 0, drmp3_seek_origin_start)) {
        return DRMP3_FALSE;
    }
    drmp3_reset(pMP3);
    return DRMP3_TRUE;
}
static drmp3_bool32 drmp3_seek_forward_by_pcm_frames__brute_force(drmp3* pMP3, drmp3_uint64 frameOffset)
{
    drmp3_uint64 framesRead;
#if defined(DR_MP3_FLOAT_OUTPUT)
    framesRead = drmp3_read_pcm_frames_f32(pMP3, frameOffset, NULL);
#else
    framesRead = drmp3_read_pcm_frames_s16(pMP3, frameOffset, NULL);
#endif
    if (framesRead != frameOffset) {
        return DRMP3_FALSE;
    }
    return DRMP3_TRUE;
}
static drmp3_bool32 drmp3_seek_to_pcm_frame__brute_force(drmp3* pMP3, drmp3_uint64 frameIndex)
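
Brute-force seeking above is simply decode-and-discard: read frameOffset frames into a NULL buffer and check that the full count came back. A minimal sketch of the same pattern over a generic read function (names are hypothetical):

#include <stdint.h>
#include <stdbool.h>

typedef uint64_t (*read_frames_fn)(void* pCtx, uint64_t frameCount, void* pOut);   /* NULL pOut = discard */

/* Sketch: advance the decoder by reading and discarding `frameOffset` frames. */
static bool seek_forward_brute_force(void* pCtx, read_frames_fn readFrames, uint64_t frameOffset)
{
    uint64_t framesRead = readFrames(pCtx, frameOffset, NULL);
    return framesRead == frameOffset;   /* anything less means end of stream or an error */
}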

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

    +----------------------------------+--------------------------------------------------------------------+
    | MA_NO_RUNTIME_LINKING            | Disables runtime linking. This is useful for passing Apple's       |
    |                                  | notarization process. When enabling this, you may need to avoid    |
    |                                  | using `-std=c89` or `-std=c99` on Linux builds or else you may end |
    |                                  | up with compilation errors due to conflicts with `timespec` and    |
    |                                  | `timeval` data types.                                              |
    |                                  |                                                                    |
    |                                  | You may need to enable this if your target platform does not allow |
    |                                  | runtime linking via `dlopen()`.                                    |
    +----------------------------------+--------------------------------------------------------------------+
    | MA_DEBUG_OUTPUT                  | Enable `printf()` output of debug logs (`MA_LOG_LEVEL_DEBUG`).     |
    +----------------------------------+--------------------------------------------------------------------+
    | MA_COINIT_VALUE                  | Windows only. The value to pass to internal calls to               |
    |                                  | `CoInitializeEx()`. Defaults to `COINIT_MULTITHREADED`.            |
    +----------------------------------+--------------------------------------------------------------------+
    | MA_API                           | Controls how public APIs should be decorated. Default is `extern`. |
    +----------------------------------+--------------------------------------------------------------------+
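
These options are compile-time defines, so they only take effect in the translation unit that compiles the implementation. A minimal sketch of how that usually looks for a single-header library; treat the particular defines chosen here, and the MINIAUDIO_IMPLEMENTATION switch, as illustrative assumptions:

/* Sketch: configuration is defined before the implementation is compiled. */
#define MA_DEBUG_OUTPUT                 /* route MA_LOG_LEVEL_DEBUG logs through printf() */
/*#define MA_NO_RUNTIME_LINKING*/       /* link backends at build time instead of via dlopen() */
#define MINIAUDIO_IMPLEMENTATION        /* assumed single-header implementation switch */
#include "miniaudio.h"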


3. Definitions
==============

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

    |                                         | bus, and both buses must have the same channel    |
    |                                         | counts.                                           |
    +-----------------------------------------+---------------------------------------------------+
    | MA_NODE_FLAG_CONTINUOUS_PROCESSING      | Causes the processing callback to be called even  |
    |                                         | when no data is available to be read from input   |
    |                                         | attachments. This is useful for effects like      |
    |                                         | echoes where there will be a tail of audio data   |
    |                                         | that still needs to be processed even when the    |
    |                                         | original data sources have reached their ends.    |
    +-----------------------------------------+---------------------------------------------------+
    | MA_NODE_FLAG_ALLOW_NULL_INPUT           | Used in conjunction with                          |
    |                                         | `MA_NODE_FLAG_CONTINUOUS_PROCESSING`. When this   |
    |                                         | is set, the `ppFramesIn` parameter of the         |
    |                                         | processing callback will be set to NULL when      |
    |                                         | there are no input frames available. When         |
    |                                         | this is unset, silence will be posted to the      |
    |                                         | processing callback.                              |
    +-----------------------------------------+---------------------------------------------------+
    | MA_NODE_FLAG_DIFFERENT_PROCESSING_RATES | Used to tell miniaudio that input and output      |
    |                                         | frames are processed at different rates. You      |
    |                                         | should set this for any nodes that perform        |
    |                                         | resampling.                                       |
    +-----------------------------------------+---------------------------------------------------+
    | MA_NODE_FLAG_SILENT_OUTPUT              | Used to tell miniaudio that a node produces only  |
    |                                         | silent output. This is useful for nodes where you |
    |                                         | don't want the output to contribute to the final  |
    |                                         | mix. An example might be if you want to split     |
    |                                         | your stream and have one branch output to a file. |
    |                                         | When using this flag, you should avoid writing to |
    |                                         | the output buffer of the node's processing        |
    |                                         | callback because miniaudio will ignore it anyway. |
    +-----------------------------------------+---------------------------------------------------+
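
These are plain bit flags (see the ma_node_flags enum later in this header), so related behaviours are combined with bitwise OR. A small sketch for the echo-tail case described above, using only names that appear in this file:

/* Sketch: flags for a node that must keep running after its inputs end (e.g. an echo tail). */
static ma_uint32 make_tail_node_flags(void)
{
    return MA_NODE_FLAG_CONTINUOUS_PROCESSING | MA_NODE_FLAG_ALLOW_NULL_INPUT;
}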


share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

    ma_aaudio_content_type_movie,                   /* AAUDIO_CONTENT_TYPE_MOVIE */
    ma_aaudio_content_type_music,                   /* AAUDIO_CONTENT_TYPE_MUSIC */
    ma_aaudio_content_type_sonification,            /* AAUDIO_CONTENT_TYPE_SONIFICATION */
    ma_aaudio_content_type_speech                   /* AAUDIO_CONTENT_TYPE_SPEECH */
} ma_aaudio_content_type;

/* AAudio input presets. */
typedef enum
{
    ma_aaudio_input_preset_default = 0,             /* Leaves the input preset unset. */
    ma_aaudio_input_preset_generic,                 /* AAUDIO_INPUT_PRESET_GENERIC */
    ma_aaudio_input_preset_camcorder,               /* AAUDIO_INPUT_PRESET_CAMCORDER */
    ma_aaudio_input_preset_unprocessed,             /* AAUDIO_INPUT_PRESET_UNPROCESSED */
    ma_aaudio_input_preset_voice_recognition,       /* AAUDIO_INPUT_PRESET_VOICE_RECOGNITION */
    ma_aaudio_input_preset_voice_communication,     /* AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION */
    ma_aaudio_input_preset_voice_performance        /* AAUDIO_INPUT_PRESET_VOICE_PERFORMANCE */
} ma_aaudio_input_preset;


typedef union
{
    ma_int64 counter;
    double counterD;
} ma_timer;

typedef union

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

#endif
#ifdef MA_SUPPORT_OPENSL
        struct
        {
            ma_handle libOpenSLES;
            ma_handle SL_IID_ENGINE;
            ma_handle SL_IID_AUDIOIODEVICECAPABILITIES;
            ma_handle SL_IID_ANDROIDSIMPLEBUFFERQUEUE;
            ma_handle SL_IID_RECORD;
            ma_handle SL_IID_PLAY;
            ma_handle SL_IID_OUTPUTMIX;
            ma_handle SL_IID_ANDROIDCONFIGURATION;
            ma_proc   slCreateEngine;
        } opensl;
#endif
#ifdef MA_SUPPORT_WEBAUDIO
        struct
        {
            int _unused;
        } webaudio;
#endif

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN


typedef struct ma_node_graph ma_node_graph;
typedef void ma_node;


/* Node flags. */
typedef enum
{
    MA_NODE_FLAG_PASSTHROUGH                = 0x00000001,
    MA_NODE_FLAG_CONTINUOUS_PROCESSING      = 0x00000002,
    MA_NODE_FLAG_ALLOW_NULL_INPUT           = 0x00000004,
    MA_NODE_FLAG_DIFFERENT_PROCESSING_RATES = 0x00000008,
    MA_NODE_FLAG_SILENT_OUTPUT              = 0x00000010
} ma_node_flags;


/* The playback state of a node. Either started or stopped. */
typedef enum
{
    ma_node_state_started = 0,
    ma_node_state_stopped = 1
} ma_node_state;

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

typedef struct ma_node_output_bus ma_node_output_bus;
struct ma_node_output_bus
{
    /* Immutable. */
    ma_node* pNode;                                         /* The node that owns this output bus. The input node. Will be null for dummy head and tail nodes. */
    ma_uint8 outputBusIndex;                                /* The index of the output bus on pNode that this output bus represents. */
    ma_uint8 channels;                                      /* The number of channels in the audio stream for this bus. */

    /* Mutable via multiple threads. Must be used atomically. The weird ordering here is for packing reasons. */
    MA_ATOMIC(1, ma_uint8) inputNodeInputBusIndex;          /* The index of the input bus on the input. Required for detaching. */
    MA_ATOMIC(4, ma_uint32) flags;                          /* Some state flags for tracking the read state of the output buffer. A combination of MA_NODE_OUTPUT_BUS_FLAG_*. */
    MA_ATOMIC(4, ma_uint32) refCount;                       /* Reference count for some thread-safety when detaching. */
    MA_ATOMIC(4, ma_bool32) isAttached;                     /* This is used to prevent iteration of nodes that are in the middle of being detached. Used for thread safety. */
    MA_ATOMIC(4, ma_spinlock) lock;                         /* Unfortunate lock, but significantly simplifies the implementation. Required for thread-safe attaching and detaching. */
    MA_ATOMIC(4, float) volume;                             /* Linear. */
    MA_ATOMIC(MA_SIZEOF_PTR, ma_node_output_bus*) pNext;    /* If null, it's the tail node or detached. */
    MA_ATOMIC(MA_SIZEOF_PTR, ma_node_output_bus*) pPrev;    /* If null, it's the head node or detached. */
    MA_ATOMIC(MA_SIZEOF_PTR, ma_node*) pInputNode;          /* The node that this output bus is attached to. Required for detaching. */
};

/*

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

    switch (logLevel)
    {
        case MA_LOG_LEVEL_DEBUG:   return "DEBUG";
        case MA_LOG_LEVEL_INFO:    return "INFO";
        case MA_LOG_LEVEL_WARNING: return "WARNING";
        case MA_LOG_LEVEL_ERROR:   return "ERROR";
        default:                   return "ERROR";
    }
}

#if defined(MA_DEBUG_OUTPUT)

/* Customize this to use a specific tag in __android_log_print() for debug output messages. */
#ifndef MA_ANDROID_LOG_TAG
#define MA_ANDROID_LOG_TAG  "miniaudio"
#endif

void ma_log_callback_debug(void* pUserData, ma_uint32 level, const char* pMessage)
{
    (void)pUserData;

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

    #ifndef MA_NO_THREADING
    {
        ma_result result = ma_mutex_init(&pLog->lock);
        if (result != MA_SUCCESS) {
            return result;
        }
    }
    #endif

    /* If we're using debug output, enable it. */
    #if defined(MA_DEBUG_OUTPUT)
    {
        ma_log_register_callback(pLog, ma_log_callback_init(ma_log_callback_debug, NULL)); /* Doesn't really matter if this fails. */
    }
    #endif

    return MA_SUCCESS;
}

MA_API void ma_log_uninit(ma_log* pLog)
{

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN


    while (c89atomic_load_32(&pAllocator->count) > 0) {
        /* CAS */
        ma_uint32 oldBitfield;
        ma_uint32 newBitfield;

        oldBitfield = c89atomic_load_32(&pAllocator->pGroups[iGroup].bitfield);  /* <-- This copy must happen. The compiler must not optimize this away. */
        newBitfield = oldBitfield & ~(1 << iBit);

        /* Debugging for checking for double-frees. */
        #if defined(MA_DEBUG_OUTPUT)
        {
            if ((oldBitfield & (1 << iBit)) == 0) {
                MA_ASSERT(MA_FALSE);    /* Double free detected.*/
            }
        }
        #endif

        if (c89atomic_compare_and_swap_32(&pAllocator->pGroups[iGroup].bitfield, oldBitfield, newBitfield) == oldBitfield) {
            c89atomic_fetch_sub_32(&pAllocator->count, 1);
            return MA_SUCCESS;
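
The free path above is a classic lock-free compare-and-swap loop: read the bitfield, compute a copy with the bit cleared, and only commit if no other thread changed the word in between. A minimal standalone sketch of the same loop using C11 atomics instead of the library's c89atomic wrappers:

#include <stdatomic.h>
#include <stdint.h>
#include <stdbool.h>

/* Sketch: atomically clear bit `iBit` in *pBitfield; returns false if the bit was already clear (double free). */
static bool atomic_clear_bit(_Atomic uint32_t* pBitfield, unsigned int iBit)
{
    uint32_t oldBitfield = atomic_load(pBitfield);
    for (;;) {
        uint32_t newBitfield = oldBitfield & ~(1u << iBit);
        if ((oldBitfield & (1u << iBit)) == 0) {
            return false;   /* double free detected */
        }
        /* On failure, oldBitfield is reloaded with the current value and the loop retries. */
        if (atomic_compare_exchange_weak(pBitfield, &oldBitfield, newBitfield)) {
            return true;
        }
    }
}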

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN


    return (ULONG)newRefCount;
}

static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceStateChanged(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, DWORD dwNewState)
{
    ma_bool32 isThisDevice = MA_FALSE;
    ma_bool32 isCapture    = MA_FALSE;
    ma_bool32 isPlayback   = MA_FALSE;

#ifdef MA_DEBUG_OUTPUT
    /*ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "IMMNotificationClient_OnDeviceStateChanged(pDeviceID=%S, dwNewState=%u)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)", (unsigned int)dwNewState);*/
#endif

    /*
    There have been reports of a hang when a playback device is disconnected. The idea with this code is to explicitly stop the device if we detect
    that the device is disabled or has been unplugged.
    */
    if (pThis->pDevice->wasapi.allowCaptureAutoStreamRouting && (pThis->pDevice->type == ma_device_type_capture || pThis->pDevice->type == ma_device_type_duplex || pThis->pDevice->type == ma_device_type_loopback)) {
        isCapture = MA_TRUE;
        if (wcscmp(pThis->pDevice->capture.id.wasapi, pDeviceID) == 0) {

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

                }
            }
        }
    }

    return S_OK;
}

static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceAdded(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID)
{
#ifdef MA_DEBUG_OUTPUT
    /*ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "IMMNotificationClient_OnDeviceAdded(pDeviceID=%S)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)");*/
#endif

    /* We don't need to worry about this event for our purposes. */
    (void)pThis;
    (void)pDeviceID;
    return S_OK;
}

static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceRemoved(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID)
{
#ifdef MA_DEBUG_OUTPUT
    /*ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "IMMNotificationClient_OnDeviceRemoved(pDeviceID=%S)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)");*/
#endif

    /* We don't need to worry about this event for our purposes. */
    (void)pThis;
    (void)pDeviceID;
    return S_OK;
}

static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDefaultDeviceChanged(ma_IMMNotificationClient* pThis, ma_EDataFlow dataFlow, ma_ERole role, LPCWSTR pDefaultDeviceID)
{
#ifdef MA_DEBUG_OUTPUT
    /*ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "IMMNotificationClient_OnDefaultDeviceChanged(dataFlow=%d, role=%d, pDefaultDeviceID=%S)\n", dataFlow, role, (pDefaultDeviceID != NULL) ? pDefaultDeviceID : L"(NULL)");*/
#endif

    /* We only ever use the eConsole role in miniaudio. */
    if (role != ma_eConsole) {
        ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "[WASAPI] Stream rerouting: role != eConsole\n");
        return S_OK;
    }

    /* We only care about devices with the same data flow and role as the current device. */

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

                ma_device_start(pThis->pDevice);
            }
        }
    }

    return S_OK;
}

static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnPropertyValueChanged(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, const PROPERTYKEY key)
{
#ifdef MA_DEBUG_OUTPUT
    /*ma_log_postf(ma_device_get_log(pThis->pDevice), MA_LOG_LEVEL_DEBUG, "IMMNotificationClient_OnPropertyValueChanged(pDeviceID=%S)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)");*/
#endif

    (void)pThis;
    (void)pDeviceID;
    (void)key;
    return S_OK;
}

static ma_IMMNotificationClientVtbl g_maNotificationCientVtbl = {

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

typedef OSStatus (* ma_AudioOutputUnitStart_proc)(AudioUnit inUnit);
typedef OSStatus (* ma_AudioOutputUnitStop_proc)(AudioUnit inUnit);
typedef OSStatus (* ma_AudioUnitAddPropertyListener_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitPropertyListenerProc inProc, void* inProcUserData);
typedef OSStatus (* ma_AudioUnitGetPropertyInfo_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, UInt32* outDataSize, Boolean* outWriteable);
typedef OSStatus (* ma_AudioUnitGetProperty_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, void* outData, UInt32* ioDataSize);
typedef OSStatus (* ma_AudioUnitSetProperty_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, const void* inData, UInt32 inDataSize);
typedef OSStatus (* ma_AudioUnitInitialize_proc)(AudioUnit inUnit);
typedef OSStatus (* ma_AudioUnitRender_proc)(AudioUnit inUnit, AudioUnitRenderActionFlags* ioActionFlags, const AudioTimeStamp* inTimeStamp, UInt32 inOutputBusNumber, UInt32 inNumberFrames, AudioBufferList* ioData);


#define MA_COREAUDIO_OUTPUT_BUS    0
#define MA_COREAUDIO_INPUT_BUS     1

#if defined(MA_APPLE_DESKTOP)
static ma_result ma_device_reinit_internal__coreaudio(ma_device* pDevice, ma_device_type deviceType, ma_bool32 disposePreviousAudioUnit);
#endif

/*
Core Audio

So far, Core Audio has been the worst backend to work with due to being both unintuitive and having almost no documentation
apart from comments in the headers (which admittedly are quite good). For my own purposes, and for anybody out there whose

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

    AudioUnitElement deviceBus;
    UInt32 channelLayoutSize;
    OSStatus status;
    AudioChannelLayout* pChannelLayout;
    ma_result result;

    MA_ASSERT(pContext != NULL);

    if (deviceType == ma_device_type_playback) {
        deviceScope = kAudioUnitScope_Input;
        deviceBus = MA_COREAUDIO_OUTPUT_BUS;
    } else {
        deviceScope = kAudioUnitScope_Output;
        deviceBus = MA_COREAUDIO_INPUT_BUS;
    }

    status = ((ma_AudioUnitGetPropertyInfo_proc)pContext->coreaudio.AudioUnitGetPropertyInfo)(audioUnit, kAudioUnitProperty_AudioChannelLayout, deviceScope, deviceBus, &channelLayoutSize, NULL);
    if (status != noErr) {
        return ma_result_from_OSStatus(status);
    }

    pChannelLayout = (AudioChannelLayout*)ma_malloc(channelLayoutSize, &pContext->allocationCallbacks);
    if (pChannelLayout == NULL) {
        return MA_OUT_OF_MEMORY;

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

        if (component == NULL) {
            return MA_FAILED_TO_INIT_BACKEND;
        }

        status = ((ma_AudioComponentInstanceNew_proc)pContext->coreaudio.AudioComponentInstanceNew)(component, &audioUnit);
        if (status != noErr) {
            return ma_result_from_OSStatus(status);
        }

        formatScope   = (deviceType == ma_device_type_playback) ? kAudioUnitScope_Input : kAudioUnitScope_Output;
        formatElement = (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_INPUT_BUS;

        propSize = sizeof(bestFormat);
        status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, &propSize);
        if (status != noErr) {
            ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(audioUnit);
            return ma_result_from_OSStatus(status);
        }

        ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(audioUnit);
        audioUnit = NULL;

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

        return ma_result_from_OSStatus(status);
    }


    /* The input/output buses need to be explicitly enabled and disabled. We set the flag based on the output unit first, then we just swap it for input. */
    enableIOFlag = 1;
    if (deviceType == ma_device_type_capture) {
        enableIOFlag = 0;
    }

    status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, MA_COREAUDIO_OUTPUT_BUS, &enableIOFlag, sizeof(enableIOFlag));
    if (status != noErr) {
        ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
        return ma_result_from_OSStatus(status);
    }

    enableIOFlag = (enableIOFlag == 0) ? 1 : 0;
    status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, MA_COREAUDIO_INPUT_BUS, &enableIOFlag, sizeof(enableIOFlag));
    if (status != noErr) {
        ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
        return ma_result_from_OSStatus(status);
    }


    /* Set the device to use with this audio unit. This is only used on desktop since we are using defaults on mobile. */
#if defined(MA_APPLE_DESKTOP)
    status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &deviceObjectID, sizeof(deviceObjectID));
    if (status != noErr) {

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

    Ideally we would like to use a format that's as close to the hardware as possible so we can get as close to a passthrough as possible. The
    most important property is the sample rate. miniaudio can do format conversion for any sample rate and channel count, but cannot do the same
    for the sample data format. If the sample data format is not supported by miniaudio it must be ignored completely.

    On mobile platforms this is a bit different. We just force the use of whatever the audio unit's current format is set to.
    */
    {
        AudioStreamBasicDescription origFormat;
        UInt32 origFormatSize = sizeof(origFormat);
        AudioUnitScope   formatScope   = (deviceType == ma_device_type_playback) ? kAudioUnitScope_Input : kAudioUnitScope_Output;
        AudioUnitElement formatElement = (deviceType == ma_device_type_playback) ? MA_COREAUDIO_OUTPUT_BUS : MA_COREAUDIO_INPUT_BUS;

        if (deviceType == ma_device_type_playback) {
            status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, MA_COREAUDIO_OUTPUT_BUS, &origFormat, &origFormatSize);
        } else {
            status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, MA_COREAUDIO_INPUT_BUS, &origFormat, &origFormatSize);
        }
        if (status != noErr) {
            ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);
            return ma_result_from_OSStatus(status);
        }

    #if defined(MA_APPLE_DESKTOP)
        result = ma_find_best_format__coreaudio(pContext, deviceObjectID, deviceType, pData->formatIn, pData->channelsIn, pData->sampleRateIn, &origFormat, &bestFormat);
        if (result != MA_SUCCESS) {
            ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit);

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

                    sometimes be empty in which case we just fall back to "ai.name" which is less user
                    friendly, but usually has a value.
                    */
                    if (ai.handle[0] != '\0') {
                        ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), ai.handle, (size_t)-1);
                    } else {
                        ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), ai.name, (size_t)-1);
                    }

                    /* The device can be both playback and capture. */
                    if (!isTerminating && (ai.caps & PCM_CAP_OUTPUT) != 0) {
                        isTerminating = !callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
                    }
                    if (!isTerminating && (ai.caps & PCM_CAP_INPUT) != 0) {
                        isTerminating = !callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
                    }

                    if (isTerminating) {
                        break;
                    }
                }
            }
        }
    } else {

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

    result = ioctl(fdTemp, SNDCTL_SYSINFO, &si);
    if (result != -1) {
        int iAudioDevice;
        for (iAudioDevice = 0; iAudioDevice < si.numaudios; ++iAudioDevice) {
            oss_audioinfo ai;
            ai.dev = iAudioDevice;
            result = ioctl(fdTemp, SNDCTL_AUDIOINFO, &ai);
            if (result != -1) {
                if (ma_strcmp(ai.devnode, pDeviceID->oss) == 0) {
                    /* It has the same name, so now just confirm the type. */
                    if ((deviceType == ma_device_type_playback && ((ai.caps & PCM_CAP_OUTPUT) != 0)) ||
                        (deviceType == ma_device_type_capture  && ((ai.caps & PCM_CAP_INPUT)  != 0))) {
                        unsigned int formatMask;

                        /* ID */
                        ma_strncpy_s(pDeviceInfo->id.oss, sizeof(pDeviceInfo->id.oss), ai.devnode, (size_t)-1);

                        /*
                        The human readable device name should be in the "ai.handle" variable, but it can
                        sometimes be empty in which case we just fall back to "ai.name" which is less user
                        friendly, but usually has a value.
                        */

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

typedef int32_t                                         ma_aaudio_data_callback_result_t;
typedef struct ma_AAudioStreamBuilder_t*                ma_AAudioStreamBuilder;
typedef struct ma_AAudioStream_t*                       ma_AAudioStream;

#define MA_AAUDIO_UNSPECIFIED                           0

/* Result codes. miniaudio only cares about the success code. */
#define MA_AAUDIO_OK                                    0

/* Directions. */
#define MA_AAUDIO_DIRECTION_OUTPUT                      0
#define MA_AAUDIO_DIRECTION_INPUT                       1

/* Sharing modes. */
#define MA_AAUDIO_SHARING_MODE_EXCLUSIVE                0
#define MA_AAUDIO_SHARING_MODE_SHARED                   1

/* Formats. */
#define MA_AAUDIO_FORMAT_PCM_I16                        1
#define MA_AAUDIO_FORMAT_PCM_FLOAT                      2

/* Stream states. */

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

#define MA_AAUDIO_SYSTEM_USAGE_VEHICLE_STATUS           1002
#define MA_AAUDIO_SYSTEM_USAGE_ANNOUNCEMENT             1003

/* Content types. */
#define MA_AAUDIO_CONTENT_TYPE_SPEECH                   1
#define MA_AAUDIO_CONTENT_TYPE_MUSIC                    2
#define MA_AAUDIO_CONTENT_TYPE_MOVIE                    3
#define MA_AAUDIO_CONTENT_TYPE_SONIFICATION             4

/* Input presets. */
#define MA_AAUDIO_INPUT_PRESET_GENERIC                  1
#define MA_AAUDIO_INPUT_PRESET_CAMCORDER                5
#define MA_AAUDIO_INPUT_PRESET_VOICE_RECOGNITION        6
#define MA_AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION      7
#define MA_AAUDIO_INPUT_PRESET_UNPROCESSED              9
#define MA_AAUDIO_INPUT_PRESET_VOICE_PERFORMANCE        10

/* Callback results. */
#define MA_AAUDIO_CALLBACK_RESULT_CONTINUE              0
#define MA_AAUDIO_CALLBACK_RESULT_STOP                  1


typedef ma_aaudio_data_callback_result_t (* ma_AAudioStream_dataCallback) (ma_AAudioStream* pStream, void* pUserData, void* pAudioData, int32_t numFrames);
typedef void                             (* ma_AAudioStream_errorCallback)(ma_AAudioStream *pStream, void *pUserData, ma_aaudio_result_t error);

typedef ma_aaudio_result_t       (* MA_PFN_AAudio_createStreamBuilder)                   (ma_AAudioStreamBuilder** ppBuilder);

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

        case ma_aaudio_content_type_speech:       return MA_AAUDIO_CONTENT_TYPE_SPEECH;
        default: break;
    }

    return MA_AAUDIO_CONTENT_TYPE_SPEECH;
}

static ma_aaudio_input_preset_t ma_to_input_preset__aaudio(ma_aaudio_input_preset inputPreset)
{
    switch (inputPreset) {
        case ma_aaudio_input_preset_generic:             return MA_AAUDIO_INPUT_PRESET_GENERIC;
        case ma_aaudio_input_preset_camcorder:           return MA_AAUDIO_INPUT_PRESET_CAMCORDER;
        case ma_aaudio_input_preset_unprocessed:         return MA_AAUDIO_INPUT_PRESET_UNPROCESSED;
        case ma_aaudio_input_preset_voice_recognition:   return MA_AAUDIO_INPUT_PRESET_VOICE_RECOGNITION;
        case ma_aaudio_input_preset_voice_communication: return MA_AAUDIO_INPUT_PRESET_VOICE_COMMUNICATION;
        case ma_aaudio_input_preset_voice_performance:   return MA_AAUDIO_INPUT_PRESET_VOICE_PERFORMANCE;
        default: break;
    }

    return MA_AAUDIO_INPUT_PRESET_GENERIC;
}

static void ma_stream_error_callback__aaudio(ma_AAudioStream* pStream, void* pUserData, ma_aaudio_result_t error)
{
    ma_device* pDevice = (ma_device*)pUserData;
    MA_ASSERT(pDevice != NULL);

    (void)error;

    ma_log_postf(ma_device_get_log(pDevice), MA_LOG_LEVEL_INFO, "[AAudio] ERROR CALLBACK: error=%d, AAudioStream_getState()=%d\n", error, ((MA_PFN_AAudioStream_getState)pDevice->pContext->aaudio.AAudioStream_getState)(pStream));

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN


    resultAA = ((MA_PFN_AAudio_createStreamBuilder)pContext->aaudio.AAudio_createStreamBuilder)(&pBuilder);
    if (resultAA != MA_AAUDIO_OK) {
        return ma_result_from_aaudio(resultAA);
    }

    if (pDeviceID != NULL) {
        ((MA_PFN_AAudioStreamBuilder_setDeviceId)pContext->aaudio.AAudioStreamBuilder_setDeviceId)(pBuilder, pDeviceID->aaudio);
    }

    ((MA_PFN_AAudioStreamBuilder_setDirection)pContext->aaudio.AAudioStreamBuilder_setDirection)(pBuilder, (deviceType == ma_device_type_playback) ? MA_AAUDIO_DIRECTION_OUTPUT : MA_AAUDIO_DIRECTION_INPUT);
    ((MA_PFN_AAudioStreamBuilder_setSharingMode)pContext->aaudio.AAudioStreamBuilder_setSharingMode)(pBuilder, (shareMode == ma_share_mode_shared) ? MA_AAUDIO_SHARING_MODE_SHARED : MA_AAUDIO_SHARING_MODE_EXCLUSIVE);


    /* If we have a device descriptor make sure we configure the stream builder to take our requested parameters. */
    if (pDescriptor != NULL) {
        MA_ASSERT(pConfig != NULL); /* We must have a device config if we also have a descriptor. The config is required for AAudio specific configuration options. */

        if (pDescriptor->sampleRate != 0) {
            ((MA_PFN_AAudioStreamBuilder_setSampleRate)pContext->aaudio.AAudioStreamBuilder_setSampleRate)(pBuilder, pDescriptor->sampleRate);
        }

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN


typedef SLresult (SLAPIENTRY * ma_slCreateEngine_proc)(SLObjectItf* pEngine, SLuint32 numOptions, SLEngineOption* pEngineOptions, SLuint32 numInterfaces, SLInterfaceID* pInterfaceIds, SLboolean* pInterfaceRequired);

/* OpenSL|ES has one-per-application objects :( */
static SLObjectItf g_maEngineObjectSL    = NULL;
static SLEngineItf g_maEngineSL          = NULL;
static ma_uint32   g_maOpenSLInitCounter = 0;
static ma_spinlock g_maOpenSLSpinlock    = 0;   /* For init/uninit. */

#define MA_OPENSL_OBJ(p)         (*((SLObjectItf)(p)))
#define MA_OPENSL_OUTPUTMIX(p)   (*((SLOutputMixItf)(p)))
#define MA_OPENSL_PLAY(p)        (*((SLPlayItf)(p)))
#define MA_OPENSL_RECORD(p)      (*((SLRecordItf)(p)))

#ifdef MA_ANDROID
#define MA_OPENSL_BUFFERQUEUE(p) (*((SLAndroidSimpleBufferQueueItf)(p)))
#else
#define MA_OPENSL_BUFFERQUEUE(p) (*((SLBufferQueueItf)(p)))
#endif
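
The globals above give OpenSL|ES a single per-process engine, protected by a spinlock and reference-counted by g_maOpenSLInitCounter. A generic sketch of that refcounted init/uninit pattern in C11 atomics follows; the ex_* names and printf bodies are illustrative only, not miniaudio's implementation.

#include <stdatomic.h>
#include <stdio.h>

/* Process-wide state, analogous to the engine object, init counter and spinlock above. */
static atomic_flag g_lock = ATOMIC_FLAG_INIT;   /* spinlock guarding init/uninit */
static unsigned    g_initCounter = 0;           /* how many contexts share the engine */

static void ex_lock(void)   { while (atomic_flag_test_and_set_explicit(&g_lock, memory_order_acquire)) { /* spin */ } }
static void ex_unlock(void) { atomic_flag_clear_explicit(&g_lock, memory_order_release); }

static void ex_engine_ref(void)
{
    ex_lock();
    if (g_initCounter == 0) {
        printf("creating shared engine\n");    /* first user creates the one-per-application object */
    }
    g_initCounter += 1;
    ex_unlock();
}

static void ex_engine_unref(void)
{
    ex_lock();
    if (g_initCounter > 0 && --g_initCounter == 0) {
        printf("destroying shared engine\n");  /* last user tears it down */
    }
    ex_unlock();
}

int main(void)
{
    ex_engine_ref();
    ex_engine_ref();
    ex_engine_unref();
    ex_engine_unref();
    return 0;
}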

static ma_result ma_result_from_OpenSL(SLuint32 result)

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

    goto return_default_device;
#endif

return_default_device:;
    cbResult = MA_TRUE;

    /* Playback. */
    if (cbResult) {
        ma_device_info deviceInfo;
        MA_ZERO_OBJECT(&deviceInfo);
        deviceInfo.id.opensl = SL_DEFAULTDEVICEID_AUDIOOUTPUT;
        ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
        cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData);
    }

    /* Capture. */
    if (cbResult) {
        ma_device_info deviceInfo;
        MA_ZERO_OBJECT(&deviceInfo);
        deviceInfo.id.opensl = SL_DEFAULTDEVICEID_AUDIOINPUT;
        ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
        cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData);
    }

    return MA_SUCCESS;
}
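
Backend enumeration functions like the one above report each device through a callback and stop as soon as the callback returns false. A sketch of how application code typically drives this from the public side, assuming the usual ma_context_init / ma_context_enumerate_devices signatures:

/* Define MINIAUDIO_IMPLEMENTATION in exactly one translation unit before including miniaudio.h. */
#include <stdio.h>
#include "miniaudio.h"

static ma_bool32 ex_enum_callback(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData)
{
    (void)pContext; (void)pUserData;
    printf("%s: %s\n", (deviceType == ma_device_type_playback) ? "playback" : "capture", pInfo->name);
    return MA_TRUE;  /* Returning MA_FALSE tells the backend to stop reporting further devices. */
}

int main(void)
{
    ma_context context;
    if (ma_context_init(NULL, 0, NULL, &context) != MA_SUCCESS) {
        return 1;
    }
    ma_context_enumerate_devices(&context, ex_enum_callback, NULL);
    ma_context_uninit(&context);
    return 0;
}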

static void ma_context_add_data_format_ex__opensl(ma_context* pContext, ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_device_info* pDeviceInfo)
{
    MA_ASSERT(pContext    != NULL);

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

        ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), (const char*)desc.deviceName, (size_t)-1);
    }

    goto return_detailed_info;
#else
    goto return_default_device;
#endif

return_default_device:
    if (pDeviceID != NULL) {
        if ((deviceType == ma_device_type_playback && pDeviceID->opensl != SL_DEFAULTDEVICEID_AUDIOOUTPUT) ||
            (deviceType == ma_device_type_capture  && pDeviceID->opensl != SL_DEFAULTDEVICEID_AUDIOINPUT)) {
            return MA_NO_DEVICE;   /* Don't know the device. */
        }
    }

    /* ID and Name / Description */
    if (deviceType == ma_device_type_playback) {
        pDeviceInfo->id.opensl = SL_DEFAULTDEVICEID_AUDIOOUTPUT;
        ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1);
    } else {
        pDeviceInfo->id.opensl = SL_DEFAULTDEVICEID_AUDIOINPUT;
        ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1);
    }

    pDeviceInfo->isDefault = MA_TRUE;

    goto return_detailed_info;


return_detailed_info:

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

    if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) {
        ma_SLDataFormat_PCM pcm;
        SLDataLocator_IODevice locatorDevice;
        SLDataSource source;
        SLDataSink sink;
        SLAndroidConfigurationItf pRecorderConfig;

        ma_SLDataFormat_PCM_init__opensl(pDescriptorCapture->format, pDescriptorCapture->channels, pDescriptorCapture->sampleRate, pDescriptorCapture->channelMap, &pcm);

        locatorDevice.locatorType = SL_DATALOCATOR_IODEVICE;
        locatorDevice.deviceType  = SL_IODEVICE_AUDIOINPUT;
        locatorDevice.deviceID    = SL_DEFAULTDEVICEID_AUDIOINPUT;  /* Must always use the default device with Android. */
        locatorDevice.device      = NULL;

        source.pLocator = &locatorDevice;
        source.pFormat  = NULL;

        queue.numBuffers = pDescriptorCapture->periodCount;

        sink.pLocator = &queue;
        sink.pFormat  = (SLDataFormat_PCM*)&pcm;

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

            return ma_result_from_OpenSL(resultSL);
        }

        resultSL = MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->Realize((SLObjectItf)pDevice->opensl.pOutputMixObj, SL_BOOLEAN_FALSE);
        if (resultSL != SL_RESULT_SUCCESS) {
            ma_device_uninit__opensl(pDevice);
            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to realize output mix object.");
            return ma_result_from_OpenSL(resultSL);
        }

        resultSL = MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->GetInterface((SLObjectItf)pDevice->opensl.pOutputMixObj, (SLInterfaceID)pDevice->pContext->opensl.SL_IID_OUTPUTMIX, &pDevice->opensl.pOutputMix);
        if (resultSL != SL_RESULT_SUCCESS) {
            ma_device_uninit__opensl(pDevice);
            ma_log_post(ma_device_get_log(pDevice), MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_OUTPUTMIX interface.");
            return ma_result_from_OpenSL(resultSL);
        }

        /* Set the output device. */
        if (pDescriptorPlayback->pDeviceID != NULL) {
            SLuint32 deviceID_OpenSL = pDescriptorPlayback->pDeviceID->opensl;
            MA_OPENSL_OUTPUTMIX(pDevice->opensl.pOutputMix)->ReRoute((SLOutputMixItf)pDevice->opensl.pOutputMix, 1, &deviceID_OpenSL);
        }

        queue.numBuffers = pDescriptorPlayback->periodCount;

        source.pLocator = &queue;
        source.pFormat  = (SLDataFormat_PCM*)&pcm;

        outmixLocator.locatorType = SL_DATALOCATOR_OUTPUTMIX;
        outmixLocator.outputMix   = (SLObjectItf)pDevice->opensl.pOutputMixObj;

        sink.pLocator = &outmixLocator;
        sink.pFormat  = NULL;

        resultSL = (*g_maEngineSL)->CreateAudioPlayer(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioPlayerObj, &source, &sink, ma_countof(itfIDs), itfIDs, itfIDsRequired);
        if (resultSL == SL_RESULT_CONTENT_UNSUPPORTED || resultSL == SL_RESULT_PARAMETER_INVALID) {
            /* Unsupported format. Fall back to something safer and try again. If this fails, just abort. */
            pcm.formatType = SL_DATAFORMAT_PCM;
            pcm.numChannels = 2;

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

        ma_dlclose(pContext, pContext->opensl.libOpenSLES);
        return result;
    }

    result = ma_dlsym_SLInterfaceID__opensl(pContext, "SL_IID_PLAY", &pContext->opensl.SL_IID_PLAY);
    if (result != MA_SUCCESS) {
        ma_dlclose(pContext, pContext->opensl.libOpenSLES);
        return result;
    }

    result = ma_dlsym_SLInterfaceID__opensl(pContext, "SL_IID_OUTPUTMIX", &pContext->opensl.SL_IID_OUTPUTMIX);
    if (result != MA_SUCCESS) {
        ma_dlclose(pContext, pContext->opensl.libOpenSLES);
        return result;
    }

    result = ma_dlsym_SLInterfaceID__opensl(pContext, "SL_IID_ANDROIDCONFIGURATION", &pContext->opensl.SL_IID_ANDROIDCONFIGURATION);
    if (result != MA_SUCCESS) {
        ma_dlclose(pContext, pContext->opensl.libOpenSLES);
        return result;
    }

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

        ma_dlclose(pContext, pContext->opensl.libOpenSLES);
        ma_log_post(ma_context_get_log(pContext), MA_LOG_LEVEL_INFO, "[OpenSL] Cannot find symbol slCreateEngine.");
        return MA_NO_BACKEND;
    }
#else
    pContext->opensl.SL_IID_ENGINE                    = (ma_handle)SL_IID_ENGINE;
    pContext->opensl.SL_IID_AUDIOIODEVICECAPABILITIES = (ma_handle)SL_IID_AUDIOIODEVICECAPABILITIES;
    pContext->opensl.SL_IID_ANDROIDSIMPLEBUFFERQUEUE  = (ma_handle)SL_IID_ANDROIDSIMPLEBUFFERQUEUE;
    pContext->opensl.SL_IID_RECORD                    = (ma_handle)SL_IID_RECORD;
    pContext->opensl.SL_IID_PLAY                      = (ma_handle)SL_IID_PLAY;
    pContext->opensl.SL_IID_OUTPUTMIX                 = (ma_handle)SL_IID_OUTPUTMIX;
    pContext->opensl.SL_IID_ANDROIDCONFIGURATION      = (ma_handle)SL_IID_ANDROIDCONFIGURATION;
    pContext->opensl.slCreateEngine                   = (ma_proc)slCreateEngine;
#endif


    /* Initialize global data first if applicable. */
    ma_spinlock_lock(&g_maOpenSLSpinlock);
    {
        result = ma_context_init_engine_nolock__opensl(pContext);
    }

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

        ma_waveform_init(&waveformConfig, &waveform);
        ma_waveform_read_pcm_frames(&waveform, pFramesOut, frameCount, NULL);
    }
    #else
    {
        (void)pFramesOut;
        (void)frameCount;
        (void)format;
        (void)channels;
        (void)sampleRate;
        #if defined(MA_DEBUG_OUTPUT)
        {
            #if _MSC_VER
                #pragma message ("ma_debug_fill_pcm_frames_with_sine_wave() will do nothing because MA_NO_GENERATION is enabled.")
            #endif
        }
        #endif
    }
    #endif
}
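
The debug helper above generates its sine wave with ma_waveform unless MA_NO_GENERATION is enabled. A sketch of the same generator used directly through the public ma_waveform API; signatures are assumed to match the miniaudio version bundled here.

/* Define MINIAUDIO_IMPLEMENTATION in exactly one translation unit before including miniaudio.h. */
#include "miniaudio.h"

int main(void)
{
    float frames[1024 * 2];   /* 1024 stereo f32 frames */
    ma_waveform waveform;
    ma_waveform_config config = ma_waveform_config_init(ma_format_f32, 2, 48000, ma_waveform_type_sine, 0.2, 440.0);

    if (ma_waveform_init(&config, &waveform) != MA_SUCCESS) {
        return 1;
    }

    /* Passing NULL for the frames-read pointer, as the debug helper above does. */
    ma_waveform_read_pcm_frames(&waveform, frames, 1024, NULL);
    ma_waveform_uninit(&waveform);
    return 0;
}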

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

MA_API ma_result ma_node_graph_set_time(ma_node_graph* pNodeGraph, ma_uint64 globalTime)
{
    if (pNodeGraph == NULL) {
        return MA_INVALID_ARGS;
    }

    return ma_node_set_time(&pNodeGraph->endpoint, globalTime); /* Global time is just the local time of the endpoint. */
}


#define MA_NODE_OUTPUT_BUS_FLAG_HAS_READ    0x01    /* Whether or not this bus is ready to read more data. Only used on nodes with multiple output buses. */

static ma_result ma_node_output_bus_init(ma_node* pNode, ma_uint32 outputBusIndex, ma_uint32 channels, ma_node_output_bus* pOutputBus)
{
    MA_ASSERT(pOutputBus != NULL);
    MA_ASSERT(outputBusIndex < MA_MAX_NODE_BUS_COUNT);
    MA_ASSERT(outputBusIndex < ma_node_get_output_bus_count(pNode));
    MA_ASSERT(channels < 256);

    MA_ZERO_OBJECT(pOutputBus);

    if (channels == 0) {
        return MA_INVALID_ARGS;
    }

    pOutputBus->pNode          = pNode;
    pOutputBus->outputBusIndex = (ma_uint8)outputBusIndex;
    pOutputBus->channels       = (ma_uint8)channels;
    pOutputBus->flags          = MA_NODE_OUTPUT_BUS_FLAG_HAS_READ; /* <-- Important that this flag is set by default. */
    pOutputBus->volume         = 1;

    return MA_SUCCESS;
}

static void ma_node_output_bus_lock(ma_node_output_bus* pOutputBus)
{
    ma_spinlock_lock(&pOutputBus->lock);
}

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN


static ma_uint32 ma_node_output_bus_get_channels(const ma_node_output_bus* pOutputBus)
{
    return pOutputBus->channels;
}


static void ma_node_output_bus_set_has_read(ma_node_output_bus* pOutputBus, ma_bool32 hasRead)
{
    if (hasRead) {
        c89atomic_fetch_or_32(&pOutputBus->flags, MA_NODE_OUTPUT_BUS_FLAG_HAS_READ);
    } else {
        c89atomic_fetch_and_32(&pOutputBus->flags, (ma_uint32)~MA_NODE_OUTPUT_BUS_FLAG_HAS_READ);
    }
}

static ma_bool32 ma_node_output_bus_has_read(ma_node_output_bus* pOutputBus)
{
    return (c89atomic_load_32(&pOutputBus->flags) & MA_NODE_OUTPUT_BUS_FLAG_HAS_READ) != 0;
}
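
ma_node_output_bus_set_has_read and ma_node_output_bus_has_read manage a single bit with atomic OR, AND and load. The same bit-flag idiom in plain C11 atomics, independent of the c89atomic layer used here (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define EX_FLAG_HAS_READ 0x01u   /* same bit position as MA_NODE_OUTPUT_BUS_FLAG_HAS_READ */

static atomic_uint g_flags = 0;

static void ex_set_has_read(int hasRead)
{
    if (hasRead) {
        atomic_fetch_or(&g_flags, EX_FLAG_HAS_READ);    /* set the bit, other bits untouched */
    } else {
        atomic_fetch_and(&g_flags, ~EX_FLAG_HAS_READ);  /* clear the bit, other bits untouched */
    }
}

static int ex_has_read(void)
{
    return (atomic_load(&g_flags) & EX_FLAG_HAS_READ) != 0;
}

int main(void)
{
    ex_set_has_read(1);
    printf("has read: %d\n", ex_has_read());
    ex_set_has_read(0);
    printf("has read: %d\n", ex_has_read());
    return 0;
}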


static void ma_node_output_bus_set_is_attached(ma_node_output_bus* pOutputBus, ma_bool32 isAttached)
{
    c89atomic_exchange_32(&pOutputBus->isAttached, isAttached);
}

static ma_bool32 ma_node_output_bus_is_attached(ma_node_output_bus* pOutputBus)
{

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

    if (pFirst == NULL) {
        return MA_SUCCESS;  /* No attachments. Read nothing. */
    }

    for (pOutputBus = pFirst; pOutputBus != NULL; pOutputBus = ma_node_input_bus_next(pInputBus, pOutputBus)) {
        ma_uint32 framesProcessed = 0;
        ma_bool32 isSilentOutput = MA_FALSE;

        MA_ASSERT(pOutputBus->pNode != NULL);

        isSilentOutput = (((ma_node_base*)pOutputBus->pNode)->vtable->flags & MA_NODE_FLAG_SILENT_OUTPUT) != 0;

        if (pFramesOut != NULL) {
            /* Read. */
            float temp[MA_DATA_CONVERTER_STACK_BUFFER_SIZE / sizeof(float)];
            ma_uint32 tempCapInFrames = ma_countof(temp) / inputChannels;

            while (framesProcessed < frameCount) {
                float* pRunningFramesOut;
                ma_uint32 framesToRead;
                ma_uint32 framesJustRead;

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

                    /*
                    We need to treat nodes with continuous processing a little differently. For these ones,
                    we always want to fire the callback with the requested number of frames, regardless of
                    pNodeBase->cachedFrameCountIn, which could be 0. Also, we want to check if we can pass
                    in NULL for the input buffer to the callback.
                    */
                    if ((pNodeBase->vtable->flags & MA_NODE_FLAG_CONTINUOUS_PROCESSING) != 0) {
                        /* We're using continuous processing. Make sure we specify the whole frame count at all times. */
                        frameCountIn = framesToProcessIn;    /* Give the processing function as much input data as we've got in the buffer, including any silenced padding from short reads. */

                        if ((pNodeBase->vtable->flags & MA_NODE_FLAG_ALLOW_NULL_INPUT) != 0 && pNodeBase->consumedFrameCountIn == 0 && pNodeBase->cachedFrameCountIn == 0) {
                            consumeNullInput = MA_TRUE;
                        } else {
                            consumeNullInput = MA_FALSE;
                        }

                        /*
                        Since we're using continuous processing we're always passing in a full frame count
                        regardless of how much input data was read. If this is greater than what we read as
                        input, we'll end up with an underflow. We instead need to make sure our cached frame
                        count is set to the number of frames we'll be passing to the data callback. Not
                        doing this will result in an underflow when we "consume" the cached data later on.

                        Note that this check needs to be done after the "consumeNullInput" check above because
                        we use the property of cachedFrameCountIn being 0 to determine whether or not we
                        should be passing in a null pointer to the processing callback for when the node is
                        configured with MA_NODE_FLAG_ALLOW_NULL_INPUT.
                        */
                        if (pNodeBase->cachedFrameCountIn < (ma_uint16)frameCountIn) {
                            pNodeBase->cachedFrameCountIn = (ma_uint16)frameCountIn;
                        }
                    } else {
                        frameCountIn = pNodeBase->cachedFrameCountIn;  /* Give the processing function as much valid input data as we've got. */
                        consumeNullInput = MA_FALSE;
                    }

                    /*

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

            y[2*18] = t[1][i] + t[1][i + 1];
            y[3*18] = t[2][i + 1] + t[3][i] + t[3][i + 1];
        }
        y[0*18] = t[0][7];
        y[1*18] = t[2][7] + t[3][7];
        y[2*18] = t[1][7];
        y[3*18] = t[3][7];
    }
#endif
}
#ifndef DR_MP3_FLOAT_OUTPUT
typedef drmp3_int16 drmp3d_sample_t;
static drmp3_int16 drmp3d_scale_pcm(float sample)
{
    drmp3_int16 s;
#if DRMP3_HAVE_ARMV6
    drmp3_int32 s32 = (drmp3_int32)(sample + .5f);
    s32 -= (s32 < 0);
    s = (drmp3_int16)drmp3_clip_int16_arm(s32);
#else
    if (sample >=  32766.5) return (drmp3_int16) 32767;

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

        zlin[4*i]     = xl[18*(31 - i)];
        zlin[4*i + 1] = xr[18*(31 - i)];
        zlin[4*i + 2] = xl[1 + 18*(31 - i)];
        zlin[4*i + 3] = xr[1 + 18*(31 - i)];
        zlin[4*i + 64] = xl[1 + 18*(1 + i)];
        zlin[4*i + 64 + 1] = xr[1 + 18*(1 + i)];
        zlin[4*i - 64 + 2] = xl[18*(1 + i)];
        zlin[4*i - 64 + 3] = xr[18*(1 + i)];
        DRMP3_V0(0) DRMP3_V2(1) DRMP3_V1(2) DRMP3_V2(3) DRMP3_V1(4) DRMP3_V2(5) DRMP3_V1(6) DRMP3_V2(7)
        {
#ifndef DR_MP3_FLOAT_OUTPUT
#if DRMP3_HAVE_SSE
            static const drmp3_f4 g_max = { 32767.0f, 32767.0f, 32767.0f, 32767.0f };
            static const drmp3_f4 g_min = { -32768.0f, -32768.0f, -32768.0f, -32768.0f };
            __m128i pcm8 = _mm_packs_epi32(_mm_cvtps_epi32(_mm_max_ps(_mm_min_ps(a, g_max), g_min)),
                                           _mm_cvtps_epi32(_mm_max_ps(_mm_min_ps(b, g_max), g_min)));
            dstr[(15 - i)*nch] = (drmp3_int16)_mm_extract_epi16(pcm8, 1);
            dstr[(17 + i)*nch] = (drmp3_int16)_mm_extract_epi16(pcm8, 5);
            dstl[(15 - i)*nch] = (drmp3_int16)_mm_extract_epi16(pcm8, 0);
            dstl[(17 + i)*nch] = (drmp3_int16)_mm_extract_epi16(pcm8, 4);
            dstr[(47 - i)*nch] = (drmp3_int16)_mm_extract_epi16(pcm8, 3);

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

    if (pMP3->onRead == drmp3__on_read_stdio) {
        FILE* pFile = (FILE*)pMP3->pUserData;
        if (pFile != NULL) {
            fclose(pFile);
            pMP3->pUserData = NULL;
        }
    }
#endif
    drmp3__free_from_callbacks(pMP3->pData, &pMP3->allocationCallbacks);
}
#if defined(DR_MP3_FLOAT_OUTPUT)
static void drmp3_f32_to_s16(drmp3_int16* dst, const float* src, drmp3_uint64 sampleCount)
{
    drmp3_uint64 i;
    drmp3_uint64 i4;
    drmp3_uint64 sampleCount4;
    i = 0;
    sampleCount4 = sampleCount >> 2;
    for (i4 = 0; i4 < sampleCount4; i4 += 1) {
        float x0 = src[i+0];
        float x1 = src[i+1];

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

        i += 4;
    }
    for (; i < sampleCount; i += 1) {
        float x = src[i];
        x = ((x < -1) ? -1 : ((x > 1) ? 1 : x));
        x = x * 32767.0f;
        dst[i] = (drmp3_int16)x;
    }
}
#endif
#if !defined(DR_MP3_FLOAT_OUTPUT)
static void drmp3_s16_to_f32(float* dst, const drmp3_int16* src, drmp3_uint64 sampleCount)
{
    drmp3_uint64 i;
    for (i = 0; i < sampleCount; i += 1) {
        float x = (float)src[i];
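        /* 0.000030517578125f == 1/32768, mapping the full s16 range onto [-1.0, 1.0). */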
        x = x * 0.000030517578125f;
        dst[i] = x;
    }
}
#endif
static drmp3_uint64 drmp3_read_pcm_frames_raw(drmp3* pMP3, drmp3_uint64 framesToRead, void* pBufferOut)
{
    drmp3_uint64 totalFramesRead = 0;
    DRMP3_ASSERT(pMP3 != NULL);
    DRMP3_ASSERT(pMP3->onRead != NULL);
    while (framesToRead > 0) {
        drmp3_uint32 framesToConsume = (drmp3_uint32)DRMP3_MIN(pMP3->pcmFramesRemainingInMP3Frame, framesToRead);
        if (pBufferOut != NULL) {
        #if defined(DR_MP3_FLOAT_OUTPUT)
            float* pFramesOutF32 = (float*)DRMP3_OFFSET_PTR(pBufferOut,          sizeof(float) * totalFramesRead                   * pMP3->channels);
            float* pFramesInF32  = (float*)DRMP3_OFFSET_PTR(&pMP3->pcmFrames[0], sizeof(float) * pMP3->pcmFramesConsumedInMP3Frame * pMP3->mp3FrameChannels);
            DRMP3_COPY_MEMORY(pFramesOutF32, pFramesInF32, sizeof(float) * framesToConsume * pMP3->channels);
        #else
            drmp3_int16* pFramesOutS16 = (drmp3_int16*)DRMP3_OFFSET_PTR(pBufferOut,          sizeof(drmp3_int16) * totalFramesRead                   * pMP3->channels);
            drmp3_int16* pFramesInS16  = (drmp3_int16*)DRMP3_OFFSET_PTR(&pMP3->pcmFrames[0], sizeof(drmp3_int16) * pMP3->pcmFramesConsumedInMP3Frame * pMP3->mp3FrameChannels);
            DRMP3_COPY_MEMORY(pFramesOutS16, pFramesInS16, sizeof(drmp3_int16) * framesToConsume * pMP3->channels);
        #endif
        }
        pMP3->currentPCMFrame              += framesToConsume;

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

            break;
        }
    }
    return totalFramesRead;
}
DRMP3_API drmp3_uint64 drmp3_read_pcm_frames_f32(drmp3* pMP3, drmp3_uint64 framesToRead, float* pBufferOut)
{
    if (pMP3 == NULL || pMP3->onRead == NULL) {
        return 0;
    }
#if defined(DR_MP3_FLOAT_OUTPUT)
    return drmp3_read_pcm_frames_raw(pMP3, framesToRead, pBufferOut);
#else
    {
        drmp3_int16 pTempS16[8192];
        drmp3_uint64 totalPCMFramesRead = 0;
        while (totalPCMFramesRead < framesToRead) {
            drmp3_uint64 framesJustRead;
            drmp3_uint64 framesRemaining = framesToRead - totalPCMFramesRead;
            drmp3_uint64 framesToReadNow = DRMP3_COUNTOF(pTempS16) / pMP3->channels;
            if (framesToReadNow > framesRemaining) {

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

        }
        return totalPCMFramesRead;
    }
#endif
}
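
Whichever branch is compiled, drmp3_read_pcm_frames_f32 returns decoded audio as 32-bit float frames. A sketch of a typical decode loop over this entry point, assuming the standalone dr_mp3 header and its drmp3_init_file / drmp3_uninit API; the input path is hypothetical.

/* Define DR_MP3_IMPLEMENTATION in exactly one translation unit before including dr_mp3.h. */
#include <stdio.h>
#include "dr_mp3.h"

int main(void)
{
    drmp3 mp3;
    float frames[4096];
    drmp3_uint64 totalFrames = 0;

    if (!drmp3_init_file(&mp3, "example.mp3", NULL)) {  /* hypothetical input file */
        return 1;
    }

    for (;;) {
        drmp3_uint64 framesRead = drmp3_read_pcm_frames_f32(&mp3, 4096 / mp3.channels, frames);
        if (framesRead == 0) {
            break;  /* end of stream */
        }
        totalFrames += framesRead;
    }

    printf("decoded %llu PCM frames at %u Hz, %u channels\n",
           (unsigned long long)totalFrames, mp3.sampleRate, mp3.channels);
    drmp3_uninit(&mp3);
    return 0;
}
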
DRMP3_API drmp3_uint64 drmp3_read_pcm_frames_s16(drmp3* pMP3, drmp3_uint64 framesToRead, drmp3_int16* pBufferOut)
{
    if (pMP3 == NULL || pMP3->onRead == NULL) {
        return 0;
    }
#if !defined(DR_MP3_FLOAT_OUTPUT)
    return drmp3_read_pcm_frames_raw(pMP3, framesToRead, pBufferOut);
#else
    {
        float pTempF32[4096];
        drmp3_uint64 totalPCMFramesRead = 0;
        while (totalPCMFramesRead < framesToRead) {
            drmp3_uint64 framesJustRead;
            drmp3_uint64 framesRemaining = framesToRead - totalPCMFramesRead;
            drmp3_uint64 framesToReadNow = DRMP3_COUNTOF(pTempF32) / pMP3->channels;
            if (framesToReadNow > framesRemaining) {

share/public_html/static/music_worklet_inprogress/decoder/deps/miniaudio/miniaudio.h  view on Meta::CPAN

    DRMP3_ASSERT(pMP3->onSeek != NULL);
    if (!drmp3__on_seek(pMP3, 0, drmp3_seek_origin_start)) {
        return DRMP3_FALSE;
    }
    drmp3_reset(pMP3);
    return DRMP3_TRUE;
}
static drmp3_bool32 drmp3_seek_forward_by_pcm_frames__brute_force(drmp3* pMP3, drmp3_uint64 frameOffset)
{
    drmp3_uint64 framesRead;
#if defined(DR_MP3_FLOAT_OUTPUT)
    framesRead = drmp3_read_pcm_frames_f32(pMP3, frameOffset, NULL);
#else
    framesRead = drmp3_read_pcm_frames_s16(pMP3, frameOffset, NULL);
#endif
    if (framesRead != frameOffset) {
        return DRMP3_FALSE;
    }
    return DRMP3_TRUE;
}
static drmp3_bool32 drmp3_seek_to_pcm_frame__brute_force(drmp3* pMP3, drmp3_uint64 frameIndex)


