Update Files

commit a36294b518 (parent ed4603cf95)
2025-01-22 16:18:30 +01:00
16718 changed files with 2960346 additions and 0 deletions

Binary files not shown: 9 image files added (25–48 KiB each).

lib/aura/.img/readme.md (new file)

@@ -0,0 +1,12 @@
# Logo Usage Guidelines
The images in this directory are licensed under the CC-BY-4.0 license.
Please also try to comply with the following, although not legally binding:
- The images may be used to refer to the Aura library, but please do **NOT** place them more prominently than your own branding. It should be clear to the average viewer that whatever you are creating is not necessarily endorsed by any copyright holder of these images or of the Aura library.
- The images may be cropped, extended or resized, but please do **NOT** alter the essential parts of the logo and its text.
If you have any questions in this regard, please open an [issue](https://github.com/MoritzBrueckner/aura/issues) or a [discussion](https://github.com/MoritzBrueckner/aura/discussions).
Thank you!

@@ -0,0 +1,9 @@
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: trailing-whitespace
  - repo: https://github.com/fsfe/reuse-tool
    rev: v1.1.2
    hooks:
      - id: reuse

lib/aura/.reuse/dep5 (new file)

@@ -0,0 +1,16 @@
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: Aura
Upstream-Contact: Moritz Brückner <gitlab@moritz-brueckner.de>
Source: https://github.com/MoritzBrueckner/aura

Files: Sources/* Backends/* Tests/*
Copyright: 2021 Moritz Brückner & Aura Contributors
License: Zlib

Files: readme.md CHANGES.md */readme.md khafile.js */kincfile.js .gitattributes .gitmodules .gitignore .pre-commit-config.yaml
Copyright: 2021 Moritz Brückner & Aura Contributors
License: CC0-1.0

Files: .img/*
Copyright: 2021 Moritz Brückner & Aura Contributors
License: CC-BY-4.0

@@ -0,0 +1,123 @@
#include <stdint.h>
#include "fft.h"
#define PI_FLOAT 3.14159265358979323846f
static uint32_t bitReverseUint32(uint32_t value, uint32_t log2N);
static uint32_t log2Unsigned(uint32_t n) {
uint32_t res = 0;
while (n >>= 1) {
res++;
}
return res;
}
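/*
 * Recursive radix-2 decimation-in-time FFT (Cooley-Tukey). Reads n input
 * samples from `times` starting at offset t with stride `step` and writes
 * the n output bins contiguously to `freqs` starting at offset f.
 * n must be a power of two.
 */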
void aura_ditfft2(const aura_complex_t * times, int t, aura_complex_t * freqs, int f, int n, int step, bool inverse) {
if (n == 1) {
aura_copy_complex_elem(freqs, f, times, t);
}
else {
const int halfLen = n >> 1;
aura_ditfft2(times, t, freqs, f, halfLen, step << 1, inverse);
aura_ditfft2(times, t + step, freqs, f + halfLen, halfLen, step << 1, inverse);
const float t_exp = ((inverse ? 1.0f : -1.0f) * 2.0f * PI_FLOAT) / n;
for (int k = 0; k < halfLen; k++) {
aura_complex_t even = { 0 };
aura_complex_t odd = { 0 };
aura_copy_complex(&even, freqs[f + k]);
aura_copy_complex(&odd, freqs[f + k + halfLen]);
const aura_complex_t twiddle = aura_cmult(aura_cexp(t_exp * k), odd);
aura_copy_complex(&(freqs[f + k]), aura_cadd(even, twiddle));
aura_copy_complex(&(freqs[f + k + halfLen]), aura_csub(even, twiddle));
}
}
}
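/*
 * Iterative radix-2 DIT FFT: reorders the input into bit-reversed index
 * order, then combines butterflies layer by layer. `exp_lut` is expected
 * to hold one twiddle rotation step per layer size, i.e.
 * exp_lut[i] = e^(-2*pi*I / 2^(i+1)) (conjugated here for inverse
 * transforms). n must be a power of two.
 */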
void aura_ditfft2_iterative(const aura_complex_t * times, aura_complex_t * freqs, int n, bool inverse, const aura_complex_t * exp_lut) {
// Decimate
const uint32_t log2N = log2Unsigned(n);
for (uint32_t i = 0; i < ((uint32_t) n); i++) {
uint32_t reversedI = bitReverseUint32(i, log2N);
if (reversedI > i) {
aura_copy_complex(&freqs[i], times[reversedI]);
aura_copy_complex(&freqs[reversedI], times[i]);
}
else if (reversedI == i) {
aura_copy_complex(&freqs[reversedI], times[i]);
}
}
int halfLayerIdx = 0;
for (int layerSize = 2; layerSize <= n; layerSize <<= 1) {
const int halfLayerSize = layerSize >> 1;
aura_complex_t expRotationStep;
aura_copy_complex(&expRotationStep, exp_lut[halfLayerIdx]);
if (inverse) {
expRotationStep = aura_cconj(expRotationStep);
}
for (int sectionOffset = 0; sectionOffset < n; sectionOffset += layerSize) {
aura_complex_t currentExpRotation = {.real = 1.0, .imag = 0.0};
for (int i = 0; i < halfLayerSize; i++) {
aura_complex_t even;
aura_complex_t odd;
aura_copy_complex(&even, freqs[sectionOffset + i]);
aura_copy_complex(&odd, freqs[sectionOffset + i + halfLayerSize]);
const aura_complex_t twiddle = aura_cmult(currentExpRotation, odd);
aura_copy_complex(&(freqs[sectionOffset + i]), aura_cadd(even, twiddle));
aura_copy_complex(&(freqs[sectionOffset + i + halfLayerSize]), aura_csub(even, twiddle));
aura_copy_complex(&currentExpRotation, aura_cmult(currentExpRotation, expRotationStep));
}
}
halfLayerIdx++;
}
}
/**
The following bit reversal code is taken (and slightly changed) from
https://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable.
The original sources are released in the public domain.
**/
// Lookup table for bit reversal where each entry is one possible byte
static const uint8_t bitReverseTable[] = {
0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0,
0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8,
0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,
0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC,
0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2,
0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,
0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6,
0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE,
0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,
0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9,
0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5,
0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,
0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3,
0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB,
0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,
0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF
};
static uint32_t bitReverseUint32(uint32_t value, uint32_t log2N) {
return ((uint32_t)(
(bitReverseTable[ value & 0xff] << 24) |
(bitReverseTable[(value >> 8 ) & 0xff] << 16) |
(bitReverseTable[(value >> 16) & 0xff] << 8 ) |
(bitReverseTable[(value >> 24) & 0xff] )
) >> (32 - log2N));
}

@@ -0,0 +1,16 @@
#pragma once
#include <stdbool.h>
#include "common_c/types/complex_t.h"
#ifdef __cplusplus
extern "C" {
#endif
void aura_ditfft2(const aura_complex_t * times, int t, aura_complex_t * freqs, int f, int n, int step, bool inverse);
void aura_ditfft2_iterative(const aura_complex_t * times, aura_complex_t * freqs, int n, bool inverse, const aura_complex_t * exp_lut);
#ifdef __cplusplus
}
#endif

@@ -0,0 +1,21 @@
#include <stdlib.h>
#include "complex_array.h"
aura_complex_t* aura_complex_array_alloc(int length) {
return calloc(length, sizeof(aura_complex_t));
}
void aura_complex_array_free(aura_complex_t* complex_array) {
free(complex_array);
}
aura_complex_t* aura_complex_array_set(aura_complex_t* complex_array, int index, float real, float imag) {
complex_array[index].real = real;
complex_array[index].imag = imag;
return &(complex_array[index]);
}
aura_complex_t* aura_complex_array_get(aura_complex_t* complex_array, int index) {
return &(complex_array[index]);
}

@@ -0,0 +1,17 @@
#pragma once
#include "common_c/types/complex_t.h"
#ifdef __cplusplus
extern "C" {
#endif
aura_complex_t* aura_complex_array_alloc(int length);
void aura_complex_array_free(aura_complex_t* complex_array);
aura_complex_t* aura_complex_array_set(aura_complex_t* complex_array, int index, float real, float imag);
aura_complex_t* aura_complex_array_get(aura_complex_t* complex_array, int index);
#ifdef __cplusplus
}
#endif

@@ -0,0 +1,41 @@
#include <math.h>
#include "complex_t.h"
void aura_copy_complex_elem(aura_complex_t *to, const int toIndex, const aura_complex_t *from, const int fromIndex) {
to[toIndex].real = from[fromIndex].real;
to[toIndex].imag = from[fromIndex].imag;
}
void aura_copy_complex(aura_complex_t *to, const aura_complex_t from) {
to->real = from.real;
to->imag = from.imag;
}
aura_complex_t aura_cexp(const float w) {
const aura_complex_t out = {.real = cosf(w), .imag = sinf(w)};
return out;
}
aura_complex_t aura_cadd(const aura_complex_t a, const aura_complex_t b) {
const aura_complex_t out = {.real = a.real + b.real, .imag = a.imag + b.imag};
return out;
}
aura_complex_t aura_csub(const aura_complex_t a, const aura_complex_t b) {
const aura_complex_t out = {.real = a.real - b.real, .imag = a.imag - b.imag};
return out;
}
aura_complex_t aura_cmult(const aura_complex_t a, const aura_complex_t b) {
const aura_complex_t out = {
.real = a.real * b.real - a.imag * b.imag,
.imag = a.real * b.imag + a.imag * b.real
};
return out;
}
aura_complex_t aura_cconj(const aura_complex_t val) {
const aura_complex_t out = {.real = val.real, .imag = -val.imag};
return out;
}

@@ -0,0 +1,24 @@
#pragma once
#ifdef __cplusplus
extern "C" {
#endif
typedef struct aura_complex_t {
float real;
float imag;
} aura_complex_t;
void aura_copy_complex_elem(aura_complex_t *to, const int toIndex, const aura_complex_t *from, const int fromIndex);
void aura_copy_complex(aura_complex_t *to, const aura_complex_t from);
aura_complex_t aura_cexp(const float w);
aura_complex_t aura_cadd(const aura_complex_t a, const aura_complex_t b);
aura_complex_t aura_csub(const aura_complex_t a, const aura_complex_t b);
aura_complex_t aura_cmult(const aura_complex_t a, const aura_complex_t b);
aura_complex_t aura_cconj(const aura_complex_t val);
#ifdef __cplusplus
}
#endif

@@ -0,0 +1,3 @@
// Keep this file so that the headers are included in the compilation
#include "hl/aura/math/fft.h"
#include "hl/aura/types/complex_array.h"

@@ -0,0 +1,3 @@
#pragma once
#define AURA_HL_FUNC(n) aura_hl_##n

@@ -0,0 +1,28 @@
#pragma once
#include <hl.h>
#include <aura/types/_ComplexArray/HL_ComplexArrayImpl.h>
#include "hl/aura/aurahl.h"
#include "common_c/math/fft.h"
#include "common_c/types/complex_t.h"
HL_PRIM void AURA_HL_FUNC(ditfft2)(aura__types___ComplexArray__HL_ComplexArrayImpl time_array, int t, aura__types___ComplexArray__HL_ComplexArrayImpl freq_array, int f, int n, int step, bool inverse) {
const aura_complex_t *times = (aura_complex_t*) time_array->self;
aura_complex_t *freqs = (aura_complex_t*) freq_array->self;
aura_ditfft2(times, t, freqs, f, n, step, inverse);
}
HL_PRIM void AURA_HL_FUNC(ditfft2_iterative)(aura__types___ComplexArray__HL_ComplexArrayImpl time_array, aura__types___ComplexArray__HL_ComplexArrayImpl freq_array, int n, bool inverse, aura__types___ComplexArray__HL_ComplexArrayImpl exp_rotation_step_table) {
const aura_complex_t *times = (aura_complex_t*) time_array->self;
aura_complex_t *freqs = (aura_complex_t*) freq_array->self;
const aura_complex_t *exp_lut = (aura_complex_t*) exp_rotation_step_table->self;
aura_ditfft2_iterative(times, freqs, n, inverse, exp_lut);
}
DEFINE_PRIM(_VOID, ditfft2, _BYTES _I32 _BYTES _I32 _I32 _I32 _BOOL)
DEFINE_PRIM(_VOID, ditfft2_iterative, _BYTES _BYTES _I32 _BOOL _BYTES)

@@ -0,0 +1,29 @@
#pragma once
#include <hl.h>
#include "hl/aura/aurahl.h"
#include "common_c/types/complex_array.h"
#include "common_c/types/complex_t.h"
HL_PRIM vbyte* AURA_HL_FUNC(complex_array_alloc)(int length) {
return (vbyte*) aura_complex_array_alloc(length);
}
HL_PRIM void AURA_HL_FUNC(complex_array_free)(vbyte* complex_array) {
aura_complex_array_free((aura_complex_t*) complex_array);
}
HL_PRIM aura_complex_t* AURA_HL_FUNC(complex_array_set)(vbyte* complex_array, int index, float real, float imag) {
return aura_complex_array_set((aura_complex_t *) complex_array, index, real, imag);
}
HL_PRIM aura_complex_t* AURA_HL_FUNC(complex_array_get)(vbyte* complex_array, int index) {
return aura_complex_array_get((aura_complex_t*) complex_array, index);
}
DEFINE_PRIM(_BYTES, complex_array_alloc, _I32)
DEFINE_PRIM(_VOID, complex_array_free, _BYTES)
DEFINE_PRIM(_REF(_aura__types___Complex__ComplexImpl), complex_array_set, _BYTES _I32 _F32 _F32)
DEFINE_PRIM(_REF(_aura__types___Complex__ComplexImpl), complex_array_get, _BYTES _I32)

@@ -0,0 +1,8 @@
const project = new Project("aura-hl");
project.addIncludeDir("../");
project.addFile("../common_c/**");
project.addFile("aura/**");
resolve(project);

@@ -0,0 +1,41 @@
# Aura Backends
The `/Backends` directory contains target-specific code that may be used on some
targets to improve performance.
The backends are enabled by default, but if you want to use only the generic
Haxe sources (e.g. for performance comparisons), compile your Kha project with
the command line flag `--aura-no-backend`.
## Folder Structure
- `/common_c`:
Pure C code that can be used by multiple backends.
- `/hl`:
Sources/headers for the Hashlink/C backend. The header files are mostly
Hashlink API wrappers around the code in `/common_c`.
Most of the backend sources mirror the respective Haxe code, so please don't
expect much documentation for the individual functions.
The Haxe implementation/glue code for the backends is in Aura's source files in
`/Sources`. To reduce redundancy and ensure completeness of the API, there are
no Haxe files in the `/Backends` folder that shadow the original sources.
Instead, there is usually one static class per backend implementation at the
bottom of a Haxe source module, whose methods are then called and inlined from
the original class if the [backend-specific define is set](#defines). This way,
all the Haxe functionality of a module stays inside that module instead of
being distributed across many per-backend Haxe files, which also keeps the API
consistent for each target.
## Defines
If the backends are enabled, Aura sets some defines before compilation which are
based on the Haxe target to which the project is compiled. They should only be
used internally, but for the sake of completeness they are documented here:
- `AURA_NO_BACKEND`: Defined if backends are disabled.
- `AURA_BACKEND_HL`: Defined if backends are enabled and the project is compiled
to a Hashlink target.
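
As a rough illustration of the pattern described above (the module and function
names are hypothetical and not part of Aura's API):

```haxe
class FooFilter {
	public static inline function process(buffer: kha.arrays.Float32Array) {
		#if AURA_BACKEND_HL
		// Inlined call into the HL/C backend implementation
		FooFilterBackendHL.process(buffer);
		#else
		// Generic Haxe implementation, used when no backend is available
		for (i in 0...buffer.length) {
			buffer[i] *= 0.5;
		}
		#end
	}
}
```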

lib/aura/CHANGES.md (new file)

@@ -0,0 +1,109 @@
# Breaking Changes
This list contains notable changes that may break compatibility with previous versions (public API only).
Non-breaking changes (e.g. new features) are _not_ listed here.
_The dates below are given as **YYYY.MM.DD**._
- **2024.06.25** ([`a8a66f6`](https://github.com/MoritzBrueckner/aura/commit/a8a66f6d86fc812512dca2e7d5ba07ef0d804cd4)):
`aura.dsp.panner.Panner.dopplerFactor` was renamed to `aura.dsp.panner.Panner.dopplerStrength`.
- **2024.01.22** ([`f7dff6e`](https://github.com/MoritzBrueckner/aura/commit/f7dff6ea3840ed7c42c8994a735cc534525d0b63)):
Previously, if loading an asset with `aura.Aura.loadAssets()` failed, Aura would sometimes continue loading other assets and in other cases stop loading assets of the same type after the first failure, which was rather unintuitive.
Now, Aura always continues to load other assets even if an asset could not be loaded.
- **2024.01.14** ([`47d4426`](https://github.com/MoritzBrueckner/aura/commit/47d4426ffd93a5efb24eb5dc4c2d2a985e1010f5)):
The `aura.format.mhr.MHRReader` class is no longer meant to be instantiated, instead it is used statically now:
```haxe
final mhrReader = new aura.format.mhr.MHRReader(mhrBlobBytes);
final hrtf = mhrReader.read();
// becomes
final hrtf = aura.format.mhr.MHRReader.read(mhrBlobBytes);
```
- **2023.04.29** ([`8c1da0b`](https://github.com/MoritzBrueckner/aura/commit/8c1da0b039c55f56400f6270ca109b58c4a48526)):
This commit introduced multiple compatibility-breaking changes:
1. `aura.Handle` is now `aura.Aura.BaseChannelHandle` (a convenience typedef for `aura.channels.BaseChannel.BaseChannelHandle`).
2. `aura.MixChannelHandle` is now `aura.Aura.MixChannelHandle` (a convenience typedef for `aura.channels.MixChannel.MixChannelHandle`).
3. `Aura.createHandle()` was replaced with `Aura.createUncompBufferChannel()` as well as `Aura.createCompBufferChannel()`, depending on the now-obsolete first parameter of `createHandle()`:
```haxe
Aura.createHandle(Play, mySound, loop, mixChannelHandle);
// becomes
Aura.createUncompBufferChannel(mySound, loop, mixChannelHandle);
// and
Aura.createHandle(Stream, mySound, loop, mixChannelHandle);
// becomes
Aura.createCompBufferChannel(mySound, loop, mixChannelHandle);
```
This change is more or less reverting [`0576c1f`](https://github.com/MoritzBrueckner/aura/commit/0576c1f657c5ff11d72f1916ae1b3f81ee0e2be7) and is introduced as part of adding more handle types to distinguish different channel features.
Now, `Aura.createUncompBufferChannel()` returns `Null<UncompBufferChannelHandle>` (`UncompBufferChannelHandle` is a new type introduced by this commit) and `Aura.createCompBufferChannel()` returns the unspecialized `Null<BaseChannelHandle>`.
This type-safe compile-time handling of handle types prevents the user from having to cast a returned handle to a specific handle type to get access to the complete functionality of a handle, which would have been required if `Aura.createHandle()` was still used to create handles (thus, [abstraction leaking](https://en.wikipedia.org/wiki/Leaky_abstraction) is minimized).
- **2022.11.21** ([`db8902c`](https://github.com/MoritzBrueckner/aura/commit/db8902c2816cdb7acbe221c97e3f454175df79c5)):
The way channels are connected to mix channels was changed:
```haxe
final myMixChannel: aura.MixChannelHandle = Aura.createMixChannel();
final myInputChannel: aura.Handle = Aura.createHandle(...);
myMixChannel.removeInputChannel(myInputChannel);
// becomes
myInputChannel.setMixChannel(null);
// and
myMixChannel.addInputChannel(myInputChannel);
// becomes
myInputChannel.setMixChannel(myMixChannel);
```
- **2022.09.03** ([`3feb4ee`](https://github.com/MoritzBrueckner/aura/commit/3feb4eec6f5c9e10a7bc305c91c47c2aa1d52e1e)):
Stereo panning was moved out of the `aura.Handle` class to be completely inside
the `aura.dsp.panner.StereoPanner` where it actually belongs. This lays the
groundwork for upcoming changes to the `StereoPanner` and potentially different
channel formats in the future.
- **2022.07.18** ([`4386c3d`](https://github.com/MoritzBrueckner/aura/commit/4386c3dd6bcfe894016dc0c631c07881cbe7eba6)):
`aura.dsp.Filter.Channels` was replaced with the new `aura.Types.Channels`
abstract. `Channels.Both` is now `Channels.All` (Aura currently only supports
stereo channels) and `Channels.toLeft()`/`Channels.toRight()` have been
replaced with the more generic `channel.matches()` member function.
- **2022.03.17** ([`0576c1f`](https://github.com/MoritzBrueckner/aura/commit/0576c1f657c5ff11d72f1916ae1b3f81ee0e2be7)):
`Aura.play()` and `Aura.stream()` were replaced with `Aura.createHandle()`.
The distinction between both play modes is now handled by the first parameter,
all other following parameters stay the same:
```haxe
Aura.play(mySound, loop, mixChannelHandle);
// becomes
Aura.createHandle(Play, mySound, loop, mixChannelHandle);
// and
Aura.stream(mySound, loop, mixChannelHandle);
// becomes
Aura.createHandle(Stream, mySound, loop, mixChannelHandle);
```
In addition to that, sounds are no longer auto-played to make it easier to
pre-initialize their handles. To play them, call `play()` on the returned
handle.

@@ -0,0 +1,156 @@
Creative Commons Attribution 4.0 International
Creative Commons Corporation ("Creative Commons") is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an "as-is" basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible.
Using Creative Commons Public Licenses
Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses.
Considerations for licensors: Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. More considerations for licensors.
Considerations for the public: By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor's permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. More considerations for the public.
Creative Commons Attribution 4.0 International Public License
By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions.
Section 1 – Definitions.
a. Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image.
b. Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License.
c. Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights.
d. Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements.
e. Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material.
f. Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License.
g. Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license.
h. Licensor means the individual(s) or entity(ies) granting rights under this Public License.
i. Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them.
j. Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world.
k. You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning.
Section 2 – Scope.
a. License grant.
1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to:
A. reproduce and Share the Licensed Material, in whole or in part; and
B. produce, reproduce, and Share Adapted Material.
2. Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions.
3. Term. The term of this Public License is specified in Section 6(a).
4. Media and formats; technical modifications allowed. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material.
5. Downstream recipients.
A. Offer from the Licensor – Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License.
B. No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material.
6. No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i).
b. Other rights.
1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise.
2. Patent and trademark rights are not licensed under this Public License.
3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties.
Section 3 – License Conditions.
Your exercise of the Licensed Rights is expressly made subject to the following conditions.
a. Attribution.
1. If You Share the Licensed Material (including in modified form), You must:
A. retain the following if it is supplied by the Licensor with the Licensed Material:
i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated);
ii. a copyright notice;
iii. a notice that refers to this Public License;
iv. a notice that refers to the disclaimer of warranties;
v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable;
B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and
C. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License.
2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information.
3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable.
4. If You Share Adapted Material You produce, the Adapter's License You apply must not prevent recipients of the Adapted Material from complying with this Public License.
Section 4 – Sui Generis Database Rights.
Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material:
a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database;
b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material; and
c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database.
For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights.
Section 5 – Disclaimer of Warranties and Limitation of Liability.
a. Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You.
b. To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You.
c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability.
Section 6 – Term and Termination.
a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically.
b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates:
1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or
2. upon express reinstatement by the Licensor.
c. For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License.
d. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License.
e. Sections 1, 5, 6, 7, and 8 survive termination of this Public License.
Section 7 – Other Terms and Conditions.
a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed.
b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License.
Section 8 – Interpretation.
a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License.
b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions.
c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor.
d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority.
Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the "Licensor." Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at creativecommons.org/policies, Creative Commons does not authorize the use of the trademark "Creative Commons" or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses.
Creative Commons may be contacted at creativecommons.org.

@@ -0,0 +1,121 @@
Creative Commons Legal Code
CC0 1.0 Universal
CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
HEREUNDER.
Statement of Purpose
The laws of most jurisdictions throughout the world automatically confer
exclusive Copyright and Related Rights (defined below) upon the creator
and subsequent owner(s) (each and all, an "owner") of an original work of
authorship and/or a database (each, a "Work").
Certain owners wish to permanently relinquish those rights to a Work for
the purpose of contributing to a commons of creative, cultural and
scientific works ("Commons") that the public can reliably and without fear
of later claims of infringement build upon, modify, incorporate in other
works, reuse and redistribute as freely as possible in any form whatsoever
and for any purposes, including without limitation commercial purposes.
These owners may contribute to the Commons to promote the ideal of a free
culture and the further production of creative, cultural and scientific
works, or to gain reputation or greater distribution for their Work in
part through the use and efforts of others.
For these and/or other purposes and motivations, and without any
expectation of additional consideration or compensation, the person
associating CC0 with a Work (the "Affirmer"), to the extent that he or she
is an owner of Copyright and Related Rights in the Work, voluntarily
elects to apply CC0 to the Work and publicly distribute the Work under its
terms, with knowledge of his or her Copyright and Related Rights in the
Work and the meaning and intended legal effect of CC0 on those rights.
1. Copyright and Related Rights. A Work made available under CC0 may be
protected by copyright and related or neighboring rights ("Copyright and
Related Rights"). Copyright and Related Rights include, but are not
limited to, the following:
i. the right to reproduce, adapt, distribute, perform, display,
communicate, and translate a Work;
ii. moral rights retained by the original author(s) and/or performer(s);
iii. publicity and privacy rights pertaining to a person's image or
likeness depicted in a Work;
iv. rights protecting against unfair competition in regards to a Work,
subject to the limitations in paragraph 4(a), below;
v. rights protecting the extraction, dissemination, use and reuse of data
in a Work;
vi. database rights (such as those arising under Directive 96/9/EC of the
European Parliament and of the Council of 11 March 1996 on the legal
protection of databases, and under any national implementation
thereof, including any amended or successor version of such
directive); and
vii. other similar, equivalent or corresponding rights throughout the
world based on applicable law or treaty, and any national
implementations thereof.
2. Waiver. To the greatest extent permitted by, but not in contravention
of, applicable law, Affirmer hereby overtly, fully, permanently,
irrevocably and unconditionally waives, abandons, and surrenders all of
Affirmer's Copyright and Related Rights and associated claims and causes
of action, whether now known or unknown (including existing as well as
future claims and causes of action), in the Work (i) in all territories
worldwide, (ii) for the maximum duration provided by applicable law or
treaty (including future time extensions), (iii) in any current or future
medium and for any number of copies, and (iv) for any purpose whatsoever,
including without limitation commercial, advertising or promotional
purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each
member of the public at large and to the detriment of Affirmer's heirs and
successors, fully intending that such Waiver shall not be subject to
revocation, rescission, cancellation, termination, or any other legal or
equitable action to disrupt the quiet enjoyment of the Work by the public
as contemplated by Affirmer's express Statement of Purpose.
3. Public License Fallback. Should any part of the Waiver for any reason
be judged legally invalid or ineffective under applicable law, then the
Waiver shall be preserved to the maximum extent permitted taking into
account Affirmer's express Statement of Purpose. In addition, to the
extent the Waiver is so judged Affirmer hereby grants to each affected
person a royalty-free, non transferable, non sublicensable, non exclusive,
irrevocable and unconditional license to exercise Affirmer's Copyright and
Related Rights in the Work (i) in all territories worldwide, (ii) for the
maximum duration provided by applicable law or treaty (including future
time extensions), (iii) in any current or future medium and for any number
of copies, and (iv) for any purpose whatsoever, including without
limitation commercial, advertising or promotional purposes (the
"License"). The License shall be deemed effective as of the date CC0 was
applied by Affirmer to the Work. Should any part of the License for any
reason be judged legally invalid or ineffective under applicable law, such
partial invalidity or ineffectiveness shall not invalidate the remainder
of the License, and in such case Affirmer hereby affirms that he or she
will not (i) exercise any of his or her remaining Copyright and Related
Rights in the Work or (ii) assert any associated claims and causes of
action with respect to the Work, in either case contrary to Affirmer's
express Statement of Purpose.
4. Limitations and Disclaimers.
a. No trademark or patent rights held by Affirmer are waived, abandoned,
surrendered, licensed or otherwise affected by this document.
b. Affirmer offers the Work as-is and makes no representations or
warranties of any kind concerning the Work, express, implied,
statutory or otherwise, including without limitation warranties of
title, merchantability, fitness for a particular purpose, non
infringement, or the absence of latent or other defects, accuracy, or
the present or absence of errors, whether or not discoverable, all to
the greatest extent permissible under applicable law.
c. Affirmer disclaims responsibility for clearing rights of other persons
that may apply to the Work or any use thereof, including without
limitation any person's Copyright and Related Rights in the Work.
Further, Affirmer disclaims responsibility for obtaining any necessary
consents, permissions or other rights required for any use of the
Work.
d. Affirmer understands and acknowledges that Creative Commons is not a
party to this document and has no duty or obligation with respect to
this CC0 or use of the Work.

@@ -0,0 +1,11 @@
zlib License
This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

@@ -0,0 +1,458 @@
// =============================================================================
// audioCallback() is roughly based on
// https://github.com/Kode/Kha/blob/master/Sources/kha/audio2/Audio1.hx
// =============================================================================
package aura;
import haxe.Exception;
import haxe.ds.Vector;
import kha.Assets;
import kha.SystemImpl;
import kha.arrays.Float32Array;
import aura.channels.Html5StreamChannel;
import aura.channels.MixChannel;
import aura.channels.UncompBufferChannel;
import aura.channels.UncompBufferResamplingChannel;
import aura.channels.StreamChannel;
import aura.format.mhr.MHRReader;
import aura.threading.BufferCache;
import aura.types.AudioBuffer;
import aura.types.HRTF;
import aura.utils.Assert;
import aura.utils.BufferUtils.clearBuffer;
import aura.utils.MathUtils;
import aura.utils.Profiler;
import aura.utils.Pointer;
// Convenience typedefs to auto-import them with this module
typedef BaseChannelHandle = aura.channels.BaseChannel.BaseChannelHandle;
typedef UncompBufferChannelHandle = aura.channels.UncompBufferChannel.UncompBufferChannelHandle;
typedef MixChannelHandle = aura.channels.MixChannel.MixChannelHandle;
@:access(aura.channels.MixChannelHandle)
class Aura {
public static var options(default, null): Null<AuraOptions> = null;
public static var sampleRate(default, null): Int;
public static var lastBufferSize(default, null): Int = 0;
public static var listener: Listener;
public static final mixChannels = new Map<String, MixChannelHandle>();
public static var masterChannel(default, null): MixChannelHandle;
static inline var BLOCK_SIZE = 1024;
static inline var NUM_OUTPUT_CHANNELS = 2;
static inline var BLOCK_CHANNEL_SIZE = Std.int(BLOCK_SIZE / NUM_OUTPUT_CHANNELS);
static var p_samplesBuffer = new Pointer<Float32Array>(null);
static var blockBuffer = new AudioBuffer(NUM_OUTPUT_CHANNELS, BLOCK_CHANNEL_SIZE);
static var blockBufPos = 0;
static final hrtfs = new Map<String, HRTF>();
public static function init(?options: AuraOptions) {
sampleRate = kha.audio2.Audio.samplesPerSecond;
assert(Critical, sampleRate != 0, "sampleRate must not be 0!");
Aura.options = AuraOptions.addDefaults(options);
@:privateAccess MixChannel.channelSize = Aura.options.channelSize;
listener = new Listener();
BufferCache.init();
// Create a few preconfigured mix channels
masterChannel = createMixChannel("master");
createMixChannel("music").setMixChannel(masterChannel);
createMixChannel("fx").setMixChannel(masterChannel);
#if (kha_html5 || kha_debug_html5)
if (kha.SystemImpl.mobile) {
// kha.js.MobileWebAudio doesn't support a custom audio callback, so
// manually synchronize all tracks here (note that because of this
// limitation there are no insert effects supported for mobile audio)
kha.Scheduler.addTimeTask(masterChannel.getMixChannel().synchronize, 0, 1/60);
}
else {
#end
kha.audio2.Audio.audioCallback = audioCallback;
#if (kha_html5 || kha_debug_html5)
}
#end
#if (kha_html5 || kha_debug_html5)
// Check if virtual html5 stream channels can be made physical
kha.Scheduler.addBreakableTimeTask(() -> {
if (kha.SystemImpl.mobileAudioPlaying) {
Html5StreamChannel.makeChannelsPhysical();
return BreakTask;
}
return ContinueTask;
}, 0, 1/60);
#end
kha.System.notifyOnApplicationState(null, null, null, null, () -> {
Profiler.shutdown();
});
}
/**
Load all assets listed in the given `loadConfig`.
If all assets are loaded successfully, `done` is called.
For each asset that fails to be loaded, `failed` is called if it
is passed to this function.
If `onProgress` is passed to this function, it is called for each
successfully loaded asset with the number of successfully loaded assets
so far including the current asset (first parameter), the number
of assets in the `loadConfig` (second parameter), as well as the name
of the current asset (third parameter).
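A usage sketch (the asset names are hypothetical):

```haxe
Aura.loadAssets(
	{compressed: ["music"], uncompressed: ["shotSound"], hrtf: ["myHRTF"]},
	() -> trace("All assets loaded"),
	() -> trace("Failed to load an asset"),
	(numLoaded: Int, numTotal: Int, assetName: String) -> {
		trace('Loaded $assetName ($numLoaded/$numTotal)');
	}
);
```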
**/
public static function loadAssets(loadConfig: AuraLoadConfig, done: Void->Void, ?failed: Void->Void, ?onProgress:Int->Int->String->Void) {
final length = loadConfig.getEntryCount();
var count = 0;
for (soundName in loadConfig.compressed) {
if (!doesSoundExist(soundName)) {
onLoadingError(null, failed, soundName);
continue;
}
Assets.loadSound(soundName, (sound: kha.Sound) -> {
#if !kha_krom // Krom only uses uncompressedData
if (sound.compressedData == null) {
throw 'Cannot compress already uncompressed sound ${soundName}!';
}
#end
count++;
if (onProgress != null) {
onProgress(count, length, soundName);
}
if (count == length) {
done();
return;
}
}, (error: kha.AssetError) -> { onLoadingError(error, failed, soundName); });
}
for (soundName in loadConfig.uncompressed) {
if (!doesSoundExist(soundName)) {
onLoadingError(null, failed, soundName);
continue;
}
Assets.loadSound(soundName, (sound: kha.Sound) -> {
if (sound.uncompressedData == null) {
sound.uncompress(() -> {
count++;
if (onProgress != null) {
onProgress(count, length, soundName);
}
if (count == length) {
done();
return;
}
});
}
else {
count++;
if (onProgress != null) {
onProgress(count, length, soundName);
}
if (count == length) {
done();
return;
}
}
}, (error: kha.AssetError) -> { onLoadingError(error, failed, soundName); });
}
for (hrtfName in loadConfig.hrtf) {
if (!doesBlobExist(hrtfName)) {
onLoadingError(null, failed, hrtfName);
continue;
}
Assets.loadBlob(hrtfName, (blob: kha.Blob) -> {
var hrtf: HRTF;
try {
hrtf = MHRReader.read(blob.toBytes());
}
catch (e: Exception) {
trace('Could not load hrtf $hrtfName: ${e.details()}');
if (failed != null) {
failed();
}
return;
}
hrtfs[hrtfName] = hrtf;
count++;
if (onProgress != null) {
onProgress(count, length, hrtfName);
}
if (count == length) {
done();
return;
}
}, (error: kha.AssetError) -> { onLoadingError(error, failed, hrtfName); });
}
}
static function onLoadingError(error: Null<kha.AssetError>, failed: Null<Void->Void>, assetName: String) {
final errorInfo = error == null ? "" : "\nOriginal error: " + error.url + "..." + error.error;
trace(
'Could not load asset "$assetName", make sure that all assets are named\n'
+ " correctly and that they are included in the khafile.js."
+ errorInfo
);
if (failed != null) {
failed();
}
}
/**
Returns whether a sound exists and can be loaded.
**/
public static inline function doesSoundExist(soundName: String): Bool {
// Use reflection instead of Assets.sounds.get() to prevent errors on
// static targets. A sound's description is the sound's entry in
// files.json and not a kha.Sound, but get() is typed to return a sound,
// which would lead to an invalid cast exception.
// Relying on Kha internals ("Description" as name) is bad, but there is
// no good alternative...
return Reflect.field(Assets.sounds, soundName + "Description") != null;
}
/**
Returns whether a blob exists and can be loaded.
**/
public static inline function doesBlobExist(blobName: String): Bool {
return Reflect.field(Assets.blobs, blobName + "Description") != null;
}
public static inline function getSound(soundName: String): Null<kha.Sound> {
return Assets.sounds.get(soundName);
}
public static inline function getHRTF(hrtfName: String): Null<HRTF> {
return hrtfs.get(hrtfName);
}
/**
Create a new audio channel to play an uncompressed and pre-loaded sound, and return a main-thread handle object to the newly created channel.
The playback of the newly created channel does not start automatically.
@param sound The _uncompressed_ sound to play by the created channel
@param loop Whether to loop the playback of the channel
@param mixChannelHandle (Optional) A handle for the `MixChannel`
to which to route the audio output of the newly created channel.
If the parameter is `null` (default), route the channel's output
to the master channel
@return A main-thread handle to the newly created channel, or `null`
if the created channel could not be assigned to the given mix channel
(e.g. in case of circular dependencies)
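A usage sketch (the sound name is hypothetical):

```haxe
final handle = Aura.createUncompBufferChannel(Aura.getSound("shotSound"), false);
if (handle != null) {
	handle.play();
}
```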
**/
public static function createUncompBufferChannel(sound: kha.Sound, loop: Bool = false, mixChannelHandle: Null<MixChannelHandle> = null): Null<UncompBufferChannelHandle> {
assert(Critical, sound.uncompressedData != null,
"Cannot play a sound with no uncompressed data. Make sure to load it as 'uncompressed' in the AuraLoadConfig."
);
if (mixChannelHandle == null) {
mixChannelHandle = masterChannel;
}
// TODO: Like Kha, only use resampling channel if pitch is used or if samplerate of sound and system differs
final newChannel = new UncompBufferResamplingChannel(sound.uncompressedData, loop, sound.sampleRate);
final handle = new UncompBufferChannelHandle(newChannel);
final foundChannel = handle.setMixChannel(mixChannelHandle);
return foundChannel ? handle : null;
}
/**
Create a new audio channel to play a compressed and pre-loaded sound, and return a main-thread handle object to the newly created channel.
The playback of the newly created channel does not start automatically.
@param sound The _compressed_ sound to play by the created channel
@param loop Whether to loop the playback of the channel
@param mixChannelHandle (Optional) A handle for the `MixChannel`
to which to route the audio output of the newly created channel.
If the parameter is `null` (default), route the channel's output
to the master channel
@return A main-thread handle to the newly created channel, or `null`
if the created channel could not be assigned to the given mix channel
(e.g. in case of circular dependencies)
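A usage sketch (the sound name is hypothetical; the "music" mix channel is
created in `Aura.init()`):

```haxe
final musicHandle = Aura.createCompBufferChannel(Aura.getSound("music"), true, Aura.mixChannels["music"]);
if (musicHandle != null) {
	musicHandle.play();
}
```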
**/
public static function createCompBufferChannel(sound: kha.Sound, loop: Bool = false, mixChannelHandle: Null<MixChannelHandle> = null): Null<BaseChannelHandle> {
#if kha_krom
// Krom only uses uncompressedData -> no streaming
return createUncompBufferChannel(sound, loop, mixChannelHandle);
#end
assert(Critical, sound.compressedData != null,
"Cannot stream a sound with no compressed data. Make sure to load it as 'compressed' in the AuraLoadConfig."
);
if (mixChannelHandle == null) {
mixChannelHandle = masterChannel;
}
#if (kha_html5 || kha_debug_html5)
final newChannel = kha.SystemImpl.mobile ? new Html5MobileStreamChannel(sound, loop) : new Html5StreamChannel(sound, loop);
#else
final khaChannel: Null<kha.audio1.AudioChannel> = kha.audio2.Audio1.stream(sound, loop);
if (khaChannel == null) {
return null;
}
final newChannel = new StreamChannel(cast khaChannel);
newChannel.stop();
#end
final handle = new BaseChannelHandle(newChannel);
final foundChannel = handle.setMixChannel(mixChannelHandle);
return foundChannel ? handle : null;
}
/**
Create a `MixChannel` to control a group of other channels together.
@param name Optional name. If not empty, the name can be used later to
retrieve the channel handle via `Aura.mixChannels[name]`.
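Example (the channel name is arbitrary):

```haxe
final voicesChannel = Aura.createMixChannel("voices");
voicesChannel.setMixChannel(Aura.masterChannel);
```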
**/
public static inline function createMixChannel(name: String = ""): MixChannelHandle {
final handle = new MixChannelHandle(new MixChannel());
if (name != "") {
assert(Error, !mixChannels.exists(name), 'MixChannel with name $name already exists!');
mixChannels[name] = handle;
#if AURA_DEBUG
handle.name = name;
#end
}
return handle;
}
/**
Mixes all sub channels and sounds in this channel together.
Based on `kha.audio2.Audio1.mix()`.
@param samplesBox Wrapper that holds the amount of requested samples.
@param buffer The buffer into which to write the output samples.
**/
static function audioCallback(samplesBox: kha.internal.IntBox, buffer: kha.audio2.Buffer): Void {
Profiler.frame("AudioCallback");
Time.update();
final samplesRequested = samplesBox.value;
Aura.lastBufferSize = samplesRequested;
if (!BufferCache.getBuffer(TFloat32Array, p_samplesBuffer, 1, samplesRequested)) {
for (_ in 0...samplesRequested) {
buffer.data.set(buffer.writeLocation, 0);
buffer.writeLocation += 1;
if (buffer.writeLocation >= buffer.size) {
buffer.writeLocation = 0;
}
}
return;
}
// At this point we can be sure that sampleCache is not null
final sampleCache = p_samplesBuffer.get();
// Copy reference to masterChannel for some more thread safety.
// TODO: Investigate if other solutions are required here
var master: MixChannel = masterChannel.getMixChannel();
master.synchronize();
clearBuffer(sampleCache);
var samplesWritten = 0;
// The blockBuffer still has some values from the last audioCallback
// invocation that haven't been written to the sampleCache yet
if (blockBufPos != 0) {
final samplesToWrite = minI(samplesRequested, BLOCK_SIZE - blockBufPos);
blockBuffer.interleaveToFloat32Array(sampleCache, Std.int(blockBufPos / NUM_OUTPUT_CHANNELS), 0, Std.int(samplesToWrite / NUM_OUTPUT_CHANNELS));
samplesWritten += samplesToWrite;
blockBufPos += samplesToWrite;
if (blockBufPos >= BLOCK_SIZE) {
blockBufPos = 0;
}
}
while (samplesWritten < samplesRequested) {
master.nextSamples(blockBuffer, buffer.samplesPerSecond);
final samplesStillWritable = minI(samplesRequested - samplesWritten, BLOCK_SIZE);
blockBuffer.interleaveToFloat32Array(sampleCache, 0, samplesWritten, Std.int(samplesStillWritable / NUM_OUTPUT_CHANNELS));
samplesWritten += samplesStillWritable;
blockBufPos += samplesStillWritable;
if (blockBufPos >= BLOCK_SIZE) {
blockBufPos = 0;
}
}
for (i in 0...samplesRequested) {
// Write clamped samples to final buffer
buffer.data.set(buffer.writeLocation, maxF(minF(sampleCache[i], 1.0), -1.0));
buffer.writeLocation += 1;
if (buffer.writeLocation >= buffer.size) {
buffer.writeLocation = 0;
}
}
#if AURA_BENCHMARK
Time.endOfFrame();
#end
}
}
@:allow(aura.Aura)
@:structInit
class AuraLoadConfig {
public final compressed: Array<String> = [];
public final uncompressed: Array<String> = [];
public final hrtf: Array<String> = [];
inline function getEntryCount(): Int {
return compressed.length + uncompressed.length + hrtf.length;
}
}
@:structInit
class AuraOptions {
@:optional public var channelSize: Null<Int>;
public static function addDefaults(options: Null<AuraOptions>) {
if (options == null) { options = {}; }
if (options.channelSize == null) { options.channelSize = 16; }
return options;
}
}
private enum abstract BreakableTaskStatus(Bool) to Bool {
var BreakTask = false;
var ContinueTask = true;
}

View File

@ -0,0 +1,96 @@
package aura;
import aura.math.Vec3;
@:allow(aura.Handle)
@:allow(aura.dsp.panner.Panner)
class Listener {
public var location(default, null): Vec3;
public var look(default, null): Vec3;
public var right(default, null): Vec3;
var velocity: Vec3;
public function new() {
this.location = new Vec3(0, 0, 0);
this.velocity = new Vec3(0, 0, 0);
this.look = new Vec3(0, 1, 0);
this.right = new Vec3(1, 0, 0);
}
/**
Set the listener's view direction. `look` points directly in the view
direction, `right` is perpendicular to `look` and is used internally to
get the sign of the angle between a channel and the listener.
Both parameters must be normalized.
**/
public inline function setViewDirection(look: Vec3, right: Vec3) {
assert(Debug, look.length == 1 && right.length == 1);
this.look.setFrom(look);
this.right.setFrom(right);
}
/**
Set the location of this listener in world space.
Calling this function also updates the listener's velocity, unless this
is the first call for this listener. Skipping the velocity update on the
first call avoids audible "jumps" in the audio output when the listener
is initially placed far away from the origin.
**/
public function setLocation(location: Vec3) {
final time = Time.getTime();
final timeDeltaLastCall = time - _setLocation_lastCallTime;
// If the last time setLocation() was called was at an earlier time step
if (timeDeltaLastCall > 0) {
_setLocation_lastLocation.setFrom(this.location);
_setLocation_lastVelocityUpdateTime = _setLocation_lastCallTime;
}
final timeDeltaVelocityUpdate = time - _setLocation_lastVelocityUpdateTime;
this.location.setFrom(location);
if (!_setLocation_initializedLocation) {
_setLocation_initializedLocation = true;
}
else if (timeDeltaVelocityUpdate > 0) {
velocity.setFrom(location.sub(_setLocation_lastLocation).mult(1 / timeDeltaVelocityUpdate));
}
_setLocation_lastCallTime = time;
}
var _setLocation_initializedLocation = false;
var _setLocation_lastLocation: Vec3 = new Vec3(0, 0, 0);
var _setLocation_lastCallTime: Float = 0.0;
var _setLocation_lastVelocityUpdateTime: Float = 0.0;
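// Usage sketch (hypothetical game-loop code): update the listener once per
// frame. The velocity used for the Doppler effect is derived automatically
// from consecutive setLocation() calls, so no explicit velocity is passed.
// Assumes `listener` is the active aura.Listener instance and that
// `camLook` and `camRight` are already normalized.
//
//     listener.set(camPosition, camLook, camRight);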
/**
Wrapper around `setViewDirection()` and `setLocation()`.
**/
public function set(location: Vec3, look: Vec3, right: Vec3) {
inline setViewDirection(look, right);
inline setLocation(location);
}
/**
Resets the location, direction and velocity of the listener to their
default values.
**/
public inline function reset() {
this.location.setFrom(new Vec3(0, 0, 0));
this.velocity.setFrom(new Vec3(0, 0, 0));
this._setLocation_initializedLocation = false;
this._setLocation_lastLocation.setFrom(new Vec3(0, 0, 0));
this._setLocation_lastVelocityUpdateTime = Time.getTime();
this.look.setFrom(new Vec3(0, 1, 0));
this.right.setFrom(new Vec3(1, 0, 0));
}
}

View File

@ -0,0 +1,61 @@
package aura;
import kha.Scheduler;
import aura.threading.BufferCache;
class Time {
public static var lastTime(default, null): Float = 0.0;
public static var delta(default, null): Float = 0.0;
#if AURA_UNIT_TESTS
public static var overrideTime: Null<Float> = null;
#end
#if AURA_BENCHMARK
public static var times: Array<Float>;
static var benchmarkStarted = false;
static var currentIteration = 0;
static var numIterations = 0;
static var onBenchmarkDone: Array<Float>->Void;
#end
public static inline function getTime(): Float {
#if AURA_UNIT_TESTS
if (overrideTime != null) {
return overrideTime;
}
#end
return Scheduler.realTime();
}
public static inline function update() {
delta = getTime() - lastTime;
lastTime = getTime();
BufferCache.updateTimer();
}
#if AURA_BENCHMARK
public static inline function endOfFrame() {
if (benchmarkStarted) {
times[currentIteration] = Scheduler.realTime() - lastTime;
currentIteration++;
if (currentIteration == numIterations) {
onBenchmarkDone(times);
benchmarkStarted = false;
currentIteration = 0;
}
}
}
public static function startBenchmark(numIterations: Int, onBenchmarkDone: Array<Float>->Void) {
Time.numIterations = numIterations;
Time.onBenchmarkDone = onBenchmarkDone;
times = new Array();
times.resize(numIterations);
benchmarkStarted = true;
}
#end
}
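// Usage sketch (requires the AURA_BENCHMARK define): measure the duration of
// 100 consecutive audio callbacks and print the mean.
//
//     Time.startBenchmark(100, times -> {
//         var sum = 0.0;
//         for (t in times) sum += t;
//         trace('Mean callback time: ${sum / times.length} s');
//     });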

View File

@ -0,0 +1,105 @@
package aura;
import aura.utils.MathUtils.clampF;
/**
Integer representing a Hertz value.
**/
typedef Hertz = Int;
/**
Float representing milliseconds.
**/
typedef Millisecond = Float;
enum abstract Channels(Int) {
var Left = 1 << 0;
var Right = 1 << 1;
var All = ~0;
public inline function matches(mask: Channels): Bool {
return (this & mask.asInt()) != 0;
}
public inline function matchesIndex(index: Int): Bool {
return ((1 << index) & this) != 0;
}
inline function asInt(): Int {
return this;
}
}
abstract Balance(Float) from Float to Float {
public static inline var LEFT: Balance = 0.0;
public static inline var CENTER: Balance = 0.5;
public static inline var RIGHT: Balance = 1.0;
inline function new(value: Float) {
this = clampF(value);
}
@:from public static inline function fromAngle(angle: Angle): Balance {
return switch (angle) {
case Deg(deg): (deg + 90) / 180;
case Rad(rad): (rad + Math.PI / 2) / Math.PI;
}
}
@:op(~A) public function invert() {
return 1.0 - this;
}
}
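// Worked examples for the angle conversion above: Deg(-90) maps to 0.0
// (hard left), Deg(0) to 0.5 (center) and Deg(90) to 1.0 (hard right).
//
//     final balance: Balance = Deg(45); // (45 + 90) / 180 = 0.75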
enum Angle {
Deg(deg: Int);
Rad(rad: Float);
}
#if cpp
@:forward
@:forwardStatics
abstract AtomicInt(cpp.AtomicInt) from Int to Int {
public inline function toPtr(): cpp.Pointer<cpp.AtomicInt> {
final val: cpp.AtomicInt = this; // For some reason, this line is required for correct codegen...
return cpp.Pointer.addressOf(val);
}
}
#else
typedef AtomicInt = Int;
#end
#if (haxe_ver >= 4.3 && hl_ver >= version("1.13.0") && !js)
typedef AtomicBool = haxe.atomic.AtomicBool;
#else
@:forward
@:forwardStatics
abstract AtomicBool({val: Bool}) { // We need indirection via struct here to not run into compile issues with `this`
public inline function new(value: Bool) {
this = {val: value};
}
public inline function compareExchange(expected: Bool, replacement: Bool): Bool {
final orig = this.val;
if (orig == expected) {
this.val = replacement;
}
return orig;
}
public inline function exchange(value: Bool): Bool {
final orig = this.val;
this.val = value;
return orig;
}
public inline function load(): Bool {
return this.val;
}
public inline function store(value: Bool): Bool {
return this.val = value;
}
}
#end
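// Behavior sketch for AtomicBool (both the native type and the fallback
// above): compareExchange() returns the *previous* value, so a successful
// exchange from `false` to `true` returns `false`.
//
//     final flag = new AtomicBool(false);
//     final prev = flag.compareExchange(false, true); // prev == false
//     trace(flag.load()); // true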

View File

@ -0,0 +1,244 @@
package aura.channels;
import aura.channels.MixChannel.MixChannelHandle;
import aura.dsp.DSP;
import aura.dsp.panner.Panner;
import aura.threading.Fifo;
import aura.threading.Message;
import aura.types.AudioBuffer;
import aura.utils.Interpolator.LinearInterpolator;
import aura.utils.MathUtils;
/**
Main-thread handle to an audio channel in the audio thread.
**/
@:access(aura.channels.BaseChannel)
@:allow(aura.dsp.panner.Panner)
class BaseChannelHandle {
/**
Whether the playback of the handle's channel is currently paused.
**/
public var paused(get, never): Bool;
inline function get_paused(): Bool { return channel.paused; }
/**
Whether the playback of the handle's channel has finished.
On `MixChannel`s this value is always `false`.
**/
public var finished(get, never): Bool;
inline function get_finished(): Bool { return channel.finished; }
public var panner(get, null): Null<Panner>;
inline function get_panner(): Null<Panner> { return channel.panner; }
/**
Link to the audio channel in the audio thread.
**/
final channel: BaseChannel;
var parentHandle: Null<MixChannelHandle> = null;
// Parameter cache for getter functions
var _volume: Float = 1.0;
var _pitch: Float = 1.0;
public inline function new(channel: BaseChannel) {
this.channel = channel;
}
/**
Starts the playback. If the sound wasn't played before or was stopped,
the playback starts from the beginning. If it is paused, playback starts
from the position where it was paused.
@param retrigger Controls the behaviour if the sound is already playing.
If true, restart playback from the beginning, else do nothing.
**/
public inline function play(retrigger = false) {
channel.sendMessage({ id: ChannelMessageID.Play, data: retrigger });
}
public inline function pause() {
channel.sendMessage({ id: ChannelMessageID.Pause, data: null });
}
public inline function stop() {
channel.sendMessage({ id: ChannelMessageID.Stop, data: null });
}
public inline function addInsert(insert: DSP): DSP {
return channel.addInsert(insert);
}
public inline function removeInsert(insert: DSP) {
channel.removeInsert(insert);
}
/**
Set the mix channel into which this channel routes its output.
Returns `true` if setting the mix channel was successful and `false` if
there would be a circular dependency or the number of input channels of
the mix channel is already maxed out.
**/
public function setMixChannel(mixChannelHandle: MixChannelHandle): Bool {
if (mixChannelHandle == parentHandle) {
return true;
}
if (parentHandle != null) {
@:privateAccess parentHandle.removeInputChannel(this);
parentHandle = null;
}
if (mixChannelHandle == null) {
return true;
}
// Return false for circular references (including mixChannelHandle == this)
var curHandle = mixChannelHandle;
while (curHandle != null) {
if (curHandle == this) {
return false;
}
curHandle = curHandle.parentHandle;
}
final success = @:privateAccess mixChannelHandle.addInputChannel(this);
if (success) {
parentHandle = mixChannelHandle;
} else {
parentHandle = null;
}
return success;
}
public inline function setVolume(volume: Float) {
assert(Critical, volume >= 0, "Volume value must not be a negative number!");
channel.sendMessage({ id: ChannelMessageID.PVolume, data: maxF(0.0, volume) });
this._volume = volume;
}
public inline function getVolume(): Float {
return this._volume;
}
public inline function setPitch(pitch: Float) {
assert(Critical, pitch > 0, "Pitch value must be a positive number!");
channel.sendMessage({ id: ChannelMessageID.PPitch, data: maxF(0.0, pitch) });
this._pitch = pitch;
}
public inline function getPitch(): Float {
return this._pitch;
}
#if AURA_DEBUG
public function getDebugAttrs(): Map<String, String> {
return ["In use" => Std.string(@:privateAccess channel.isPlayable())];
}
#end
}
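// Usage sketch for BaseChannelHandle (hypothetical `handle` obtained from
// one of Aura's create*Channel() functions). The setters below only enqueue
// messages for the audio thread instead of mutating the channel directly.
//
//     handle.setVolume(0.5);
//     handle.setPitch(1.2);
//     handle.play();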
/**
Base class of all audio channels in the audio thread.
**/
@:allow(aura.Aura)
@:access(aura.dsp.DSP)
@:allow(aura.dsp.panner.Panner)
@:access(aura.dsp.panner.Panner)
abstract class BaseChannel {
final messages: Fifo<Message> = new Fifo();
final inserts: Array<DSP> = [];
var panner: Null<Panner> = null;
// Parameters
final pVolume = new LinearInterpolator(1.0);
final pDopplerRatio = new LinearInterpolator(1.0);
final pDstAttenuation = new LinearInterpolator(1.0);
var treeLevel(default, null): Int = 0;
var paused: Bool = false;
var finished: Bool = true;
abstract function nextSamples(requestedSamples: AudioBuffer, sampleRate: Hertz): Void;
abstract function play(retrigger: Bool): Void;
abstract function pause(): Void;
abstract function stop(): Void;
function isPlayable(): Bool {
return !paused && !finished;
}
function setTreeLevel(level: Int) {
this.treeLevel = level;
}
inline function processInserts(buffer: AudioBuffer) {
for (insert in inserts) {
if (insert.bypass) { continue; }
insert.process(buffer);
}
if (panner != null) {
panner.process(buffer);
}
}
inline function addInsert(insert: DSP): DSP {
assert(Critical, !insert.inUse, "DSP objects can only belong to one unique channel");
insert.inUse = true;
inserts.push(insert);
return insert;
}
inline function removeInsert(insert: DSP) {
var found = inserts.remove(insert);
if (found) {
insert.inUse = false;
}
}
function synchronize() {
var message: Null<Message>;
while ((message = messages.tryPop()) != null) {
parseMessage(message);
}
for (insert in inserts) {
insert.synchronize();
}
if (panner != null) {
panner.synchronize();
}
}
function parseMessage(message: Message) {
switch (message.id) {
case ChannelMessageID.Play: play(cast message.data);
case ChannelMessageID.Pause: pause();
case ChannelMessageID.Stop: stop();
case ChannelMessageID.PVolume: pVolume.targetValue = cast message.data;
case ChannelMessageID.PDopplerRatio: pDopplerRatio.targetValue = cast message.data;
case ChannelMessageID.PDstAttenuation: pDstAttenuation.targetValue = cast message.data;
default:
}
}
inline function sendMessage(message: Message) {
messages.add(message);
}
}
enum abstract AttenuationMode(Int) {
var Linear;
var Inverse;
var Exponential;
}

View File

@ -0,0 +1,192 @@
package aura.channels;
#if (kha_html5 || kha_debug_html5)
import js.Browser;
import js.html.AudioElement;
import js.html.URL;
import kha.SystemImpl;
import kha.js.MobileWebAudioChannel;
import aura.threading.Message;
import aura.types.AudioBuffer;
/**
Channel dedicated for streaming playback on html5.
Because most browsers don't allow audio playback before the user has
interacted with the website or canvas at least once, we can't always play
audio without causing an exception. In order to not cause chaos with sounds
playing at wrong times, sounds are virtualized before they can actually be
played. This means that their playback position is tracked, and as soon as
the user interacts with the web page, the audio starts playing at the
correct position, as if the sound had been playing the entire time since
it was started.
Note that on mobile browsers the `aura.channels.Html5MobileStreamChannel` is
used instead.
**/
class Html5StreamChannel extends BaseChannel {
static final virtualChannels: Array<Html5StreamChannel> = [];
final audioElement: AudioElement;
var virtualPosition: Float;
var lastUpdateTime: Float;
public function new(sound: kha.Sound, loop: Bool) {
audioElement = Browser.document.createAudioElement();
final mimeType = #if kha_debug_html5 "audio/ogg" #else "audio/mp4" #end;
final blob = new js.html.Blob([sound.compressedData.getData()], {type: mimeType});
// TODO: if removing channels, use revokeObjectUrl() ?
// see https://developer.mozilla.org/en-US/docs/Web/API/URL/createObjectURL
audioElement.src = URL.createObjectURL(blob);
audioElement.loop = loop;
if (isVirtual()) {
virtualChannels.push(this);
}
}
inline function isVirtual(): Bool {
return !SystemImpl.mobileAudioPlaying;
}
@:allow(aura.Aura)
static function makeChannelsPhysical() {
for (channel in virtualChannels) {
channel.updateVirtualPosition();
channel.audioElement.currentTime = channel.virtualPosition;
if (!channel.finished && !channel.paused) {
channel.audioElement.play();
}
}
virtualChannels.resize(0);
}
inline function updateVirtualPosition() {
final now = kha.Scheduler.realTime();
if (finished) {
virtualPosition = 0;
}
else if (!paused) {
virtualPosition += now - lastUpdateTime;
while (virtualPosition > audioElement.duration) {
virtualPosition -= audioElement.duration;
}
}
lastUpdateTime = now;
}
public function play(retrigger: Bool) {
if (isVirtual()) {
updateVirtualPosition();
if (retrigger) {
virtualPosition = 0;
}
}
else {
audioElement.play();
if (retrigger) {
audioElement.currentTime = 0;
}
}
paused = false;
finished = false;
}
public function pause() {
if (isVirtual()) {
updateVirtualPosition();
}
else {
audioElement.pause();
}
paused = true;
}
public function stop() {
if (isVirtual()) {
updateVirtualPosition();
}
else {
audioElement.pause();
audioElement.currentTime = 0;
}
finished = true;
}
function nextSamples(requestedSamples: AudioBuffer, sampleRate: Hertz) {}
override function parseMessage(message: Message) {
switch (message.id) {
// Because we're using a HTML implementation here, we cannot use the
// LinearInterpolator parameters
case ChannelMessageID.PVolume: audioElement.volume = cast message.data;
case ChannelMessageID.PPitch:
case ChannelMessageID.PDopplerRatio:
case ChannelMessageID.PDstAttenuation:
default:
super.parseMessage(message);
}
}
}
/**
Wrapper around kha.js.MobileWebAudioChannel.
See https://github.com/Kode/Kha/issues/299 and
https://github.com/Kode/Kha/commit/12494b1112b64e4286b6a2fafc0f08462c1e7971
**/
class Html5MobileStreamChannel extends BaseChannel {
final khaChannel: kha.js.MobileWebAudioChannel;
public function new(sound: kha.Sound, loop: Bool) {
khaChannel = new kha.js.MobileWebAudioChannel(cast sound, loop);
}
public function play(retrigger: Bool) {
if (retrigger) {
khaChannel.position = 0;
}
khaChannel.play();
paused = false;
finished = false;
}
public function pause() {
khaChannel.pause();
paused = true;
}
public function stop() {
khaChannel.stop();
finished = true;
}
function nextSamples(requestedSamples: AudioBuffer, sampleRate: Hertz) {}
override function parseMessage(message: Message) {
switch (message.id) {
// Because we're using a HTML implementation here, we cannot use the
// LinearInterpolator parameters
case ChannelMessageID.PVolume: khaChannel.volume = cast message.data;
case ChannelMessageID.PPitch:
case ChannelMessageID.PDopplerRatio:
case ChannelMessageID.PDstAttenuation:
default:
super.parseMessage(message);
}
}
}
#end

View File

@ -0,0 +1,299 @@
package aura.channels;
import haxe.ds.Vector;
#if cpp
import sys.thread.Mutex;
#end
import aura.channels.BaseChannel.BaseChannelHandle;
import aura.threading.BufferCache;
import aura.threading.Message;
import aura.types.AudioBuffer;
import aura.utils.Profiler;
/**
Main-thread handle to a `MixChannel` in the audio thread.
**/
class MixChannelHandle extends BaseChannelHandle {
#if AURA_DEBUG
public var name: String = "";
public var inputHandles: Array<BaseChannelHandle> = new Array();
#end
public inline function getNumInputs(): Int {
return getMixChannel().getNumInputs();
}
/**
Adds an input channel. Returns `true` if adding the channel was
successful, `false` if the number of input channels is already maxed
out.
**/
inline function addInputChannel(channelHandle: BaseChannelHandle): Bool {
assert(Error, channelHandle != null, "channelHandle must not be null");
final foundChannel = getMixChannel().addInputChannel(channelHandle.channel);
#if AURA_DEBUG
if (foundChannel) inputHandles.push(channelHandle);
#end
return foundChannel;
}
/**
Removes an input channel from this `MixChannel`.
**/
inline function removeInputChannel(channelHandle: BaseChannelHandle) {
#if AURA_DEBUG
inputHandles.remove(channelHandle);
#end
getMixChannel().removeInputChannel(channelHandle.channel);
}
inline function getMixChannel(): MixChannel {
return cast this.channel;
}
#if AURA_DEBUG
public override function getDebugAttrs(): Map<String, String> {
return super.getDebugAttrs().mergeIntoThis([
"Name" => name,
"Num inserts" => Std.string(@:privateAccess channel.inserts.length),
]);
}
#end
}
/**
A channel that mixes together the output of multiple input channels.
**/
@:access(aura.dsp.DSP)
class MixChannel extends BaseChannel {
#if cpp
static var mutex: Mutex = new Mutex();
#end
/**
The number of inputs a MixChannel can hold. Set this value via
`Aura.init(channelSize)`.
**/
static var channelSize: Int;
var inputChannels: Vector<BaseChannel>;
var numUsedInputs: Int = 0;
/**
Temporary copy of inputChannels for thread safety.
**/
var inputChannelsCopy: Vector<BaseChannel>;
public function new() {
inputChannels = new Vector<BaseChannel>(channelSize);
// Make sure super.isPlayable() is true until we find better semantics
// for MixChannel.play()/pause()/stop()
this.finished = false;
}
/**
Adds an input channel. Returns `true` if adding the channel was
successful, `false` if the number of input channels is already maxed
out.
**/
public function addInputChannel(channel: BaseChannel): Bool {
var foundChannel = false;
#if cpp
mutex.acquire();
#end
for (i in 0...MixChannel.channelSize) {
if (inputChannels[i] == null) { // || inputChannels[i].finished) {
inputChannels[i] = channel;
numUsedInputs++;
channel.setTreeLevel(this.treeLevel + 1);
foundChannel = true;
break;
}
}
updateChannelsCopy();
#if cpp
mutex.release();
#end
return foundChannel;
}
public function removeInputChannel(channel: BaseChannel) {
#if cpp
mutex.acquire();
#end
for (i in 0...MixChannel.channelSize) {
if (inputChannels[i] == channel) {
inputChannels[i] = null;
numUsedInputs--;
break;
}
}
updateChannelsCopy();
#if cpp
mutex.release();
#end
}
public inline function getNumInputs() {
return numUsedInputs;
}
/**
Copy the references to the inputs channels for thread safety. This
function does not acquire any additional mutexes.
@see `MixChannel.inputChannelsCopy`
**/
inline function updateChannelsCopy() {
inputChannelsCopy = inputChannels.copy();
// TODO: Streaming
// for (i in 0...channelCount) {
// internalStreamChannels[i] = streamChannels[i];
// }
}
override function isPlayable(): Bool {
// TODO: be more intelligent here and actually check inputs?
return super.isPlayable() && numUsedInputs != 0;
}
override function setTreeLevel(level: Int) {
this.treeLevel = level;
for (inputChannel in inputChannels) {
if (inputChannel != null) {
inputChannel.setTreeLevel(level + 1);
}
}
}
override function synchronize() {
for (inputChannel in inputChannels) {
if (inputChannel != null) {
inputChannel.synchronize();
}
}
super.synchronize();
}
function nextSamples(requestedSamples: AudioBuffer, sampleRate: Hertz): Void {
Profiler.event();
if (numUsedInputs == 0) {
requestedSamples.clear();
return;
}
final inputBuffer = BufferCache.getTreeBuffer(treeLevel, requestedSamples.numChannels, requestedSamples.channelLength);
if (inputBuffer == null) {
requestedSamples.clear();
return;
}
var first = true;
var foundPlayableInput = false;
for (channel in inputChannelsCopy) {
if (channel == null || !channel.isPlayable()) {
continue;
}
foundPlayableInput = true;
channel.nextSamples(inputBuffer, sampleRate);
if (first) {
// To prevent feedback loops, the input buffer has to be cleared
// before all inputs are added to it. To not waste calculations,
// we do not clear the buffer here but instead just override
// the previous sample cache.
for (i in 0...requestedSamples.rawData.length) {
requestedSamples.rawData[i] = inputBuffer.rawData[i];
}
first = false;
}
else {
for (i in 0...requestedSamples.rawData.length) {
requestedSamples.rawData[i] += inputBuffer.rawData[i];
}
}
}
// for (channel in internalStreamChannels) {
// if (channel == null || !channel.isPlayable())
// continue;
// foundPlayableInput = true;
// channel.nextSamples(inputBuffer, samples, buffer.samplesPerSecond);
// for (i in 0...samples) {
// sampleCacheAccumulated[i] += inputBuffer[i] * channel.volume;
// }
// }
if (!foundPlayableInput) {
// Didn't read from input channels, clear possible garbage values
requestedSamples.clear();
return;
}
// Apply volume of this channel
final stepVol = pVolume.getLerpStepSize(requestedSamples.channelLength);
for (c in 0...requestedSamples.numChannels) {
final channelView = requestedSamples.getChannelView(c);
for (i in 0...requestedSamples.channelLength) {
channelView[i] *= pVolume.currentValue;
pVolume.currentValue += stepVol;
}
pVolume.currentValue = pVolume.lastValue;
}
pVolume.updateLast();
processInserts(requestedSamples);
}
/**
Calls `play()` for all input channels.
**/
public function play(retrigger: Bool): Void {
for (inputChannel in inputChannels) {
if (inputChannel != null) {
inputChannel.play(retrigger);
}
}
}
/**
Calls `pause()` for all input channels.
**/
public function pause(): Void {
for (inputChannel in inputChannels) {
if (inputChannel != null) {
inputChannel.pause();
}
}
}
/**
Calls `stop()` for all input channels.
**/
public function stop(): Void {
for (inputChannel in inputChannels) {
if (inputChannel != null) {
inputChannel.stop();
}
}
}
}

View File

@ -0,0 +1,64 @@
package aura.channels;
import aura.utils.Pointer;
import kha.arrays.Float32Array;
import aura.threading.BufferCache;
import aura.threading.Message;
import aura.types.AudioBuffer;
/**
Wrapper around `kha.audio2.StreamChannel` (for now).
**/
class StreamChannel extends BaseChannel {
final khaChannel: kha.audio2.StreamChannel;
final p_khaBuffer = new Pointer<Float32Array>(null);
public function new(khaChannel: kha.audio2.StreamChannel) {
this.khaChannel = khaChannel;
}
public function play(retrigger: Bool) {
paused = false;
finished = false;
khaChannel.play();
if (retrigger) {
khaChannel.position = 0;
}
}
public function pause() {
paused = true;
khaChannel.pause();
}
public function stop() {
finished = true;
khaChannel.stop();
}
function nextSamples(requestedSamples: AudioBuffer, sampleRate: Hertz) {
if (!BufferCache.getBuffer(TFloat32Array, p_khaBuffer, 1, requestedSamples.numChannels * requestedSamples.channelLength)) {
requestedSamples.clear();
return;
}
final khaBuffer = p_khaBuffer.get();
khaChannel.nextSamples(khaBuffer, requestedSamples.channelLength, sampleRate);
requestedSamples.deinterleaveFromFloat32Array(khaBuffer, requestedSamples.numChannels);
}
override function parseMessage(message: Message) {
switch (message.id) {
// Because we're using a Kha implementation here, we cannot use the
// LinearInterpolator parameters
case ChannelMessageID.PVolume: khaChannel.volume = cast message.data;
case ChannelMessageID.PPitch:
case ChannelMessageID.PDopplerRatio:
case ChannelMessageID.PDstAttenuation:
default:
super.parseMessage(message);
}
}
}

View File

@ -0,0 +1,264 @@
package aura.channels;
import kha.arrays.Float32Array;
import aura.channels.BaseChannel.BaseChannelHandle;
import aura.dsp.sourcefx.SourceEffect;
import aura.utils.MathUtils;
import aura.threading.Message;
import aura.types.AudioBuffer;
// TODO make handle thread-safe!
@:access(aura.channels.UncompBufferChannel)
class UncompBufferChannelHandle extends BaseChannelHandle {
final _sourceEffects: Array<SourceEffect> = []; // main-thread twin of channel.sourceEffects. TODO investigate better solution
var _playbackDataLength = -1;
inline function getUncompBufferChannel(): UncompBufferChannel {
return cast this.channel;
}
/**
Return the sound's length in seconds.
**/
public inline function getLength(): Float {
return getUncompBufferChannel().data.channelLength / Aura.sampleRate;
}
/**
Return the channel's current playback position in seconds.
**/
public inline function getPlaybackPosition(): Float {
return getUncompBufferChannel().playbackPosition / Aura.sampleRate;
}
/**
Set the channel's current playback position in seconds.
**/
public inline function setPlaybackPosition(value: Float) {
final pos = Math.round(value * Aura.sampleRate);
getUncompBufferChannel().playbackPosition = clampI(pos, 0, getUncompBufferChannel().data.channelLength);
}
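// Usage sketch for the position API above (hypothetical `handle`): jump to
// the middle of the sound. Both functions work in seconds, not samples.
//
//     handle.setPlaybackPosition(handle.getLength() * 0.5);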
public function addSourceEffect(sourceEffect: SourceEffect) {
_sourceEffects.push(sourceEffect);
final playbackData = updatePlaybackBuffer();
getUncompBufferChannel().sendMessage({ id: UncompBufferChannelMessageID.AddSourceEffect, data: [sourceEffect, playbackData] });
}
public function removeSourceEffect(sourceEffect: SourceEffect) {
if (_sourceEffects.remove(sourceEffect)) {
final playbackData = updatePlaybackBuffer();
getUncompBufferChannel().sendMessage({ id: UncompBufferChannelMessageID.RemoveSourceEffect, data: [sourceEffect, playbackData] });
}
}
@:access(aura.dsp.sourcefx.SourceEffect)
function updatePlaybackBuffer(): Null<AudioBuffer> {
final data = getUncompBufferChannel().data;
var playbackData: Null<AudioBuffer> = null;
if (_sourceEffects.length == 0) {
playbackData = data;
}
else {
var requiredChannelLength = data.channelLength;
var prevChannelLength = data.channelLength;
for (sourceEffect in _sourceEffects) {
prevChannelLength = sourceEffect.calculateRequiredChannelLength(prevChannelLength);
requiredChannelLength = maxI(requiredChannelLength, prevChannelLength);
}
if (_playbackDataLength != requiredChannelLength) {
playbackData = new AudioBuffer(data.numChannels, requiredChannelLength);
_playbackDataLength = requiredChannelLength;
}
}
// if null -> no buffer to change in channel
return playbackData;
}
}
@:allow(aura.channels.UncompBufferChannelHandle)
class UncompBufferChannel extends BaseChannel {
public static inline var NUM_CHANNELS = 2;
final sourceEffects: Array<SourceEffect> = [];
var appliedSourceEffects = false;
/** The current playback position in samples. **/
var playbackPosition: Int = 0;
var looping: Bool = false;
/**
The original audio source data for this channel.
**/
final data: AudioBuffer;
/**
The audio data used for playback. This might differ from `this.data`
if this channel has `SourceEffect`s assigned to it.
**/
var playbackData: AudioBuffer;
public function new(data: Float32Array, looping: Bool) {
this.data = this.playbackData = new AudioBuffer(2, Std.int(data.length / 2));
this.data.deinterleaveFromFloat32Array(data, 2);
this.looping = looping;
}
override function parseMessage(message: Message) {
switch (message.id) {
case UncompBufferChannelMessageID.AddSourceEffect:
final sourceEffect: SourceEffect = message.dataAsArrayUnsafe()[0];
final _playbackData = message.dataAsArrayUnsafe()[1];
if (_playbackData != null) {
playbackData = _playbackData;
}
addSourceEffect(sourceEffect);
case UncompBufferChannelMessageID.RemoveSourceEffect:
final sourceEffect: SourceEffect = message.dataAsArrayUnsafe()[0];
final _playbackData = message.dataAsArrayUnsafe()[1];
if (_playbackData != null) {
playbackData = _playbackData;
}
removeSourceEffect(sourceEffect);
default: super.parseMessage(message);
}
}
function nextSamples(requestedSamples: AudioBuffer, sampleRate: Hertz): Void {
assert(Critical, requestedSamples.numChannels == playbackData.numChannels);
final stepDopplerRatio = pDopplerRatio.getLerpStepSize(requestedSamples.channelLength);
final stepDstAttenuation = pDstAttenuation.getLerpStepSize(requestedSamples.channelLength);
final stepVol = pVolume.getLerpStepSize(requestedSamples.channelLength);
var samplesWritten = 0;
// As long as there are more samples requested
while (samplesWritten < requestedSamples.channelLength) {
// Check how many samples we can actually write
final samplesToWrite = minI(playbackData.channelLength - playbackPosition, requestedSamples.channelLength - samplesWritten);
for (c in 0...requestedSamples.numChannels) {
final outChannelView = requestedSamples.getChannelView(c);
final dataChannelView = playbackData.getChannelView(c);
// Reset interpolators for channel
pDopplerRatio.currentValue = pDopplerRatio.lastValue;
pDstAttenuation.currentValue = pDstAttenuation.lastValue;
pVolume.currentValue = pVolume.lastValue;
for (i in 0...samplesToWrite) {
final value = dataChannelView[playbackPosition + i] * pVolume.currentValue * pDstAttenuation.currentValue;
outChannelView[samplesWritten + i] = value;
// TODO: SIMD
pDopplerRatio.currentValue += stepDopplerRatio;
pDstAttenuation.currentValue += stepDstAttenuation;
pVolume.currentValue += stepVol;
}
}
samplesWritten += samplesToWrite;
playbackPosition += samplesToWrite;
if (playbackPosition >= playbackData.channelLength) {
playbackPosition = 0;
if (looping) {
optionallyApplySourceEffects();
}
else {
finished = true;
break;
}
}
}
// Fill further requested samples with zeroes
for (c in 0...requestedSamples.numChannels) {
final channelView = requestedSamples.getChannelView(c);
for (i in samplesWritten...requestedSamples.channelLength) {
channelView[i] = 0;
}
}
pDopplerRatio.updateLast();
pDstAttenuation.updateLast();
pVolume.updateLast();
processInserts(requestedSamples);
}
function play(retrigger: Bool): Void {
if (finished || retrigger || !appliedSourceEffects) {
optionallyApplySourceEffects();
}
paused = false;
finished = false;
if (retrigger) {
playbackPosition = 0;
}
}
function pause(): Void {
paused = true;
}
function stop(): Void {
playbackPosition = 0;
finished = true;
}
inline function addSourceEffect(audioSourceEffect: SourceEffect) {
sourceEffects.push(audioSourceEffect);
appliedSourceEffects = false;
}
inline function removeSourceEffect(audioSourceEffect: SourceEffect) {
sourceEffects.remove(audioSourceEffect);
appliedSourceEffects = false;
}
/**
Apply all source effects to `playbackData`, if there are any.
**/
@:access(aura.dsp.sourcefx.SourceEffect)
function optionallyApplySourceEffects() {
var currentSrcBuffer = data;
var previousLength = data.channelLength;
var needsReprocessing = !appliedSourceEffects;
if (!needsReprocessing) {
for (sourceEffect in sourceEffects) {
if (sourceEffect.applyOnReplay.load()) {
needsReprocessing = true;
break;
}
}
}
if (needsReprocessing) {
for (sourceEffect in sourceEffects) {
previousLength = sourceEffect.process(currentSrcBuffer, previousLength, playbackData);
currentSrcBuffer = playbackData;
}
}
appliedSourceEffects = true;
}
}
private class UncompBufferChannelMessageID extends ChannelMessageID {
final AddSourceEffect;
final RemoveSourceEffect;
}

View File

@ -0,0 +1,145 @@
// =============================================================================
// Roughly based on
// https://github.com/Kode/Kha/blob/master/Sources/kha/audio2/ResamplingAudioChannel.hx
// =============================================================================
package aura.channels;
import kha.arrays.Float32Array;
import aura.threading.Message;
import aura.types.AudioBuffer;
import aura.utils.MathUtils;
import aura.utils.Interpolator.LinearInterpolator;
import aura.utils.Profiler;
import aura.utils.Resampler;
class UncompBufferResamplingChannel extends UncompBufferChannel {
public var sampleRate: Hertz;
public var floatPosition: Float = 0.0;
final pPitch = new LinearInterpolator(1.0);
public function new(data: Float32Array, looping: Bool, sampleRate: Hertz) {
super(data, looping);
this.sampleRate = sampleRate;
}
override function nextSamples(requestedSamples: AudioBuffer, sampleRate: Hertz): Void {
Profiler.event();
assert(Critical, requestedSamples.numChannels == playbackData.numChannels);
final stepDopplerRatio = pDopplerRatio.getLerpStepSize(requestedSamples.channelLength);
final stepDstAttenuation = pDstAttenuation.getLerpStepSize(requestedSamples.channelLength);
final stepPitch = pPitch.getLerpStepSize(requestedSamples.channelLength);
final stepVol = pVolume.getLerpStepSize(requestedSamples.channelLength);
final resampleLength = Resampler.getResampleLength(playbackData.channelLength, this.sampleRate, sampleRate);
var samplesWritten = 0;
var reachedEndOfData = false;
// As long as there are more samples requested and there is data left
while (samplesWritten < requestedSamples.channelLength && !reachedEndOfData) {
final initialFloatPosition = floatPosition;
// Check how many samples we can actually write
final samplesToWrite = minI(resampleLength - playbackPosition, requestedSamples.channelLength - samplesWritten);
for (c in 0...requestedSamples.numChannels) {
final outChannelView = requestedSamples.getChannelView(c);
// Reset interpolators for channel
pDopplerRatio.currentValue = pDopplerRatio.lastValue;
pDstAttenuation.currentValue = pDstAttenuation.lastValue;
pPitch.currentValue = pPitch.lastValue;
pVolume.currentValue = pVolume.lastValue;
floatPosition = initialFloatPosition;
for (i in 0...samplesToWrite) {
var sampledVal: Float = Resampler.sampleAtTargetPositionLerp(playbackData.getChannelView(c), floatPosition, this.sampleRate, sampleRate);
if (pDopplerRatio.currentValue <= 0) {
// In this case, the audio is inaudible at the time of emission at its source,
// although technically the sound would eventually arrive at the listener in reverse.
// We don't simulate the latter, but still make the sound silent for some added realism
outChannelView[samplesWritten + i] = 0.0;
floatPosition += pPitch.currentValue;
}
else {
outChannelView[samplesWritten + i] = sampledVal * pVolume.currentValue * pDstAttenuation.currentValue;
floatPosition += pPitch.currentValue * pDopplerRatio.currentValue;
}
pDopplerRatio.currentValue += stepDopplerRatio;
pDstAttenuation.currentValue += stepDstAttenuation;
pPitch.currentValue += stepPitch;
pVolume.currentValue += stepVol;
if (floatPosition >= resampleLength) {
if (looping) {
while (floatPosition >= resampleLength) {
playbackPosition -= resampleLength;
floatPosition -= resampleLength; // Keep fraction
}
if (c == 0) {
optionallyApplySourceEffects();
}
}
else {
stop();
reachedEndOfData = true;
break;
}
}
else {
playbackPosition = Std.int(floatPosition);
}
}
}
samplesWritten += samplesToWrite;
}
// We're out of data, but more samples are requested
for (c in 0...requestedSamples.numChannels) {
final channelView = requestedSamples.getChannelView(c);
for (i in samplesWritten...requestedSamples.channelLength) {
channelView[i] = 0;
}
}
pDopplerRatio.updateLast();
pDstAttenuation.updateLast();
pPitch.updateLast();
pVolume.updateLast();
processInserts(requestedSamples);
}
override public function play(retrigger: Bool) {
super.play(retrigger);
if (retrigger) {
floatPosition = 0.0;
}
}
override public function stop() {
super.stop();
floatPosition = 0.0;
}
override public function pause() {
super.pause();
floatPosition = playbackPosition;
}
override function parseMessage(message: Message) {
switch (message.id) {
case ChannelMessageID.PPitch: pPitch.targetValue = cast message.data;
default:
super.parseMessage(message);
}
}
}

View File

@ -0,0 +1,16 @@
package aura.channels.generators;
abstract class BaseGenerator extends BaseChannel {
public function play(retrigger: Bool): Void {
paused = false;
finished = false;
}
public function pause(): Void {
paused = true;
}
public function stop(): Void {
finished = true;
}
}

View File

@ -0,0 +1,42 @@
package aura.channels.generators;
import haxe.ds.Vector;
import kha.FastFloat;
import aura.channels.BaseChannel.BaseChannelHandle;
import aura.types.AudioBuffer;
import aura.utils.BufferUtils;
/**
Signal noise produced by Brownian motion.
**/
class BrownNoise extends BaseGenerator {
final last: Vector<FastFloat>;
inline function new() {
last = createEmptyVecF32(2);
}
/**
Creates a new BrownNoise channel and returns a handle to it.
**/
public static function create(): BaseChannelHandle {
return new BaseChannelHandle(new BrownNoise());
}
function nextSamples(requestedSamples: AudioBuffer, sampleRate: Hertz) {
for (c in 0...requestedSamples.numChannels) {
final channelView = requestedSamples.getChannelView(c);
for (i in 0...requestedSamples.channelLength) {
final white = Math.random() * 2 - 1;
channelView[i] = (last[c] + (0.02 * white)) / 1.02;
last[c] = channelView[i];
channelView[i] *= 3.5; // (Roughly) compensate for the filter's gain loss
}
}
processInserts(requestedSamples);
}
}

View File

@ -0,0 +1,67 @@
package aura.channels.generators;
import haxe.ds.Vector;
import kha.FastFloat;
import aura.channels.BaseChannel.BaseChannelHandle;
import aura.types.AudioBuffer;
import aura.utils.BufferUtils;
/**
Signal with a frequency spectrum such that the power spectral density
(energy or power per Hz) is inversely proportional to the frequency of the
signal. Each octave (halving/doubling in frequency) carries an equal amount
of noise power.
**/
class PinkNoise extends BaseGenerator {
final b0: Vector<FastFloat>;
final b1: Vector<FastFloat>;
final b2: Vector<FastFloat>;
final b3: Vector<FastFloat>;
final b4: Vector<FastFloat>;
final b5: Vector<FastFloat>;
final b6: Vector<FastFloat>;
inline function new() {
b0 = createEmptyVecF32(2);
b1 = createEmptyVecF32(2);
b2 = createEmptyVecF32(2);
b3 = createEmptyVecF32(2);
b4 = createEmptyVecF32(2);
b5 = createEmptyVecF32(2);
b6 = createEmptyVecF32(2);
}
/**
Creates a new PinkNoise channel and returns a handle to it.
**/
public static function create(): BaseChannelHandle {
return new BaseChannelHandle(new PinkNoise());
}
function nextSamples(requestedSamples: AudioBuffer, sampleRate: Hertz) {
for (c in 0...requestedSamples.numChannels) {
final channelView = requestedSamples.getChannelView(c);
for (i in 0...requestedSamples.channelLength) {
final white = Math.random() * 2 - 1;
// Paul Kellet's refined method from
// https://www.firstpr.com.au/dsp/pink-noise/
b0[c] = 0.99886 * b0[c] + white * 0.0555179;
b1[c] = 0.99332 * b1[c] + white * 0.0750759;
b2[c] = 0.96900 * b2[c] + white * 0.1538520;
b3[c] = 0.86650 * b3[c] + white * 0.3104856;
b4[c] = 0.55000 * b4[c] + white * 0.5329522;
b5[c] = -0.7616 * b5[c] - white * 0.0168980;
channelView[i] = b0[c] + b1[c] + b2[c] + b3[c] + b4[c] + b5[c] + b6[c] + white * 0.5362;
channelView[i] *= 0.11;
b6[c] = white * 0.115926;
}
}
processInserts(requestedSamples);
}
}
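// Usage sketch: generator channels are created via their static create()
// functions and then behave like any other channel handle.
//
//     final noise = PinkNoise.create();
//     noise.setVolume(0.25); // Full-scale noise is loud
//     noise.play();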

View File

@ -0,0 +1,27 @@
package aura.channels.generators;
import aura.channels.BaseChannel.BaseChannelHandle;
import aura.types.AudioBuffer;
/**
Random signal with a constant power spectral density.
**/
class WhiteNoise extends BaseGenerator {
inline function new() {}
/**
Creates a new WhiteNoise channel and returns a handle to it.
**/
public static function create(): BaseChannelHandle {
return new BaseChannelHandle(new WhiteNoise());
}
function nextSamples(requestedSamples: AudioBuffer, sampleRate: Hertz) {
for (i in 0...requestedSamples.rawData.length) {
requestedSamples.rawData[i] = Math.random() * 2 - 1;
}
processInserts(requestedSamples);
}
}

View File

@ -0,0 +1,36 @@
package aura.dsp;
import aura.threading.Fifo;
import aura.threading.Message;
import aura.types.AudioBuffer;
@:allow(aura.dsp.panner.Panner)
abstract class DSP {
public var bypass = false;
var inUse = false;
final messages: Fifo<Message> = new Fifo();
abstract function process(buffer: AudioBuffer): Void;
function synchronize() {
var message: Null<Message>;
while ((message = messages.tryPop()) != null) {
parseMessage(message);
}
}
function parseMessage(message: Message) {
switch (message.id) {
// TODO
case DSPMessageID.BypassEnable:
case DSPMessageID.BypassDisable:
default:
}
}
inline function sendMessage(message: Message) {
messages.add(message);
}
}

View File

@ -0,0 +1,61 @@
package aura.dsp;
import haxe.ds.Vector;
import aura.threading.Message;
import aura.types.AudioBuffer;
import aura.utils.CircularBuffer;
class DelayLine extends DSP {
public static inline var NUM_CHANNELS = 2;
public final maxDelaySamples: Int;
final delayBufs: Vector<CircularBuffer>;
public function new(maxDelaySamples: Int) {
this.maxDelaySamples = maxDelaySamples;
delayBufs = new Vector(NUM_CHANNELS);
for (i in 0...NUM_CHANNELS) {
delayBufs[i] = new CircularBuffer(maxDelaySamples);
}
}
public inline function setDelay(delaySamples: Int) {
for (i in 0...NUM_CHANNELS) {
delayBufs[i].setDelay(delaySamples);
}
}
public inline function setDelays(delaySamples: Array<Int>) {
for (i in 0...NUM_CHANNELS) {
delayBufs[i].setDelay(delaySamples[i]);
}
}
function process(buffer: AudioBuffer) {
for (c in 0...buffer.numChannels) {
final delayBuf = delayBufs[c];
if (delayBuf.delay == 0) continue;
final channelView = buffer.getChannelView(c);
for (i in 0...buffer.channelLength) {
delayBuf.set(channelView[i]);
channelView[i] = delayBuf.get();
delayBuf.increment();
}
}
}
override function parseMessage(message: Message) {
switch (message.id) {
case DSPMessageID.SetDelays:
setDelays(message.data);
default:
super.parseMessage(message);
}
}
}
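// Usage sketch (hypothetical `handle`): delay the right channel by 1000
// samples relative to the left channel for a simple stereo-widening effect.
// The delay values must not exceed the maxDelaySamples passed to the
// constructor.
//
//     final delay = new DelayLine(2048);
//     delay.setDelays([0, 1000]);
//     handle.addInsert(delay);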

View File

@ -0,0 +1,217 @@
package aura.dsp;
import haxe.ds.Vector;
import kha.FastFloat;
import kha.arrays.Float32Array;
import kha.arrays.Int32Array;
import aura.math.FFT;
import aura.threading.Message;
import aura.types.AudioBuffer;
import aura.types.ComplexArray;
import aura.types.SwapBuffer;
import aura.utils.BufferUtils;
import aura.utils.MathUtils;
import aura.utils.Profiler;
/**
Calculates the 1D linear convolution of the input with another buffer called
`impulse`.
**/
class FFTConvolver extends DSP {
public static inline var NUM_CHANNELS = 2;
public static inline var FFT_SIZE = 1024;
public static inline var CHUNK_SIZE = Std.int(FFT_SIZE / 2);
/**
The number of samples used to (temporally) interpolate
between consecutive impulse responses. Values larger than `CHUNK_SIZE`
are clamped to that length.
**Special values**:
- Any negative value: Automatically follows `CHUNK_SIZE`
- 0: Do not interpolate between consecutive impulse responses // TODO implement me
**/
// TODO: make thread-safe
public var temporalInterpolationLength = -1;
final impulseSwapBuffer: SwapBuffer;
/**
The part of the last output signal that was longer than the last frame
buffer and thus overlaps to the next frame. To prevent allocations
during runtime and to ensure that overlapPrev is not longer than one
FFT segment, the overlap vectors are preallocated to `CHUNK_SIZE - 1`.
Use `overlapLength` to get the true length.
**/
final overlapPrev: Vector<Vector<FastFloat>>;
/**
The (per-channel) overlap length of the convolution result for the
current impulse response.
**/
final overlapLength: Vector<Int>;
/**
The (per-channel) overlap length of the convolution result for the
impulse response from the previous processing block.
**/
final prevOverlapLength: Vector<Int>;
static var signalFFT: Null<RealValuedFFT>;
final impulseFFT: Null<RealValuedFFT>;
var currentImpulseAlternationIndex = 0;
final prevImpulseLengths: Int32Array; // Initialized in the constructor
public function new() {
assert(Error, isPowerOf2(FFT_SIZE), 'FFT_SIZE must be a power of 2, but it is $FFT_SIZE');
if (signalFFT == null) {
signalFFT = new RealValuedFFT(FFT_SIZE, 2, 2);
}
impulseFFT = new RealValuedFFT(FFT_SIZE, 1, NUM_CHANNELS * 2);
prevImpulseLengths = new Int32Array(NUM_CHANNELS);
for (i in 0...prevImpulseLengths.length) {
prevImpulseLengths[i] = 0;
}
impulseSwapBuffer = new SwapBuffer(CHUNK_SIZE * 2);
overlapPrev = new Vector(NUM_CHANNELS);
for (i in 0...NUM_CHANNELS) {
// Max. impulse size is CHUNK_SIZE
overlapPrev[i] = createEmptyVecF32(CHUNK_SIZE - 1);
}
overlapLength = createEmptyVecI(NUM_CHANNELS);
prevOverlapLength = createEmptyVecI(NUM_CHANNELS);
}
// TODO: move this into main thread and use swapbuffer for impulse freqs
// instead? Moving the impulse FFT computation into the main thread will
// also remove the fft computation while the swap buffer lock is active,
// reducing the lock time, but it occupies the main thread more...
function updateImpulseFromSwapBuffer(impulseLengths: Array<Int>) {
final impulseTimeDomain = impulseFFT.getInput(0);
impulseSwapBuffer.beginRead();
for (c in 0...impulseLengths.length) {
impulseSwapBuffer.read(impulseTimeDomain, 0, CHUNK_SIZE * c, CHUNK_SIZE);
inline calculateImpulseFFT(impulseLengths[c], c);
}
impulseSwapBuffer.endRead();
currentImpulseAlternationIndex = 1 - currentImpulseAlternationIndex;
}
inline function calculateImpulseFFT(impulseLength: Int, channelIndex: Int) {
impulseFFT.forwardFFT(0, NUM_CHANNELS * channelIndex + currentImpulseAlternationIndex);
overlapLength[channelIndex] = maxI(prevImpulseLengths[channelIndex], impulseLength - 1);
prevImpulseLengths[channelIndex] = impulseLength;
}
public function process(buffer: AudioBuffer) {
Profiler.event();
// TODO
assert(Critical, buffer.numChannels == NUM_CHANNELS);
for (c in 0...buffer.numChannels) {
if (overlapLength[c] < 0) return;
}
// Ensure correct boundaries
final isMultiple = (buffer.channelLength % CHUNK_SIZE) == 0 || (CHUNK_SIZE % buffer.channelLength) == 0;
assert(Debug, isMultiple, "channelLength must be a multiple of CHUNK_SIZE or vice versa");
var numSegments: Int; // Segments per channel frame
var segmentSize: Int;
if (CHUNK_SIZE < buffer.channelLength) {
numSegments = Std.int(buffer.channelLength / CHUNK_SIZE);
segmentSize = CHUNK_SIZE;
}
else {
// TODO: accumulate samples if buffer.channelLength < CHUNK_SIZE,
// then delay output
numSegments = 1;
segmentSize = buffer.channelLength;
}
final numInterpolationSteps = temporalInterpolationLength < 0 ? CHUNK_SIZE : minI(temporalInterpolationLength, CHUNK_SIZE);
final interpolationStepSize = 1 / numInterpolationSteps;
final signalTimeDomainCurrentImpulse = signalFFT.getInput(0);
final signalTimeDomainPrevImpulse = signalFFT.getInput(1);
final signalFreqDomainCurrentImpulse = signalFFT.getOutput(0);
final signalFreqDomainPrevImpulse = signalFFT.getOutput(1);
for (c in 0...buffer.numChannels) {
final channelView = buffer.getChannelView(c);
final impulseFreqDomainCurrent = impulseFFT.getOutput(NUM_CHANNELS * c + (1 - currentImpulseAlternationIndex));
final impulseFreqDomainPrev = impulseFFT.getOutput(NUM_CHANNELS * c + currentImpulseAlternationIndex);
for (s in 0...numSegments) {
final segmentOffset = s * segmentSize;
// Copy to FFT input buffer and apply padding
for (i in 0...segmentSize) {
signalTimeDomainCurrentImpulse[i] = channelView[segmentOffset + i];
}
for (i in segmentSize...FFT_SIZE) {
signalTimeDomainCurrentImpulse[i] = 0.0;
}
signalFFT.forwardFFT(0, 0);
// Copy the signal's frequency-domain data to multiply it with
// both the current and the previous impulse frequency responses
signalFreqDomainPrevImpulse.copyFrom(signalFreqDomainCurrentImpulse);
// The actual convolution takes place here
// TODO: SIMD
for (i in 0...FFT_SIZE) {
signalFreqDomainCurrentImpulse[i] *= impulseFreqDomainCurrent[i];
signalFreqDomainPrevImpulse[i] *= impulseFreqDomainPrev[i];
}
// Transform back into time domain
signalFFT.inverseFFT(0, 0);
signalFFT.inverseFFT(1, 1);
// Interpolate (only for first segment) and copy to output
final actualNumInterpolationSteps = (s == 0) ? numInterpolationSteps : 0;
var t = 0.0;
for (i in 0...actualNumInterpolationSteps) {
channelView[segmentOffset + i] = lerpF32(signalTimeDomainPrevImpulse[i], signalTimeDomainCurrentImpulse[i], t);
t += interpolationStepSize;
}
for (i in actualNumInterpolationSteps...CHUNK_SIZE) {
channelView[segmentOffset + i] = signalTimeDomainCurrentImpulse[i];
}
// Apply overlapping from last segment
for (i in 0...prevOverlapLength[c]) {
channelView[segmentOffset + i] += overlapPrev[c][i];
}
// Write overlapping samples for next segment
for (i in 0...overlapLength[c]) {
overlapPrev[c][i] = signalTimeDomainCurrentImpulse[CHUNK_SIZE + i];
}
prevOverlapLength[c] = overlapLength[c];
}
}
}
override function parseMessage(message: Message) {
switch (message.id) {
case DSPMessageID.SwapBufferReady:
updateImpulseFromSwapBuffer(message.data);
default:
super.parseMessage(message);
}
}
}
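// Sizing note for the overlap-add scheme above, using this class's
// constants: with FFT_SIZE = 1024 and CHUNK_SIZE = 512, convolving a
// 512-sample signal segment with an impulse response of up to 512 samples
// yields up to 512 + 512 - 1 = 1023 output samples. The first 512 samples
// belong to the current segment; the remaining (at most) 511 samples are
// stored in overlapPrev and added onto the next segment, which is why the
// overlap vectors are preallocated with CHUNK_SIZE - 1 elements.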

View File

@ -0,0 +1,86 @@
package aura.dsp;
import haxe.ds.Vector;
import kha.FastFloat;
import aura.Types;
import aura.types.AudioBuffer;
import aura.utils.BufferUtils;
import aura.utils.FrequencyUtils;
import aura.utils.MathUtils;
using aura.utils.StepIterator;
/**
A simple IIR (infinite impulse response) lowpass/bandpass/highpass filter
with a slope of 12 dB/octave.
**/
class Filter extends DSP {
/**
Whether the filter should be a low-/band- or highpass filter.
**/
public var filterMode: FilterMode;
final buf: Vector<Vector<FastFloat>>;
final cutoff: Vector<FastFloat>;
public function new(filterMode: FilterMode) {
this.filterMode = filterMode;
this.buf = new Vector(2); // Two channels
buf[0] = createEmptyVecF32(2); // Two buffers per channel
buf[1] = createEmptyVecF32(2);
this.cutoff = new Vector(2);
cutoff[0] = cutoff[1] = 1.0;
}
public function process(buffer: AudioBuffer) {
for (c in 0...buffer.numChannels) {
if (cutoff[c] == 1.0) { continue; }
final channelView = buffer.getChannelView(c);
for (i in 0...buffer.channelLength) {
// http://www.martin-finke.de/blog/articles/audio-plugins-013-filter/
buf[c][0] += cutoff[c] * (channelView[i] - buf[c][0]);
buf[c][1] += cutoff[c] * (buf[c][0] - buf[c][1]);
// TODO: Move the switch out of the loop, even if that means duplicate code?
channelView[i] = switch (filterMode) {
case LowPass: buf[c][1];
case HighPass: channelView[i] - buf[c][0];
case BandPass: buf[c][0] - buf[c][1];
}
}
}
}
/**
Set the cutoff frequency for this filter. `channels` states for which
channels to set the cutoff value.
**/
public inline function setCutoffFreq(cutoffFreq: Hertz, channels: Channels = All) {
final maxFreq = sampleRateToMaxFreq(Aura.sampleRate);
final c = frequencyToFactor(clampI(cutoffFreq, 0, maxFreq), maxFreq);
if (channels.matches(Channels.Left)) { cutoff[0] = c; }
if (channels.matches(Channels.Right)) { cutoff[1] = c; }
}
/**
Get the cutoff frequency of this filter. `channels` states from which
channel to get the cutoff value; if it matches `Left` (which `All`
does), the left channel's cutoff frequency is returned.
**/
public inline function getCutoffFreq(channels: Channels = All): Hertz {
final c = channels.matches(Channels.Left) ? cutoff[0] : cutoff[1];
return factorToFrequency(c, sampleRateToMaxFreq(Aura.sampleRate));
}
}
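// Usage sketch (hypothetical `handle`): remove everything above ca. 1 kHz
// on both channels.
//
//     final lowpass = new Filter(LowPass);
//     lowpass.setCutoffFreq(1000);
//     handle.addInsert(lowpass);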
enum abstract FilterMode(Int) {
var LowPass;
var BandPass;
var HighPass;
}

View File

@ -0,0 +1,99 @@
package aura.dsp;
import haxe.ds.Vector;
import kha.FastFloat;
import kha.arrays.Float32Array;
import aura.Types;
import aura.threading.Message;
import aura.types.AudioBuffer;
import aura.utils.CircularBuffer;
/**
A delay line that supports fractions of samples set as delay times.
The implementation follows the linear interpolation approach as presented
in https://ccrma.stanford.edu/~jos/pasp/Fractional_Delay_Filtering_Linear.html.
@see `aura.dsp.DelayLine`
**/
class FractionalDelayLine extends DSP {
/**
The maximum number of channels this DSP effect supports.
**/
public final maxNumChannels: Int;
/**
The maximum number of (whole) samples by which any channel of the input
can be delayed.
**/
public final maxDelayLength: Int;
final delayBufs: Vector<CircularBuffer>;
final delayLengthFracts: Float32Array;
public function new(maxNumChannels: Int, maxDelayLength: Int) {
this.maxNumChannels = maxNumChannels;
this.maxDelayLength = maxDelayLength;
delayLengthFracts = new Float32Array(maxNumChannels);
delayBufs = new Vector(maxNumChannels);
for (i in 0...maxNumChannels) {
delayLengthFracts[i] = 0.0;
delayBufs[i] = new CircularBuffer(maxDelayLength);
}
}
public inline function setDelayLength(channelMask: Channels, delayLength: FastFloat) {
assert(Error, delayLength >= 0);
assert(Error, delayLength < maxDelayLength);
sendMessage({id: DSPMessageID.SetDelays, data: [channelMask, delayLength]});
}
function process(buffer: AudioBuffer) {
for (c in 0...buffer.numChannels) {
if (delayBufs[c].delay == 0) continue;
final channelView = buffer.getChannelView(c);
for (i in 0...buffer.channelLength) {
delayBufs[c].set(channelView[i]);
var delayedSignalMm1 = delayBufs[c].get(); // M - 1
delayBufs[c].increment();
var delayedSignalM = delayBufs[c].get(); // M
channelView[i] = delayedSignalM + delayLengthFracts[c] * (delayedSignalMm1 - delayedSignalM);
}
}
}
override function parseMessage(message: Message) {
switch (message.id) {
case DSPMessageID.SetDelays:
final channelMask = message.dataAsArrayUnsafe()[0];
final delayLength = message.dataAsArrayUnsafe()[1];
at_setDelayLength(channelMask, delayLength);
default:
super.parseMessage(message);
}
}
inline function at_setDelayLength(channelMask: Channels, delayLength: FastFloat) {
final delayLengthFloor = Math.ffloor(delayLength); // TODO implement 32-bit ffloor
final delayLengthFract = delayLength - delayLengthFloor;
final delayLengthInt = Std.int(delayLengthFloor);
for (c in 0...maxNumChannels) {
if (!channelMask.matchesIndex(c)) {
continue;
}
delayLengthFracts[c] = delayLengthFract;
delayBufs[c].setDelay(delayLengthInt + 1);
}
}
}
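// Usage sketch (hypothetical `handle`): delay both channels by 10.25
// samples. Internally, the integer part (plus one) is handled by the
// circular buffers and the fractional part (0.25) by the linear
// interpolation in process().
//
//     final fracDelay = new FractionalDelayLine(2, 64);
//     fracDelay.setDelayLength(Channels.All, 10.25);
//     handle.addInsert(fracDelay);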

View File

@ -0,0 +1,55 @@
package aura.dsp;
import aura.types.AudioBuffer;
import aura.utils.CircularBuffer;
import aura.utils.FrequencyUtils;
/**
The [Haas effect](https://en.wikipedia.org/wiki/Precedence_effect) is a
psychoacoustic effect that delays one stereo channel by ca.
3 - 50 milliseconds to create the perception of 3D sound.
Using a negative value for `delay` moves the sound to the left of the
listener by delaying the right channel. Using a positive value delays the
left channel and moves the sound to the right. If `delay` is `0`, this
effect does nothing.
**/
class HaasEffect extends DSP {
var delayChannelIdx: Int;
var diffSamples: Int;
var delayBuff: CircularBuffer;
public function new(delay: Millisecond) {
this.diffSamples = 0;
this.setDelay(delay);
}
public function process(buffer: AudioBuffer) {
if (diffSamples == 0) return;
for (c in 0...buffer.numChannels) {
if (c != delayChannelIdx) { continue; }
final channelView = buffer.getChannelView(c);
for (i in 0...buffer.channelLength) {
delayBuff.set(channelView[i]);
channelView[i] = delayBuff.get();
delayBuff.increment();
}
}
}
public function setDelay(delay: Millisecond) {
final prev = diffSamples;
this.diffSamples = msToSamples(Aura.sampleRate, delay);
if (prev != diffSamples) {
this.delayChannelIdx = (diffSamples > 0) ? 0 : 1;
this.delayBuff = new CircularBuffer((diffSamples < 0) ? -diffSamples : diffSamples);
}
}
public inline function getDelay(): Millisecond {
return samplesToMs(Aura.sampleRate, diffSamples);
}
}
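/**
	Hypothetical usage sketch, not part of the original file: move a sound to
	the left of the listener by delaying the right channel by 15 ms. It is
	assumed that `Millisecond` converts implicitly from an integer literal.
**/
private class HaasEffectUsageSketch {
	public static function makeLeftLeaning(): HaasEffect {
		// Negative delay: the right channel is delayed, the sound moves left
		return new HaasEffect(-15);
	}
}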

View File

@ -0,0 +1,109 @@
package aura.dsp;
import haxe.ds.Vector;
import kha.FastFloat;
import kha.arrays.ByteArray;
import aura.types.AudioBuffer;
import aura.utils.CircularBuffer;
/**
Perform efficient convolution of sparse impulse responses (i.e., impulse
responses in which most samples have a value of 0).
**/
class SparseConvolver extends DSP {
static inline var NUM_CHANNELS = 2;
public final impulseBuffer: SparseImpulseBuffer;
final delayBufs: Vector<CircularBuffer>;
/**
Create a new `SparseConvolver` object.
		@param maxNumImpulses The maximum number of non-zero impulses that can be stored in `this.impulseBuffer`.
@param maxNumImpulseResponseSamples The highest possible position of any non-zero impulse stored in the `impulseBuffer`.
There is no bounds checking in place!
**/
public function new(maxNumImpulses: Int, maxNumImpulseResponseSamples: Int) {
assert(Error, maxNumImpulseResponseSamples > maxNumImpulses);
impulseBuffer = new SparseImpulseBuffer(maxNumImpulses);
delayBufs = new Vector(NUM_CHANNELS);
for (i in 0...NUM_CHANNELS) {
delayBufs[i] = new CircularBuffer(maxNumImpulseResponseSamples);
}
}
public inline function getMaxNumImpulses(): Int {
return impulseBuffer.length;
}
public inline function getMaxNumImpulseResponseSamples(): Int {
return delayBufs[0].length;
}
function process(buffer: AudioBuffer) {
assert(Error, buffer.numChannels == NUM_CHANNELS);
for (c in 0...buffer.numChannels) {
final channelView = buffer.getChannelView(c);
final delayBuf = delayBufs[c];
for (i in 0...buffer.channelLength) {
delayBuf.set(channelView[i]);
var convolutionSum: FastFloat = 0.0;
for (impulseIndex in 0...impulseBuffer.length) {
					// Move the read pointer to the impulse position. Probably
					// not the most cache-efficient operation, but it seems
					// unavoidable.
delayBuf.setDelay(impulseBuffer.getImpulsePos(impulseIndex));
convolutionSum += delayBuf.get() * impulseBuffer.getImpulseMagnitude(impulseIndex);
}
// TODO: impulse response must be longer than buffer.channelLength!
channelView[i] = convolutionSum;
delayBuf.increment();
}
}
}
}
/**
A cache efficient buffer to store `(position: Int, magnitude: FastFloat)`
pairs that represent impulses of varying magnitudes within a sparse impulse
response. The buffer is **NOT** guaranteed to be zero-initialized.
**/
abstract SparseImpulseBuffer(ByteArray) {
public var length(get, never): Int;
public inline function new(numImpulses: Int) {
this = ByteArray.make(numImpulses * 8);
}
public inline function get_length(): Int {
return this.byteLength >> 3;
}
public inline function getImpulsePos(index: Int): Int {
return this.getUint32(index * 8);
}
public inline function setImpulsePos(index: Int, position: Int) {
this.setUint32(index * 8, position);
}
public inline function getImpulseMagnitude(index: Int): FastFloat {
return this.getFloat32(index * 8 + 4);
}
public inline function setImpulseMagnitude(index: Int, magnitude: FastFloat) {
this.setFloat32(index * 8 + 4, magnitude);
}
}
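/**
	Hypothetical usage sketch, not part of the original file: a sparse impulse
	response with two impulses, a unit impulse at sample 0 and an inverted,
	quieter echo at sample 4800. Convolving a stereo buffer with it mixes a
	delayed, attenuated copy into the signal.
**/
private class SparseConvolverUsageSketch {
	public static function makeEchoConvolver(): SparseConvolver {
		final conv = new SparseConvolver(2, 4801);
		conv.impulseBuffer.setImpulsePos(0, 0);
		conv.impulseBuffer.setImpulseMagnitude(0, 1.0);
		conv.impulseBuffer.setImpulsePos(1, 4800);
		conv.impulseBuffer.setImpulseMagnitude(1, -0.5);
		return conv;
	}
}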

View File

@ -0,0 +1,120 @@
package aura.dsp.panner;
import kha.FastFloat;
import kha.arrays.Float32Array;
import aura.Types.Channels;
import aura.channels.BaseChannel.BaseChannelHandle;
import aura.threading.Message;
import aura.types.AudioBuffer;
import aura.types.HRTF;
import aura.utils.MathUtils;
import aura.utils.Pointer;
class HRTFPanner extends Panner {
public var hrtf: HRTF;
final hrtfConvolver: FFTConvolver;
final hrtfDelayLine: FractionalDelayLine;
final hrirPtrDelay: Pointer<FastFloat>;
final hrirPtrImpulseLength: Pointer<Int>;
final hrir: Float32Array;
final hrirOpp: Float32Array;
public function new(handle: BaseChannelHandle, hrtf: HRTF) {
super(handle);
this.hrtf = hrtf;
hrtfConvolver = new FFTConvolver();
hrtfDelayLine = new FractionalDelayLine(2, Math.ceil(hrtf.maxDelayLength));
hrtfConvolver.bypass = true;
hrtfDelayLine.bypass = true;
hrirPtrDelay = new Pointer<FastFloat>();
hrirPtrImpulseLength = new Pointer<Int>();
hrir = new Float32Array(FFTConvolver.CHUNK_SIZE);
hrirOpp = new Float32Array(FFTConvolver.CHUNK_SIZE);
}
override public function update3D() {
final listener = Aura.listener;
final dirToChannel = this.location.sub(listener.location);
if (dirToChannel.length == 0) {
hrtfConvolver.bypass = true;
hrtfDelayLine.bypass = true;
handle.channel.sendMessage({ id: ChannelMessageID.PDstAttenuation, data: 1.0 });
return;
}
final look = listener.look;
final up = listener.right.cross(look).normalized();
// Project the channel position (relative to the listener) to the plane
// described by the listener's look and right vectors
final projectedChannelPos = projectPointOntoPlane(dirToChannel, up).normalized();
final elevationCos = up.dot(dirToChannel.normalized());
// 180: top, 0: bottom
final elevation = 180 - (Math.acos(elevationCos) * (180 / Math.PI));
var angle = getFullAngleDegrees(look, projectedChannelPos, up);
angle = angle != 0 ? 360 - angle : 0; // Make clockwise
hrtf.getInterpolatedHRIR(elevation, angle, hrir, hrirPtrImpulseLength, hrirPtrDelay);
final hrirLength = hrirPtrImpulseLength.getSure();
final hrirDelay = hrirPtrDelay.getSure();
if (hrtf.numChannels == 1) {
hrtf.getInterpolatedHRIR(elevation, 360 - angle, hrirOpp, hrirPtrImpulseLength, hrirPtrDelay);
final hrirOppLength = hrirPtrImpulseLength.getSure();
final hrirOppDelay = hrirPtrDelay.getSure();
final swapBuf = @:privateAccess hrtfConvolver.impulseSwapBuffer;
swapBuf.beginWrite();
// Left channel
swapBuf.write(hrir, 0, 0, hrirLength);
swapBuf.writeZero(hrirLength, FFTConvolver.CHUNK_SIZE);
// Right channel
swapBuf.write(hrirOpp, 0, FFTConvolver.CHUNK_SIZE, hrirOppLength);
swapBuf.writeZero(FFTConvolver.CHUNK_SIZE + hrirOppLength, swapBuf.length);
swapBuf.endWrite();
hrtfConvolver.bypass = false;
hrtfDelayLine.bypass = false;
hrtfConvolver.sendMessage({id: DSPMessageID.SwapBufferReady, data: [hrirLength, hrirOppLength]});
hrtfDelayLine.setDelayLength(Channels.Left, hrirDelay);
hrtfDelayLine.setDelayLength(Channels.Right, hrirOppDelay);
}
else {
for (c in 0...hrtf.numChannels) {
// final delaySamples = Math.round(hrir.delays[0]);
// TODO: handle interleaved coeffs of stereo HRTFs
// Deinterleave when reading the file?
}
}
super.update3D();
}
override public function reset3D() {
hrtfConvolver.bypass = true;
hrtfDelayLine.bypass = true;
super.reset3D();
}
function process(buffer: AudioBuffer) {
if (!hrtfConvolver.bypass) {
hrtfConvolver.synchronize();
hrtfConvolver.process(buffer);
hrtfDelayLine.synchronize();
hrtfDelayLine.process(buffer);
}
}
}
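/**
	Hypothetical usage sketch, not part of the original file: attach an HRTF
	panner to an existing channel handle. `handle` and `mhrFileBytes` are
	assumed to be provided by the surrounding application.
**/
private class HRTFPannerUsageSketch {
	public static function attach(handle: BaseChannelHandle, mhrFileBytes: haxe.io.Bytes): HRTFPanner {
		final hrtf = aura.format.mhr.MHRReader.read(mhrFileBytes);
		return new HRTFPanner(handle, hrtf);
	}
}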

View File

@ -0,0 +1,166 @@
package aura.dsp.panner;
import kha.FastFloat;
import kha.math.FastVector3;
import aura.channels.BaseChannel.BaseChannelHandle;
import aura.math.Vec3;
import aura.threading.Message;
import aura.utils.MathUtils;
abstract class Panner extends DSP {
static inline var REFERENCE_DST = 1.0;
static inline var SPEED_OF_SOUND = 343.4; // Air, m/s
/**
		The strength of the Doppler effect.
		This value is multiplied with the calculated Doppler effect, thus:
		- A value of `0.0` results in no Doppler effect.
		- A value between `0.0` and `1.0` attenuates the effect (smaller values: more attenuation).
		- A value of `1.0` neither attenuates nor amplifies the Doppler effect.
		- A value larger than `1.0` amplifies the Doppler effect (larger values: more amplification).
**/
public var dopplerStrength = 1.0;
public var attenuationMode = AttenuationMode.Inverse;
public var attenuationFactor = 1.0;
public var maxDistance = 10.0;
// public var minDistance = 1;
var handle: BaseChannelHandle;
/**
The location of this audio source in world space.
**/
var location: Vec3 = new Vec3(0, 0, 0);
/**
The velocity of this audio source in world space.
**/
var velocity: Vec3 = new Vec3(0, 0, 0);
public function new(handle: BaseChannelHandle) {
this.inUse = true; // Don't allow using panners with addInsert()
this.handle = handle;
this.handle.channel.panner = this;
}
public inline function setHandle(handle: BaseChannelHandle) {
if (this.handle != null) {
this.handle.channel.panner = null;
}
reset3D();
this.handle = handle;
this.handle.channel.panner = this;
}
/**
Update the channel's audible 3D parameters after changing the channel's
or the listener's position or rotation.
**/
public function update3D() {
final displacementToSource = location.sub(Aura.listener.location);
calculateAttenuation(displacementToSource);
calculateDoppler(displacementToSource);
};
/**
		Reset all the audible 3D sound parameters (balance, Doppler effect etc.)
which are calculated by `update3D()`. This function does *not* reset the
location value of the sound, so if you call `update3D()` again, you will
hear the sound at the same position as before you called `reset3D()`.
**/
public function reset3D() {
handle.channel.sendMessage({ id: ChannelMessageID.PDopplerRatio, data: 1.0 });
handle.channel.sendMessage({ id: ChannelMessageID.PDstAttenuation, data: 1.0 });
};
/**
		Set the location of this panner in world space.
		Every call except the first also updates the panner's velocity. The
		first call is excluded to avoid audible "jumps" in the Doppler effect
		when objects are initially placed far away from the origin.
**/
public function setLocation(location: Vec3) {
final time = Time.getTime();
final timeDeltaLastCall = time - _setLocation_lastCallTime;
// If the last time setLocation() was called was at an earlier time step
if (timeDeltaLastCall > 0) {
_setLocation_lastLocation.setFrom(this.location);
_setLocation_lastVelocityUpdateTime = _setLocation_lastCallTime;
}
final timeDeltaVelocityUpdate = time - _setLocation_lastVelocityUpdateTime;
this.location.setFrom(location);
if (!_setLocation_initializedLocation) {
			// Prevent jumps in the Doppler effect caused by an initial
			// distance too far away from the origin
_setLocation_initializedLocation = true;
}
else if (timeDeltaVelocityUpdate > 0) {
velocity.setFrom(location.sub(_setLocation_lastLocation).mult(1 / timeDeltaVelocityUpdate));
}
_setLocation_lastCallTime = time;
}
var _setLocation_initializedLocation = false;
var _setLocation_lastLocation: Vec3 = new Vec3(0, 0, 0);
var _setLocation_lastCallTime: Float = 0.0;
var _setLocation_lastVelocityUpdateTime: Float = 0.0;
function calculateAttenuation(dirToChannel: FastVector3) {
final dst = maxF(REFERENCE_DST, dirToChannel.length);
final dstAttenuation = switch (attenuationMode) {
case Linear:
maxF(0.0, 1 - attenuationFactor * (dst - REFERENCE_DST) / (maxDistance - REFERENCE_DST));
case Inverse:
REFERENCE_DST / (REFERENCE_DST + attenuationFactor * (dst - REFERENCE_DST));
case Exponential:
Math.pow(dst / REFERENCE_DST, -attenuationFactor);
}
handle.channel.sendMessage({ id: ChannelMessageID.PDstAttenuation, data: dstAttenuation });
}
function calculateDoppler(displacementToSource: FastVector3) {
final listener = Aura.listener;
var dopplerRatio: FastFloat = 1.0;
if (dopplerStrength != 0.0 && (listener.velocity.length != 0 || this.velocity.length != 0)) {
final dist = displacementToSource.length;
if (dist == 0) {
// We don't have any radial velocity here...
handle.channel.sendMessage({ id: ChannelMessageID.PDopplerRatio, data: 1.0 });
return;
}
// Calculate radial velocity
final vr = listener.velocity.dot(displacementToSource) / dist;
final vs = this.velocity.dot(displacementToSource) / dist;
			// The source approaches the listener at exactly the speed of
			// sound; make it silent and prevent a division by zero below
if (vs == -SPEED_OF_SOUND) {
handle.channel.sendMessage({ id: ChannelMessageID.PDopplerRatio, data: 0.0 });
return;
}
dopplerRatio = (SPEED_OF_SOUND + vr) / (SPEED_OF_SOUND + vs);
dopplerRatio = Math.pow(dopplerRatio, dopplerStrength);
}
handle.channel.sendMessage({ id: ChannelMessageID.PDopplerRatio, data: dopplerRatio });
}
}
enum abstract AttenuationMode(Int) {
var Linear;
var Inverse;
var Exponential;
}
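/**
	Worked example, not part of the original file: with `REFERENCE_DST = 1`,
	`attenuationFactor = 1` and `maxDistance = 10`, a source at distance 5
	yields the following `dstAttenuation` values:
	- `Linear`:      `1 - (5 - 1) / (10 - 1) = 0.555...`
	- `Inverse`:     `1 / (1 + (5 - 1))      = 0.2`
	- `Exponential`: `(5 / 1) ^ -1           = 0.2`
**/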

View File

@ -0,0 +1,127 @@
package aura.dsp.panner;
import aura.threading.Message;
import aura.types.AudioBuffer;
import aura.utils.Interpolator.LinearInterpolator;
import aura.utils.MathUtils;
using aura.utils.StepIterator;
class StereoPanner extends Panner {
final pVolumeLeft = new LinearInterpolator(1.0);
final pVolumeRight = new LinearInterpolator(1.0);
var _balance = Balance.CENTER;
override public function update3D() {
final listener = Aura.listener;
final dirToChannel = this.location.sub(listener.location);
if (dirToChannel.length == 0) {
setBalance(Balance.CENTER);
handle.channel.sendMessage({ id: ChannelMessageID.PDstAttenuation, data: 1.0 });
return;
}
final look = listener.look;
final up = listener.right.cross(look).normalized();
// Project the channel position (relative to the listener) to the plane
// described by the listener's look and right vectors
final projectedChannelPos = projectPointOntoPlane(dirToChannel, up).normalized();
// Angle cosine
var angle = listener.look.dot(projectedChannelPos);
// The calculated angle cosine looks like this on the unit circle:
// / 1 \
// 0 x 0 , where x is the listener and top is on the front
// \ -1 /
		// Map the front to a balance of 0.5 and use the absolute angle to
		// prevent phase flipping. We lose front/back information here, but
		// that's ok
		angle = Math.abs(angle * 0.5);
// The angle cosine doesn't contain side information, so if the sound is
// to the right of the listener, we must invert the angle
if (listener.right.dot(projectedChannelPos) > 0) {
angle = 1 - angle;
}
setBalance(angle);
super.update3D();
}
override public function reset3D() {
setBalance(Balance.CENTER);
super.reset3D();
}
public inline function setBalance(balance: Balance) {
this._balance = balance;
sendMessage({ id: StereoPannerMessageID.PVolumeLeft, data: Math.sqrt(~balance) });
sendMessage({ id: StereoPannerMessageID.PVolumeRight, data: Math.sqrt(balance) });
}
public inline function getBalance(): Balance {
return this._balance;
}
function process(buffer: AudioBuffer) {
assert(Critical, buffer.numChannels == 2, "A StereoPanner can only be applied to stereo channels");
final channelViewL = buffer.getChannelView(0);
final channelViewR = buffer.getChannelView(1);
final stepSizeL = pVolumeLeft.getLerpStepSize(buffer.channelLength);
final stepSizeR = pVolumeRight.getLerpStepSize(buffer.channelLength);
#if AURA_SIMD
final stepSizesL = pVolumeLeft.getLerpStepSizes32x4(buffer.channelLength);
final stepSizesR = pVolumeRight.getLerpStepSizes32x4(buffer.channelLength);
		final lenRemainder = mod4(buffer.channelLength);
		final startRemainder = buffer.channelLength - lenRemainder;
		// Process all samples up to the last full group of four with SIMD
		for (i in (0...startRemainder).step(4)) {
			pVolumeLeft.applySIMD32x4(channelViewL, i, stepSizesL);
			pVolumeRight.applySIMD32x4(channelViewR, i, stepSizesR);
		}
		// Process the remaining samples (at most three) without SIMD
		for (i in startRemainder...buffer.channelLength) {
channelViewL[i] *= pVolumeLeft.currentValue;
channelViewR[i] *= pVolumeRight.currentValue;
pVolumeLeft.currentValue += stepSizeL;
pVolumeRight.currentValue += stepSizeR;
}
#else
for (i in 0...buffer.channelLength) {
channelViewL[i] *= pVolumeLeft.currentValue;
channelViewR[i] *= pVolumeRight.currentValue;
pVolumeLeft.currentValue += stepSizeL;
pVolumeRight.currentValue += stepSizeR;
}
#end
pVolumeLeft.updateLast();
pVolumeRight.updateLast();
}
override function parseMessage(message: Message) {
switch (message.id) {
case StereoPannerMessageID.PVolumeLeft: pVolumeLeft.targetValue = cast message.data;
case StereoPannerMessageID.PVolumeRight: pVolumeRight.targetValue = cast message.data;
default:
super.parseMessage(message);
}
}
}
class StereoPannerMessageID extends DSPMessageID {
final PVolumeLeft;
final PVolumeRight;
}
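/**
	Worked example, not part of the original file: `setBalance()` implements
	equal-power panning. At `Balance.CENTER` (0.5) both channels get a gain of
	`sqrt(0.5) = 0.707...` (-3 dB), keeping the perceived loudness constant;
	hard right (1.0) yields a gain of 0 on the left and 1 on the right channel.
**/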

View File

@ -0,0 +1,60 @@
package aura.dsp.sourcefx;
import aura.types.AudioBuffer;
import aura.Types;
/**
A special type of audio effect that—unlike insert effects—is not applied
continuously during audio playback but instead to the audio source buffer of
an `aura.channels.UncompBufferChannel` object.
This allows `SourceEffect`s to bake effects or provide sound variations
(for example by selecting random sounds from a pool of sounds, or by creating
sound variations on the fly with `aura.dsp.sourcefx.VelvetNoiseVariator`).
**/
abstract class SourceEffect {
/**
If `false` (default), `SourceEffect.process()` is only called
before the linked audio channel is played for the very first time with
		its current combination of source effects. Adding source effects to a
		channel or removing them from it results in a recalculation of all
		source effects on that channel.
If `true`, _additionally_ call `SourceEffect.process()` before each
consecutive replay of the audio source, including:
- Repetitions if the audio source is looping
- Calls to `audioChannel.play()` if the audio channel was stopped or
`play()` is called with `retrigger` set to `true`.
**/
public var applyOnReplay(default, null): AtomicBool = new AtomicBool(false);
/**
`SourceEffect`s are allowed to change the length of the source
audio passed as `srcBuffer` to `SourceEffect.process()`.
This function is used to calculate the amount of memory that needs to be
allocated to efficiently process all audio source effects of a channel.
It must return the least required channel length of the effect's
destination buffer with respect to the given source channel length.
**/
abstract function calculateRequiredChannelLength(srcChannelLength: Int): Int;
/**
Apply the effect to the audio data stored in the given source buffer and
write the result into the destination buffer.
- `srcBuffer` and `dstBuffer` may or may not point to the same object.
- The channels of `srcBuffer` might be longer than the valid audio
contained, use `srcChannelLength` to get the amount of valid samples
in each channel of the source buffer.
		- `dstBuffer` is guaranteed to contain channels _at least_ as long as
		  `calculateRequiredChannelLength(srcChannelLength)`; the source effect
		  is expected to fill `dstBuffer` exactly to that length.
This function must return the required destination channel length as
calculated by `calculateRequiredChannelLength(srcChannelLength)`.
**/
abstract function process(srcBuffer: AudioBuffer, srcChannelLength: Int, dstBuffer: AudioBuffer): Int;
}
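/**
	Hypothetical sketch, not part of the original file: a minimal
	`SourceEffect` implementation that halves the source volume and keeps the
	channel length unchanged.
**/
private class HalfGainSourceEffect extends SourceEffect {
	function calculateRequiredChannelLength(srcChannelLength: Int): Int {
		return srcChannelLength;
	}
	function process(srcBuffer: AudioBuffer, srcChannelLength: Int, dstBuffer: AudioBuffer): Int {
		for (c in 0...srcBuffer.numChannels) {
			final srcView = srcBuffer.getChannelView(c);
			final dstView = dstBuffer.getChannelView(c);
			for (i in 0...srcChannelLength) {
				dstView[i] = srcView[i] * 0.5;
			}
		}
		return srcChannelLength;
	}
}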

View File

@ -0,0 +1,119 @@
package aura.dsp.sourcefx;
import kha.FastFloat;
import aura.dsp.SparseConvolver;
import aura.types.AudioBuffer;
import aura.utils.MathUtils;
import aura.utils.FrequencyUtils;
/**
Generate infinite variations on short percussive samples on the fly,
following the technique from the paper linked below.
The parameters of this effect need careful tweaking. Some examples can be
found in _Table 1_ in the paper linked below.
**Paper**:
Fagerström, Jon & Schlecht, Sebastian & Välimäki, Vesa. (2021).
One-to-Many Conversion for Percussive Samples. doi.org/10.23919/DAFx51585.2021.9768256.
**/
class VelvetNoiseVariator extends SourceEffect {
public final noiseLengthMs: FastFloat;
public final strength: FastFloat;
public final decayRate: FastFloat;
final highpassFilter: Filter;
final sparseConvolver: SparseConvolver;
var averageImpulseSpacing: Float;
/**
Create a new `VelvetNoiseVariator`.
@param noiseLengthMs The length of the velvet noise used for convolution, in milliseconds.
@param numImpulses The amount of impulses in the velvet noise.
@param decayRate The strength of the exponential decay of the velvet noise impulses.
@param lowShelfCutoff The cutoff frequency for the integrated high-pass filter.
@param strength The strength/influence of this effect. Think of this as a dry/wet control.
**/
public function new(noiseLengthMs: FastFloat, numImpulses: Int, decayRate: FastFloat, lowShelfCutoff: Hertz, strength: FastFloat) {
this.noiseLengthMs = noiseLengthMs;
final noiseLengthSamples = msToSamples(Aura.sampleRate, noiseLengthMs);
this.sparseConvolver = new SparseConvolver(numImpulses, noiseLengthSamples);
this.averageImpulseSpacing = maxF(1.0, noiseLengthSamples / numImpulses);
this.highpassFilter = new Filter(HighPass);
highpassFilter.setCutoffFreq(lowShelfCutoff, All);
this.applyOnReplay.store(true);
this.decayRate = decayRate;
this.strength = strength;
}
public static function fillVelvetNoiseSparse(impulseBuffer: SparseImpulseBuffer, averageImpulseSpacing: Float, decayRate: FastFloat) {
var nextGridPosPrecise = 0.0;
var nextGridPosRounded = 0;
var nextImpulsePos = 0;
// Attenuate consecutive pulses
final expFactor = Math.pow(E_INV, decayRate); // e^(-decayRate) == 1/e^decayRate == (1/e)^decayRate
var exponentialDecayFactor = 1.0;
for (i in 0...impulseBuffer.length) {
final currentGridPosRounded = nextGridPosRounded;
nextGridPosPrecise += averageImpulseSpacing;
nextGridPosRounded = Math.round(nextGridPosPrecise);
nextImpulsePos = currentGridPosRounded + Std.random(nextGridPosRounded - currentGridPosRounded);
impulseBuffer.setImpulsePos(i, nextImpulsePos);
impulseBuffer.setImpulseMagnitude(i, (Math.random() < 0.5 ? -1.0 : 1.0) * exponentialDecayFactor);
exponentialDecayFactor *= expFactor; // e^(-decayRate*i) == e^(-decayRate)^i
}
}
function calculateRequiredChannelLength(srcChannelLength: Int): Int {
return srcChannelLength + sparseConvolver.getMaxNumImpulseResponseSamples() - 1;
}
@:access(aura.dsp.SparseConvolver)
function process(srcBuffer: AudioBuffer, srcChannelLength: Int, dstBuffer: AudioBuffer): Int {
final requiredLength = calculateRequiredChannelLength(srcChannelLength);
// Copy and pad data
for (c in 0...srcBuffer.numChannels) {
final srcChannelView = srcBuffer.getChannelView(c);
final dstChannelView = dstBuffer.getChannelView(c);
for (i in 0...srcChannelLength) {
dstChannelView[i] = srcChannelView[i];
}
// Pad with zeroes to convolve without overlapping
for (i in srcChannelLength...requiredLength) {
dstChannelView[i] = 0.0;
}
}
fillVelvetNoiseSparse(sparseConvolver.impulseBuffer, averageImpulseSpacing, decayRate);
highpassFilter.process(dstBuffer);
sparseConvolver.process(dstBuffer);
for (c in 0...srcBuffer.numChannels) {
final srcChannelView = srcBuffer.getChannelView(c);
final dstChannelView = dstBuffer.getChannelView(c);
for (i in 0...srcChannelLength) {
dstChannelView[i] = dstChannelView[i] * strength + srcChannelView[i];
}
for (i in srcChannelLength...requiredLength) {
dstChannelView[i] = dstChannelView[i] * strength;
}
}
return requiredLength;
}
}
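/**
	Hypothetical usage sketch, not part of the original file; the parameter
	values are illustrative and not taken from Table 1 of the referenced paper.
**/
private class VelvetNoiseVariatorUsageSketch {
	public static function make(): VelvetNoiseVariator {
		// 30 ms of velvet noise with 16 impulses, moderate exponential decay,
		// a 200 Hz high-pass and a half-strength wet signal
		return new VelvetNoiseVariator(30.0, 16, 4.0, 200, 0.5);
	}
}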

View File

@ -0,0 +1,21 @@
package aura.format;
import haxe.Int64;
import haxe.io.Input;
inline function readInt64(inp: Input): Int64 {
final first = inp.readInt32();
final second = inp.readInt32();
return inp.bigEndian ? Int64.make(first, second) : Int64.make(second, first);
}
inline function readUInt32(inp: Input): Int64 {
var out: Int64 = 0;
for (i in 0...4) {
out += Int64.shl(inp.readByte(), (inp.bigEndian ? 3 - i : i) * 8);
}
return out;
}
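// Worked example, not part of the original file: with `inp.bigEndian = false`
// the byte sequence 0x01 0x00 0x00 0x00 decodes via readUInt32() to 1; with
// `inp.bigEndian = true` the same bytes decode to 16777216 (0x01000000).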

View File

@ -0,0 +1,170 @@
/**
Specification:
V1: https://github.com/kcat/openal-soft/blob/be7938ed385e18c7800c663672262bb2976aa734/docs/hrtf.txt
V2: https://github.com/kcat/openal-soft/blob/0349bcc500fdb9b1245a5ddce01b2896bcf9bbb9/docs/hrtf.txt
V3: https://github.com/kcat/openal-soft/blob/3ef4bffaf959d06527a247faa19cc869781745e4/docs/hrtf.txt
**/
package aura.format.mhr;
import haxe.Int64;
import haxe.ds.Vector;
import haxe.io.Bytes;
import haxe.io.BytesInput;
import kha.arrays.Float32Array;
import aura.types.HRTF;
using aura.format.InputExtension;
/**
	Load MHR HRTF files (format versions 1 to 3 are supported) into `HRTF` objects.
**/
class MHRReader {
public static function read(bytes: Bytes): HRTF {
final inp = new BytesInput(bytes);
inp.bigEndian = false;
final magic = inp.readString(8, UTF8);
final version = versionFromMagic(magic);
final sampleRate = Int64.toInt(inp.readUInt32());
final sampleType = switch (version) {
case V1: SampleType16Bit;
case V2: inp.readByte();
case V3: SampleType24Bit;
}
final channelType = switch (version) {
case V1: 0; // mono
case V2 | V3: inp.readByte();
}
final channels = channelType + 1;
// Samples per HRIR (head related impulse response) per channel
final hrirSize = inp.readByte();
// Number of fields used by the data set. Each field represents a
// set of points for a given distance.
final fieldCount = version == V1 ? 1 : inp.readByte();
final fields = new Vector<Field>(fieldCount);
var totalHRIRCount = 0;
for (i in 0...fieldCount) {
final field = new Field();
// 1000mm is arbitrary, but it doesn't matter since the interpolation
// can only access one distance anyway...
field.distance = version == V1 ? 1000 : inp.readUInt16();
field.evCount = inp.readByte();
field.azCount = new Vector<Int>(field.evCount);
field.evHRIROffsets = new Vector<Int>(field.evCount);
var fieldHrirCount = 0;
for (j in 0...field.evCount) {
// Calculate the offset into the HRIR arrays. Different
// elevations may have different amounts of azimuths/HRIRs
field.evHRIROffsets[j] = fieldHrirCount;
field.azCount[j] = inp.readByte();
fieldHrirCount += field.azCount[j];
}
field.hrirCount = fieldHrirCount;
totalHRIRCount += fieldHrirCount;
fields[i] = field;
}
// Read actual HRIR samples into coeffs
for (i in 0...fieldCount) {
final field = fields[i];
final hrirs = new Vector<HRIR>(field.hrirCount);
field.hrirs = hrirs;
for (j in 0...field.hrirCount) {
// Create individual HRIR
final hrir = hrirs[j] = new HRIR();
hrir.coeffs = new Float32Array(hrirSize * channels);
switch (sampleType) {
case SampleType16Bit:
for (s in 0...hrirSize) {
final coeff = inp.readInt16();
// 32768 = 2^15
hrir.coeffs[s] = coeff / (coeff < 0 ? 32768.0 : 32767.0);
}
case SampleType24Bit:
for (s in 0...hrirSize) {
final coeff = inp.readInt24();
// 8388608 = 2^23
hrir.coeffs[s] = coeff / (coeff < 0 ? 8388608.0 : 8388607.0);
}
}
}
}
// Read per-HRIR delay
var maxDelayLength = 0.0;
for (i in 0...fieldCount) {
final field = fields[i];
for (j in 0...field.hrirCount) {
final hrir = field.hrirs[j];
hrir.delays = new Vector<Float>(channels);
for (ch in 0...channels) {
// 6.2 fixed point
final delayRaw = inp.readByte();
final delayIntPart = delayRaw >> 2;
final delayFloatPart = isBitSet(delayRaw, 1) * 0.5 + isBitSet(delayRaw, 0) * 0.25;
final delay = delayIntPart + delayFloatPart;
hrir.delays[ch] = delay;
if (delay > maxDelayLength) {
maxDelayLength = delay;
}
}
}
}
		// Sanity check: uncommenting the following line should throw an
		// end-of-file error if the entire file was consumed as expected.
		// inp.readByte();
return {
sampleRate: sampleRate,
numChannels: channels,
hrirSize: hrirSize,
hrirCount: totalHRIRCount,
fields: fields,
maxDelayLength: maxDelayLength
};
}
static inline function isBitSet(byte: Int, position: Int): Int {
return (byte & (1 << position) == 0) ? 0 : 1;
}
static inline function versionFromMagic(magic: String): MHRVersion {
return switch (magic) {
case "MinPHR01": V1;
case "MinPHR02": V2;
case "MinPHR03": V3;
default:
throw 'File is not an MHR HRTF file! Unknown magic string "$magic".';
}
}
}
private enum abstract SampleType(Int) from Int {
var SampleType16Bit;
var SampleType24Bit;
}
private enum abstract MHRVersion(Int) {
var V1;
var V2;
var V3;
}
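/**
	Hypothetical usage sketch, not part of the original file: load an HRTF
	from the raw bytes of an .mhr file obtained elsewhere (e.g. from an asset
	system).
**/
private class MHRReaderUsageSketch {
	public static function load(mhrFileBytes: haxe.io.Bytes): HRTF {
		// Throws if the magic string is not one of MinPHR01/MinPHR02/MinPHR03
		return MHRReader.read(mhrFileBytes);
	}
}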

View File

@ -0,0 +1,10 @@
package aura;
#if !macro
import aura.Types.Angle;
import aura.Types.Balance;
import aura.Types.Hertz;
import aura.Types.Millisecond;
#end
import aura.utils.Assert.*;

View File

@ -0,0 +1,384 @@
package aura.math;
import haxe.ds.Vector;
import kha.arrays.Float32Array;
import aura.types.Complex;
import aura.types.ComplexArray;
import aura.utils.BufferUtils;
import aura.utils.MathUtils;
enum abstract FFTInputType(Int) {
var RealValuedInput;
var ComplexValuedInput;
}
/**
	Container for all required buffers for an FFT computation. The input buffers
	can either be real- or complex-valued, depending on whether you instantiate
	an `aura.math.FFT.RealValuedFFT` or an `aura.math.FFT.ComplexValuedFFT`.
Each instance of this class can have multiple input and output buffers whose
indices have to be passed to the respective FFT functions. It is more
efficient to use multiple buffers for different FFT calculations with the
same size instead of multiple instances of this class. The input buffers
are guaranteed to be zero-initialized.
	Make sure not to use objects of this class in different threads at the same
	time, since `FFTBase` is not thread-safe!
**/
abstract class FFTBase {
public final size: Int;
public final halfSize: Int;
public final outputBuffers: Vector<ComplexArray>;
final expRotationStepTable: ComplexArray;
public inline function new(size: Int, numOutputs: Int) {
this.size = size;
this.halfSize = size >>> 1;
outputBuffers = new Vector(numOutputs);
for (i in 0...numOutputs) {
outputBuffers[i] = new ComplexArray(size);
}
// Since the calculations for the complex exponential inside a FFT are
// basically just a rotation around the unit circle with a constant step
// size that only depends on the layer size, we can precompute the
// complex rotation steps.
final numExpTableEntries = log2Unsigned(size);
expRotationStepTable = new ComplexArray(numExpTableEntries);
for (halfLayerIdx in 0...numExpTableEntries) {
final halfLayerSize = exp2(halfLayerIdx);
// (-2 * Math.PI) / layerSize == -Math.PI / halfLayerSize,
// so we store values corresponding to each possible halfLayer index
expRotationStepTable[halfLayerIdx] = Complex.exp(-Math.PI / halfLayerSize);
}
}
public abstract function forwardFFT(inputBufferIndex: Int, outputBufferIndex: Int): Void;
public abstract function inverseFFT(inputBufferIndex: Int, outputBufferIndex: Int): Void;
public abstract function getInput(index: Int): Dynamic;
public inline function getOutput(index: Int): ComplexArray {
return outputBuffers[index];
}
}
class RealValuedFFT extends FFTBase {
public final inputBuffers: Vector<Float32Array>;
final tmpInputBufferHalf: ComplexArray;
final tmpOutputBufferHalf: ComplexArray;
public inline function new(size: Int, numInputs: Int, numOutputs: Int) {
super(size, numOutputs);
inputBuffers = new Vector(numInputs);
for (i in 0...numInputs) {
inputBuffers[i] = createEmptyF32Array(size);
}
tmpInputBufferHalf = new ComplexArray(halfSize);
tmpOutputBufferHalf = new ComplexArray(halfSize);
}
public inline function forwardFFT(inputBufferIndex: Int, outputBufferIndex: Int) {
realfft(inputBuffers[inputBufferIndex], outputBuffers[outputBufferIndex], tmpInputBufferHalf, tmpOutputBufferHalf, size, expRotationStepTable);
}
public inline function inverseFFT(inputBufferIndex: Int, outputBufferIndex: Int) {
realifft(outputBuffers[outputBufferIndex], inputBuffers[inputBufferIndex], tmpOutputBufferHalf, tmpInputBufferHalf, size, expRotationStepTable);
}
public inline function getInput(index: Int): Float32Array {
return inputBuffers[index];
}
}
class ComplexValuedFFT extends FFTBase {
public final inputBuffers: Vector<ComplexArray>;
public inline function new(size: Int, numInputs: Int, numOutputs: Int) {
super(size, numOutputs);
inputBuffers = new Vector(numInputs);
for (i in 0...numInputs) {
inputBuffers[i] = new ComplexArray(size);
}
}
public inline function forwardFFT(inputBufferIndex: Int, outputBufferIndex: Int) {
fft(inputBuffers[inputBufferIndex], outputBuffers[outputBufferIndex], size, expRotationStepTable);
}
public inline function inverseFFT(inputBufferIndex: Int, outputBufferIndex: Int) {
ifft(outputBuffers[outputBufferIndex], inputBuffers[inputBufferIndex], size, expRotationStepTable);
}
public inline function getInput(index: Int): ComplexArray {
return inputBuffers[index];
}
}
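/**
	Hypothetical usage sketch, not part of the original file: an 8-point
	real-valued FFT of a constant (DC) signal. All output bins except bin 0
	are (numerically close to) zero; bin 0 holds the sum of the inputs.
**/
private class RealValuedFFTUsageSketch {
	public static function run() {
		final fftCalc = new RealValuedFFT(8, 1, 1);
		final input = fftCalc.getInput(0);
		for (i in 0...8) {
			input[i] = 1.0;
		}
		fftCalc.forwardFFT(0, 0);
		final spectrum = fftCalc.getOutput(0); // spectrum[0].real == 8.0
	}
}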
/**
Calculate the fast fourier transformation of the signal given in `inTimes`
and output the result in `outFreqs`.
@param inTimes Input buffer in time domain. Must have length of `size`.
@param outFreqs Output buffer in frequency domain. Must have length of `size`.
@param size The size of the FFT. Must be a power of 2.
**/
inline function fft(inTimes: ComplexArray, outFreqs: ComplexArray, size: Int, expRotationStepTable: ComplexArray) {
ditfft2Iterative(inTimes, outFreqs, size, false, expRotationStepTable);
}
/**
Calculate the inverse fast fourier transformation of the signal given in
`inFreqs` and output the result in `outTimes`.
@param inFreqs Input buffer in frequency domain. Must have length of `size`.
@param outTimes Output buffer in time domain. Must have length of `size`.
@param size The size of both buffers. Must be a power of 2.
@param scale If true, scale output values by `1 / size`.
**/
inline function ifft(inFreqs: ComplexArray, outTimes: ComplexArray, size: Int, expRotationStepTable: ComplexArray, scale = true) {
ditfft2Iterative(inFreqs, outTimes, size, true, expRotationStepTable);
if (scale) {
for (i in 0...size) {
outTimes[i] = outTimes[i].scale(1 / size);
}
}
}
/**
	Variant of `aura.math.fft` with real-valued input, almost twice as fast as
	its complex-valued counterpart.
@param inTimes Input buffer in time domain. Must have length of `size`.
@param outFreqs Output buffer in frequency domain. Must have length of `size`.
@param timeCmplxStore Temporary buffer. May contain any values and will contain garbage values afterwards. Must have length of `Std.int(size / 2)`.
@param freqCmplxStore Temporary buffer. May contain any values and will contain garbage values afterwards. Must have length of `Std.int(size / 2)`.
@param size The size of the FFT. Must be a power of 2.
**/
inline function realfft(inTimes: Float32Array, outFreqs: ComplexArray, timeCmplxStore: ComplexArray, freqCmplxStore: ComplexArray, size: Int, expRotationStepTable: ComplexArray) {
// Reference:
// Lyons, Richard G. (2011). Understanding Digital Signal Processing,
	// 3rd edn. pp. 694-696 (Section 13.5.2: Performing a 2N-Point Real FFT)
final halfSize = Std.int(size / 2);
assert(Error, inTimes.length == size);
assert(Error, outFreqs.length == size);
assert(Error, timeCmplxStore.length == halfSize);
assert(Error, freqCmplxStore.length == halfSize);
for (i in 0...halfSize) {
timeCmplxStore[i] = new Complex(inTimes[2 * i], inTimes[2 * i + 1]);
}
fft(timeCmplxStore, freqCmplxStore, halfSize, expRotationStepTable);
final piN = Math.PI / halfSize;
// Construct first half of the result
for (i in 0...halfSize) {
final opp = (i == 0) ? freqCmplxStore[i] : freqCmplxStore[halfSize - i];
final xPlus = new Complex(
0.5 * (freqCmplxStore[i].real + opp.real),
0.5 * (freqCmplxStore[i].imag + opp.imag)
);
final xMinus = new Complex(
0.5 * (freqCmplxStore[i].real - opp.real),
0.5 * (freqCmplxStore[i].imag - opp.imag)
);
final piNi = piN * i;
final iSin = Math.sin(piNi);
final iCos = Math.cos(piNi);
final real = xPlus.real + iCos * xPlus.imag - iSin * xMinus.real;
final imag = xMinus.imag - iSin * xPlus.imag - iCos * xMinus.real;
outFreqs[i] = new Complex(real, imag);
}
outFreqs[halfSize] = freqCmplxStore[0].real - freqCmplxStore[0].imag;
	// Mirror the first half onto the second half of the result, using the
	// Hermitian symmetry of real-input FFTs: X[size - i] == conj(X[i])
	for (i in halfSize + 1...size) {
		outFreqs[i] = outFreqs[size - i].conj();
	}
}
/**
	Variant of `aura.math.ifft` with real-valued output, almost twice as fast
	as its complex-valued counterpart.
@param inFreqs Input buffer in frequency domain. Must have length of `size`.
@param outTimes Output buffer in time domain. Must have length of `size`.
@param freqCmplxStore Temporary buffer. May contain any values and will contain garbage values afterwards. Must have length of `Std.int(size / 2)`.
@param timeCmplxStore Temporary buffer. May contain any values and will contain garbage values afterwards. Must have length of `Std.int(size / 2)`.
@param size The size of the FFT. Must be a power of 2.
**/
inline function realifft(inFreqs: ComplexArray, outTimes: Float32Array, freqCmplxStore: ComplexArray, timeCmplxStore: ComplexArray, size: Int, expRotationStepTable: ComplexArray) {
// Reference:
// Scheibler, Robin (2013). Real FFT Algorithms.
// Available at: http://www.robinscheibler.org/2013/02/13/real-fft.html
final halfSize = Std.int(size / 2);
assert(Error, inFreqs.length == size);
assert(Error, outTimes.length == size);
assert(Error, freqCmplxStore.length == halfSize);
assert(Error, timeCmplxStore.length == halfSize);
final pi2N = (2 * Math.PI) / size;
// Construct input
for (i in 0...halfSize) {
final oppC = ((i == 0) ? inFreqs[i] : inFreqs[halfSize - i]).conj();
final xEven = 0.5 * (inFreqs[i] + oppC);
final xOdd = 0.5 * ((inFreqs[i] - oppC) * Complex.exp(i * pi2N));
freqCmplxStore[i] = xEven + xOdd.multWithI();
}
ifft(freqCmplxStore, timeCmplxStore, halfSize, expRotationStepTable, false);
final scale = 2 / size;
for (i in 0...halfSize) {
outTimes[2 * i] = timeCmplxStore[i].real * scale;
outTimes[2 * i + 1] = timeCmplxStore[i].imag * scale;
}
}
/**
Modified copy of `dsp.FFT.ditfft2()` from the "hxdsp" library (*) to be able
to use Aura's own complex number type to make the fft allocation-free.
	The used algorithm is a recursive Radix-2 Decimation-In-Time variant of the
	Cooley-Tukey FFT.
(*) https://github.com/baioc/hxdsp, released under the UNLICENSE license.
**/
#if AURA_BACKEND_HL @:hlNative("aura_hl", "ditfft2") #end
private function ditfft2(time: ComplexArray, t: Int, freq: ComplexArray, f: Int, n: Int, step: Int, inverse: Bool) {
if (n == 1) {
freq[f] = time[t];
}
else {
final halfLen = Std.int(n / 2);
ditfft2(time, t, freq, f, halfLen, step * 2, inverse);
ditfft2(time, t + step, freq, f + halfLen, halfLen, step * 2, inverse);
final tExp = ((inverse ? 1 : -1) * 2 * Math.PI) / n;
for (k in 0...halfLen) {
final even = freq[f + k].copy();
final odd = freq[f + k + halfLen].copy();
final twiddle = Complex.exp(tExp * k) * odd;
freq[f + k] = even + twiddle;
freq[f + k + halfLen] = even - twiddle;
}
}
}
#if AURA_BACKEND_HL @:hlNative("aura_hl", "ditfft2_iterative") #end
private function ditfft2Iterative(time: ComplexArray, freq: ComplexArray, n: Int, inverse: Bool, expRotationStepTable: ComplexArray) {
// Decimate
final log2N = log2Unsigned(n);
for (i in 0...n) {
final reversedI = bitReverseUint32(i, log2N);
if (reversedI > i) {
freq[i] = time[reversedI];
freq[reversedI] = time[i];
}
else if (reversedI == i) {
freq[i] = time[reversedI];
}
}
var layerSize = 2; // Size of the FFT for the current layer in the divide & conquer tree
var halfLayerIdx = 0;
while (layerSize <= n) { // Iterate over all layers beginning with the lowest
final halfLayerSize = layerSize >>> 1;
final expRotationStep = expRotationStepTable[halfLayerIdx].copy();
if (inverse) {
expRotationStep.setFrom(expRotationStep.conj());
}
var sectionOffset = 0;
while (sectionOffset < n) {
final currentExpRotation = new Complex(1.0, 0.0);
for (i in 0...halfLayerSize) {
final even = freq[sectionOffset + i].copy();
final odd = freq[sectionOffset + i + halfLayerSize];
final twiddle = currentExpRotation * odd;
freq[sectionOffset + i] = even + twiddle;
freq[sectionOffset + i + halfLayerSize] = even - twiddle;
currentExpRotation.setFrom(currentExpRotation * expRotationStep);
}
sectionOffset += layerSize;
}
layerSize <<= 1;
halfLayerIdx++;
}
}
// The following bit reversal code was taken (and slightly altered) from
// https://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable.
// The original sources are released in the public domain.
// Bit reversal LUT where each entry is one possible byte (value = address)
private final bitReverseTable: kha.arrays.Uint8Array = uint8ArrayFromIntArray([
0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0,
0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8,
0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,
0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC,
0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2,
0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,
0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6,
0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE,
0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,
0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9,
0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5,
0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,
0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3,
0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB,
0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,
0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF
]);
/**
Return the reversed bits of the given `value`, where `log2N` is the position
of the most significant bit that should be used for the left bound of the
"reverse range".
**/
private inline function bitReverseUint32(value: Int, log2N: Int): Int {
return (
(bitReverseTable[ value & 0xff] << 24) |
(bitReverseTable[(value >>> 8 ) & 0xff] << 16) |
(bitReverseTable[(value >>> 16) & 0xff] << 8 ) |
(bitReverseTable[(value >>> 24) & 0xff] )
) >>> (32 - log2N);
}
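// Worked example, not part of the original file: for log2N = 3,
// bitReverseUint32(1, 3) == 4 (0b001 -> 0b100), so the decimation step in
// ditfft2Iterative() swaps elements 1 and 4 of an 8-point input.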
private inline function uint8ArrayFromIntArray(array: Array<Int>): kha.arrays.Uint8Array {
final out = new kha.arrays.Uint8Array(array.length);
for (i in 0...array.length) {
out[i] = array[i];
}
return out;
}

View File

@ -0,0 +1,54 @@
package aura.math;
import kha.FastFloat;
import kha.math.FastVector3;
import kha.math.FastVector4;
@:forward
abstract Vec3(FastVector3) from FastVector3 to FastVector3 {
public inline function new(x: FastFloat = 0.0, y: FastFloat = 0.0, z: FastFloat = 0.0) {
this = new FastVector3(x, y, z);
}
@:from
public static inline function fromKhaVec3(v: kha.math.FastVector3): Vec3 {
return new FastVector3(v.x, v.y, v.z);
}
@:from
public static inline function fromKhaVec4(v: kha.math.FastVector4): Vec3 {
return new FastVector3(v.x, v.y, v.z);
}
@:to
public inline function toKhaVec3(): kha.math.FastVector3 {
return new FastVector3(this.x, this.y, this.z);
}
@:to
public inline function toKhaVec4(): kha.math.FastVector4 {
return new FastVector4(this.x, this.y, this.z);
}
#if (AURA_WITH_IRON || leenkx)
	@:from
	public static inline function fromIronVec3(v: iron.math.Vec3): Vec3 {
		return new FastVector3(v.x, v.y, v.z);
	}
	@:from
	public static inline function fromIronVec4(v: iron.math.Vec4): Vec3 {
		return new FastVector3(v.x, v.y, v.z);
	}
@:to
public inline function toIronVec3(): iron.math.Vec3 {
return new iron.math.Vec3(this.x, this.y, this.z);
}
@:to
public inline function toIronVec4(): iron.math.Vec4 {
return new iron.math.Vec4(this.x, this.y, this.z);
}
#end
}

View File

@ -0,0 +1,188 @@
// =============================================================================
// getBuffer() is roughly based on
// https://github.com/Kode/Kha/blob/master/Sources/kha/audio2/Audio1.hx
//
// References:
// [1]: https://github.com/Kode/Kha/blob/3a3e9e6d51b1d6e3309a80cd795860da3ea07355/Backends/Kinc-hxcpp/main.cpp#L186-L233
//
// =============================================================================
package aura.threading;
import haxe.ds.Vector;
import kha.arrays.Float32Array;
import aura.types.AudioBuffer;
import aura.types.ComplexArray;
import aura.utils.Pointer;
class BufferCache {
// TODO: Make max tree height configurable
public static inline var MAX_TREE_HEIGHT = 8;
/**
Number of audioCallback() invocations since the last allocation. This is
used to automatically switch off interactions with the garbage collector
in the audio thread if there are no allocations for some time (for extra
performance).
**/
static var lastAllocationTimer: Int = 0;
/**
Each level in the channel tree has its own buffer that can be shared by
the channels on that level.
**/
static var treeBuffers: Vector<Pointer<AudioBuffer>>;
static var bufferConfigs: Vector<BufferConfig>;
public static inline function init() {
treeBuffers = new Vector(MAX_TREE_HEIGHT);
for (i in 0...treeBuffers.length) {
treeBuffers[i] = new Pointer<AudioBuffer>();
}
bufferConfigs = BufferType.createAllConfigs();
}
public static inline function updateTimer() {
lastAllocationTimer++;
if (lastAllocationTimer > 100) {
kha.audio2.Audio.disableGcInteractions = true;
}
}
public static function getTreeBuffer(treeLevel: Int, numChannels: Int, channelLength: Int): Null<AudioBuffer> {
var p_buffer = treeBuffers[treeLevel];
if (!getBuffer(TAudioBuffer, p_buffer, numChannels, channelLength)) {
// Unexpected allocation message is already printed
trace(' treeLevel: $treeLevel');
return null;
}
return p_buffer.get();
}
@:generic
public static function getBuffer<T>(bufferType: BufferType, p_buffer: PointerType<T>, numChannels: Int, channelLength: Int): Bool {
final bufferCfg = bufferConfigs[bufferType];
var buffer = p_buffer.get();
final currentNumChannels = (buffer == null) ? 0 : bufferCfg.getNumChannels(buffer);
final currentChannelLength = (buffer == null) ? 0 : bufferCfg.getChannelLength(buffer);
if (buffer != null && currentNumChannels >= numChannels && currentChannelLength >= channelLength) {
// Buffer is already big enough
return true;
}
if (kha.audio2.Audio.disableGcInteractions) {
			// This code is executed when there are suddenly more samples
			// requested while the GC interactions are turned off (because the
			// number of samples was sufficient for a longer time). We can't
			// simply turn GC interactions back on since that would not take
			// effect before the next audio callback invocation, so we skip
			// this "frame" instead (see [1] for reference).
#if !AURA_UNIT_TESTS
trace("Unexpected allocation request in audio thread.");
			final haveMsgNumChannels = (buffer == null) ? 'no buffer' : '${currentNumChannels}';
			final haveMsgLength = (buffer == null) ? 'no buffer' : '${currentChannelLength}';
			trace(' wanted amount of channels: $numChannels (have: $haveMsgNumChannels)');
			trace(' wanted channel length: $channelLength (have: $haveMsgLength)');
#end
lastAllocationTimer = 0;
kha.audio2.Audio.disableGcInteractions = false;
return false;
}
// If the buffer exists but too few samples fit in, overallocate by
// factor 2 to avoid too many allocations. Eventually the buffer will be
// big enough for the required amount of samples. If the buffer does not
// exist yet, do not overallocate to prevent too high memory usage
// (the requested length should not change much).
buffer = cast bufferCfg.construct(numChannels, buffer == null ? channelLength : channelLength * 2);
p_buffer.set(buffer);
lastAllocationTimer = 0;
return true;
}
}
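/**
	Hypothetical usage sketch, not part of the original file: grow-on-demand
	retrieval of a mono `Float32Array` scratch buffer from within the audio
	thread, assuming `PointerType<T>` is satisfied by `aura.utils.Pointer`.
**/
private class BufferCacheUsageSketch {
	static final p_scratch = new Pointer<Float32Array>();
	public static function getScratch(length: Int): Null<Float32Array> {
		if (!BufferCache.getBuffer(TFloat32Array, p_scratch, 1, length)) {
			return null; // GC interactions were off, skip this audio frame
		}
		return p_scratch.get();
	}
}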
@:structInit
class BufferConfig {
public var construct: Int->Int->Any;
public var getNumChannels: Any->Int;
public var getChannelLength: Any->Int;
}
/**
Type-unsafe workaround for covariance and unification issues when working
with the generic `BufferCache.getBuffer()`.
**/
enum abstract BufferType(Int) to Int {
/** Represents `aura.types.AudioBuffer`. **/
var TAudioBuffer;
/** Represents `kha.arrays.Float32Array`. **/
var TFloat32Array;
/** Represents `Array<Float>`. **/
var TArrayFloat;
/** Represents `Array<dsp.Complex>`. **/
var TArrayComplex;
private var enumSize;
public static function createAllConfigs(): Vector<BufferConfig> {
final out = new Vector<BufferConfig>(enumSize);
out[TAudioBuffer] = ({
construct: (numChannels: Int, channelLength: Int) -> {
return new AudioBuffer(numChannels, channelLength);
},
getNumChannels: (buffer: Any) -> {
return (cast buffer: AudioBuffer).numChannels;
},
getChannelLength: (buffer: Any) -> {
return (cast buffer: AudioBuffer).channelLength;
}
}: BufferConfig);
out[TFloat32Array] = ({
construct: (numChannels: Int, channelLength: Int) -> {
return new Float32Array(channelLength);
},
getNumChannels: (buffer: Any) -> {
return 1;
},
getChannelLength: (buffer: Any) -> {
return (cast buffer: Float32Array).length;
}
}: BufferConfig);
out[TArrayFloat] = ({
construct: (numChannels: Int, channelLength: Int) -> {
final v = new Array<Float>();
v.resize(channelLength);
return v;
},
getNumChannels: (buffer: Any) -> {
return 1;
},
getChannelLength: (buffer: Any) -> {
return (cast buffer: Array<Float>).length;
}
}: BufferConfig);
out[TArrayComplex] = ({
construct: (numChannels: Int, channelLength: Int) -> {
return new ComplexArray(channelLength);
},
getNumChannels: (buffer: Any) -> {
return 1;
},
getChannelLength: (buffer: Any) -> {
return (cast buffer: ComplexArray).length;
}
}: BufferConfig);
return out;
}
}

View File

@ -0,0 +1,36 @@
package aura.threading;
/**
	Non-blocking first in/first out queue for thread synchronization. On targets
	with threading support `sys.thread.Deque` is used; on those without
	threading, `haxe.ds.List` is used instead.
**/
@:generic
@:forward(add)
@:nullSafety(StrictThreaded)
abstract Fifo<T>(FifoImpl<T>) {
public inline function new() {
this = new FifoImpl<T>();
}
public inline function tryPop(): Null<T> {
return this.pop(false);
}
}
#if (target.threaded)
private typedef FifoImpl<T> = sys.thread.Deque<T>;
#else
@:generic
@:forward(add)
@:nullSafety(StrictThreaded)
private abstract FifoImpl<T>(List<T>) {
public inline function new() {
this = new List<T>();
}
public inline function pop(block: Bool): Null<T> {
return this.pop();
}
}
#end
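/**
	Hypothetical usage sketch, not part of the original file: pass integer
	messages from a producer thread to a consumer thread without blocking
	either side.
**/
private class FifoUsageSketch {
	public static function run() {
		final fifo = new Fifo<Int>();
		fifo.add(42); // producer side
		final item = fifo.tryPop(); // consumer side: 42, then null when empty
	}
}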

View File

@ -0,0 +1,37 @@
package aura.threading;
@:struct
@:structInit
class Message {
public final id: Int;
public final data: Null<Dynamic>;
public final inline function dataAsArrayUnsafe(): Null<Array<Dynamic>> {
return data;
}
}
@:autoBuild(aura.utils.macro.ExtensibleEnumBuilder.build())
@:build(aura.utils.macro.ExtensibleEnumBuilder.build())
class MessageID {}
class ChannelMessageID extends MessageID {
final Play;
final Pause;
final Stop;
// Parameters
final PVolume;
final PPitch;
final PDopplerRatio;
final PDstAttenuation;
}
class DSPMessageID extends MessageID {
final BypassEnable;
final BypassDisable;
final SwapBufferReady;
final SetDelays;
}
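// Hypothetical usage sketch, not part of the original file: the
// ExtensibleEnumBuilder macro presumably lets subclassed message IDs continue
// the numbering of their parent class, so a message can be dispatched with a
// plain Int id as seen in the parseMessage() overrides above:
//     sendMessage({id: DSPMessageID.BypassEnable, data: null});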

View File

@ -0,0 +1,177 @@
package aura.types;
import haxe.ds.Vector;
import kha.FastFloat;
import kha.arrays.Float32Array;
import aura.utils.BufferUtils;
/**
Deinterleaved 32-bit floating point audio buffer.
**/
class AudioBuffer {
/**
The amount of audio channels in this buffer.
**/
public final numChannels: Int;
/**
The amount of samples stored in each channel of this buffer.
**/
public final channelLength: Int;
/**
The raw samples data of this buffer.
To access the samples of a specific channel, please use
`AudioBuffer.getChannelView()`.
**/
public final rawData: Float32Array;
final channelViews: Vector<AudioBufferChannelView>;
/**
Create a new `AudioBuffer` object.
@param numChannels The amount of audio channels in this buffer.
@param channelLength The amount of samples stored in each channel.
**/
public inline function new(numChannels: Int, channelLength: Int) {
assert(Error, numChannels > 0);
assert(Error, channelLength > 0);
this.numChannels = numChannels;
this.channelLength = channelLength;
this.rawData = new Float32Array(numChannels * channelLength);
channelViews = new Vector(numChannels);
for (c in 0...numChannels) {
channelViews[c] = this.rawData.subarray(channelLength * c, channelLength * (c + 1));
}
}
/**
Get access to the samples data in the audio channel specified by `channelIndex`.
**/
public inline function getChannelView(channelIndex: Int): AudioBufferChannelView {
assert(Error, 0 <= channelIndex && channelIndex < this.numChannels);
return channelViews[channelIndex];
}
/**
Copy and interleave this `AudioBuffer` into the given `target` array.
@param sourceOffset Per-channel position in this `AudioBuffer` from where to start copying and interleaving samples.
@param targetOffset Absolute position in the target array at which to start writing samples.
@param numSamplesToCopy The amount of samples to copy (per channel).
**/
public inline function interleaveToFloat32Array(target: Float32Array, sourceOffset: Int, targetOffset: Int, numSamplesToCopy: Int) {
assert(Error, numSamplesToCopy >= 0);
assert(Error, sourceOffset >= 0);
assert(Error, sourceOffset + numSamplesToCopy <= this.channelLength);
assert(Error, targetOffset >= 0);
assert(Error, targetOffset + numSamplesToCopy * this.numChannels <= target.length);
for (i in 0...numSamplesToCopy) {
for (c in 0...numChannels) {
target[targetOffset + i * numChannels + c] = getChannelView(c)[sourceOffset + i];
}
}
}
/**
Copy and deinterleave the given `source` array into this `AudioBuffer`.
@param source An interleaved array of audio samples.
@param numSourceChannels The amount of channels in the `source` array,
which must be smaller or equal to the amount of channels in this
`AudioBuffer`. The source channels are copied to the `numSourceChannels`
first channels in this `AudioBuffer`.
**/
public inline function deinterleaveFromFloat32Array(source: Float32Array, numSourceChannels: Int) {
assert(Error, numSourceChannels >= 0 && numSourceChannels <= this.numChannels);
assert(Error, source.length >= numSourceChannels * this.channelLength);
for (i in 0...channelLength) {
for (c in 0...numSourceChannels) {
getChannelView(c)[i] = source[i * numSourceChannels + c];
}
}
}
/**
Fill each audio channel in this buffer with zeroes.
**/
public inline function clear() {
clearBuffer(rawData);
}
/**
Copy the samples from `other` into this buffer.
Both buffers must have the same amount of channels
and the same amount of samples per channel.
**/
public inline function copyFromEquallySized(other: AudioBuffer) {
assert(Error, this.numChannels == other.numChannels);
assert(Error, this.channelLength == other.channelLength);
for (i in 0...rawData.length) {
this.rawData[i] = other.rawData[i];
}
}
/**
Copy the samples from `other` into this buffer.
		Both buffers must have the same amount of channels, and `other` must
		have at most as many samples per channel as this buffer.
		If `other` has fewer samples per channel than this buffer,
		`padWithZeroes` specifies whether the remaining samples in this buffer
		should be padded with zeroes (`padWithZeroes` is `true`) or should
		remain unmodified (`padWithZeroes` is `false`).
**/
public inline function copyFromShorterBuffer(other: AudioBuffer, padWithZeroes: Bool) {
assert(Error, this.numChannels == other.numChannels);
assert(Error, this.channelLength >= other.channelLength);
for (c in 0...this.numChannels) {
final thisView = this.getChannelView(c);
final otherView = other.getChannelView(c);
for (i in 0...other.channelLength) {
thisView[i] = otherView[i];
}
if (padWithZeroes) {
for (i in other.channelLength...this.channelLength) {
thisView[i] = 0.0;
}
}
}
}
}
/**
An array-like view on the samples data of an `AudioBuffer` channel.
**/
abstract AudioBufferChannelView(Float32Array) from Float32Array to Float32Array {
public function new(size: Int) {
this = new Float32Array(size);
}
@:arrayAccess
public function get(index: Int): FastFloat {
return this.get(index);
}
@:arrayAccess
public function set(index: Int, value: FastFloat): FastFloat {
return this.set(index, value);
}
}
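/**
	Hypothetical usage sketch, not part of the original file: round-trip a
	stereo buffer through an interleaved array.
**/
private class AudioBufferUsageSketch {
	public static function run() {
		final buffer = new AudioBuffer(2, 4);
		final interleaved = new Float32Array(2 * 4);
		buffer.getChannelView(0)[0] = 1.0; // first sample of the left channel
		buffer.interleaveToFloat32Array(interleaved, 0, 0, 4);
		// interleaved now holds the samples in the order L0 R0 L1 R1 ...
		buffer.deinterleaveFromFloat32Array(interleaved, 2);
		// ... and the samples are back in deinterleaved (planar) layout
	}
}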

View File

@ -0,0 +1,93 @@
package aura.types;
import kha.FastFloat;
@:notNull
@:pure
@:unreflective
@:forward(real, imag)
abstract Complex(ComplexImpl) {
public inline function new(real: FastFloat, imag: FastFloat) {
this = new ComplexImpl(real, imag);
}
@:from
public static inline function fromReal(real: FastFloat): Complex {
return new Complex(real, 0.0);
}
public static inline function newZero(): Complex {
return new Complex(0.0, 0.0);
}
public inline function copy(): Complex {
return new Complex(this.real, this.imag);
}
public inline function setZero() {
this.real = this.imag = 0.0;
}
public inline function setFrom(other: Complex) {
this.real = other.real;
this.imag = other.imag;
}
public inline function scale(s: FastFloat): Complex {
return new Complex(this.real * s, this.imag * s);
}
public static inline function exp(w: FastFloat) {
return new Complex(Math.cos(w), Math.sin(w));
}
@:op(A + B)
@:commutative
public inline function add(other: Complex): Complex {
return new Complex(this.real + other.real, this.imag + other.imag);
}
@:op(A - B)
public inline function sub(other: Complex): Complex {
return new Complex(this.real - other.real, this.imag - other.imag);
}
@:op(A * B)
@:commutative
public inline function mult(other: Complex): Complex {
return new Complex(
this.real*other.real - this.imag*other.imag,
this.real*other.imag + this.imag*other.real
);
}
/**
Optimized version of `this * new Complex(0.0, 1.0)`.
**/
public inline function multWithI(): Complex {
return new Complex(-this.imag, this.real);
}
@:op(~A)
public inline function conj(): Complex {
return new Complex(this.real, -this.imag);
}
public inline function equals(other: Complex): Bool {
return this.real == other.real && this.imag == other.imag;
}
}
@:pure
@:notNull
@:unreflective
@:struct
private final class ComplexImpl {
public var real: FastFloat;
public var imag: FastFloat;
public inline function new(real: FastFloat, imag: FastFloat) {
this.real = real;
this.imag = imag;
}
}
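// Worked example, not part of the original file: Complex.exp(Math.PI / 2)
// equals (cos(pi/2), sin(pi/2)) == (0, 1), the imaginary unit. Multiplying by
// it via the optimized multWithI() rotates a number by 90 degrees
// counterclockwise: new Complex(1.0, 0.0).multWithI() == (0, 1).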

View File

@ -0,0 +1,194 @@
package aura.types;
import haxe.ds.Vector;
#if AURA_BACKEND_HL
import kha.FastFloat;
#end
typedef ComplexArrayImpl =
#if AURA_BACKEND_HL
HL_ComplexArrayImpl
#elseif js
JS_ComplexArrayImpl
#else
Vector<Complex>
#end
;
/**
An array of complex numbers.
**/
@:forward(length)
@:unreflective
abstract ComplexArray(ComplexArrayImpl) {
/**
Create a new zero-initialized complex array.
**/
public inline function new(length: Int) {
#if AURA_BACKEND_HL
this = inline HL_ComplexArray.create(length);
#elseif js
this = new JS_ComplexArrayImpl(length);
#else
this = new ComplexArrayImpl(length);
for (i in 0...length) {
this[i] = Complex.newZero();
}
#end
}
#if AURA_BACKEND_HL
public inline function free() {
HL_ComplexArray.free(this);
}
#end
/**
Get the complex number at the given index of the array. Note that the
returned value is _not_ guaranteed to be the same object instance as
the one stored in the array, because the array does not store object
instances on every target.
**/
@:arrayAccess
public inline function get(index: Int): Complex {
#if AURA_BACKEND_HL
return HL_ComplexArray.get(this, index);
#elseif js
return JS_ComplexArrayImpl.get(this, index);
#else
return this[index];
#end
}
/**
Set a complex number at the given array index. It is _guaranteed_ that
the given value is copied into the array, so the passed complex object
instance may remain on the stack where possible.
**/
@:arrayAccess
public inline function set(index: Int, value: Complex): Complex {
#if AURA_BACKEND_HL
return HL_ComplexArray.set(this, index, value);
#elseif js
return JS_ComplexArrayImpl.set(this, index, value);
#else
// Copy to array to keep original value on stack
this[index].setFrom(value);
// It is important to return the element from the array instead of
// the `value` parameter, so that Haxe doesn't create a temporary
// complex object (allocated on the heap in the worst case) to store
// the state of `value` before calling `setFrom()` above...
return this[index];
#end
}
#if js
public inline function subarray(offset: Int, ?length: Int): ComplexArray {
return this.subarray(offset, length);
}
#end
public inline function copy(): ComplexArray {
var ret = new ComplexArray(this.length);
for (i in 0...this.length) {
#if AURA_BACKEND_HL
ret[i].setFrom(HL_ComplexArray.get(this, i));
#elseif js
ret.set(i, JS_ComplexArrayImpl.get(this, i)); // copy from this array, not from `ret`
#else
ret[i] = this[i];
#end
}
return ret;
}
/**
Copy the contents of `other` into this array.
Both arrays must have the same length.
**/
public inline function copyFrom(other: ComplexArray) {
assert(Error, this.length == other.length);
for (i in 0...this.length) {
set(i, other[i]);
}
}
}
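/**
A short sketch of the copy-on-set semantics described above:
```haxe
final arr = new ComplexArray(4);
final tmp = new Complex(1.0, -1.0); // may live on the stack
arr[2] = tmp;                       // the value is copied into the array
tmp.setZero();                      // does not affect arr[2] on any target
trace(arr[2].real);                 // 1.0
```
**/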
#if AURA_BACKEND_HL
private class HL_ComplexArrayImpl {
public var self: hl.Bytes;
public var length: Int;
public inline function new() {}
}
private class HL_ComplexArray {
public static inline function create(length: Int): ComplexArrayImpl {
final impl = new ComplexArrayImpl();
impl.length = length;
if (length > 0) {
impl.self = aura_hl_complex_array_alloc(length);
if (impl.self == null) {
throw 'Could not allocate enough memory for complex array of length ${length}';
}
}
return impl;
}
public static inline function free(impl: ComplexArrayImpl) {
aura_hl_complex_array_free(impl.self);
}
public static inline function get(impl: ComplexArrayImpl, index: Int): Complex {
return aura_hl_complex_array_get(impl.self, index);
}
public static inline function set(impl: ComplexArrayImpl, index: Int, value: Complex): Complex {
return aura_hl_complex_array_set(impl.self, index, value.real, value.imag);
}
@:hlNative("aura_hl", "complex_array_alloc")
static function aura_hl_complex_array_alloc(length: Int): hl.Bytes { return null; }
@:hlNative("aura_hl", "complex_array_free")
static function aura_hl_complex_array_free(complexArray: hl.Bytes): Void {}
@:hlNative("aura_hl", "complex_array_get")
static function aura_hl_complex_array_get(complexArray: hl.Bytes, index: Int): Complex { return Complex.newZero(); }
@:hlNative("aura_hl", "complex_array_set")
static function aura_hl_complex_array_set(complexArray: hl.Bytes, index: Int, real: FastFloat, imag: FastFloat): Complex { return Complex.newZero(); }
}
#end // AURA_BACKEND_HL
#if js
@:forward
private abstract JS_ComplexArrayImpl(js.lib.DataView) {
public var length(get, never): Int;
public inline function get_length(): Int {
return this.byteLength >>> 3;
}
public inline function new(length: Int) {
final buffer = new js.lib.ArrayBuffer(length * 2 * 4);
this = new js.lib.DataView(buffer, 0, buffer.byteLength);
}
public static inline function get(impl: JS_ComplexArrayImpl, index: Int): Complex {
return new Complex(impl.getFloat32(index * 2 * 4), impl.getFloat32((index * 2 + 1) * 4));
}
public static inline function set(impl: JS_ComplexArrayImpl, index: Int, value: Complex): Complex {
impl.setFloat32(index * 2 * 4, value.real);
impl.setFloat32((index * 2 + 1) * 4, value.imag);
return value;
}
public inline function subarray(offset: Int, ?length: Int): ComplexArray {
return cast new js.lib.DataView(this.buffer, offset * 2 * 4, length != null ? length * 2 * 4 : null);
}
}
#end // js

View File

@ -0,0 +1,201 @@
package aura.types;
import haxe.ds.Vector;
import kha.FastFloat;
import kha.arrays.Float32Array;
import aura.utils.BufferUtils;
import aura.utils.MathUtils;
import aura.utils.Pointer;
using aura.utils.ReverseIterator;
/**
The entirety of all fields with their respective HRIRs (head related impulse
responses).
**/
@:structInit class HRTF {
/**
The sample rate of the HRIRs.
**/
public final sampleRate: Int;
/**
The number of channels of the HRIRs.
**/
public final numChannels: Int;
/**
The number of samples of each HRIR (per channel).
**/
public final hrirSize: Int;
/**
The number of HRIRs in this HRTF.
**/
public final hrirCount: Int;
/**
The fields of this HRTF.
**/
public final fields: Vector<Field>;
/**
The longest delay of any HRIR contained in this HRTF in samples. Useful
to preallocate enough memory for delay lines (use
`Math.ceil(maxDelayLength)`).
**/
public final maxDelayLength: Float;
/**
Create a bilinearly interpolated HRIR for the given direction (distance
is fixed for now) and store it in `outputBuf`. The length of the HRIR's
impulse response as well as the interpolated delay (in samples) is
stored in `outImpulseLength` and `outDelay`.
@param elevation Elevation (polar) angle from 0 (bottom) to 180 (top).
@param azimuth Azimuthal angle from 0 (front) to 360, clockwise.
**/
public function getInterpolatedHRIR(
elevation: Float, azimuth: Float,
outputBuf: Float32Array, outImpulseLength: Pointer<Int>, outDelay: Pointer<FastFloat>
) {
/**
Used terms in this function:
low/high: the elevations of the closest HRIR below and above the
given elevation
left/right: the azimuths of the closest HRIRs to the left and right of
the given azimuth (the azimuth angle is clockwise, so left/right are
meant from the perspective of the origin)
**/
clearBuffer(outputBuf);
if (azimuth == 360) {
azimuth = 0;
}
// TODO Use fixed distance for now...
final field = this.fields[this.fields.length - 1];
// Elevations don't go all the way around the sphere (only bottom to
// top), so at the top we don't jump to the bottom but stay at the top.
// Also, the indices include the borders of the range, so use -1 for
// calculating the elevationStep.
final elevationStep = 180 / (field.evCount - 1);
final elevationIndexLow = Std.int(elevation / elevationStep);
final elevationIndexHigh = minI(elevationIndexLow + 1, field.evCount - 1);
var elevationWeight = (elevation % elevationStep) / elevationStep;
final elevationHRIROffsetLow = field.evHRIROffsets[elevationIndexLow];
final elevationHRIROffsetHigh = field.evHRIROffsets[elevationIndexHigh];
var delay = 0.0;
var hrirLength = 0;
for (ev in 0...2) {
final elevationIndex = ev == 0 ? elevationIndexLow : elevationIndexHigh;
final elevationHRIROffset = ev == 0 ? elevationHRIROffsetLow : elevationHRIROffsetHigh;
final azimuthStep = 360 / field.azCount[elevationIndex];
final azimuthIndexLeft = Std.int(azimuth / azimuthStep);
var azimuthIndexRight = azimuthIndexLeft + 1;
if (azimuthIndexRight == field.azCount[elevationIndex]) {
azimuthIndexRight = 0;
}
final azimuthWeight = (azimuth % azimuthStep) / azimuthStep;
final hrirLeft = field.hrirs[elevationHRIROffset + azimuthIndexLeft];
final hrirRight = field.hrirs[elevationHRIROffset + azimuthIndexRight];
final evWeight = ev == 0 ? 1 - elevationWeight : elevationWeight;
// Interpolate delay
delay += lerp(hrirLeft.delays[0], hrirRight.delays[0], azimuthWeight) * evWeight;
// Interpolate coefficients
final invWeight = 1 - azimuthWeight;
for (i in 0...outputBuf.length) {
final leftCoeff = i < hrirLeft.coeffs.length ? hrirLeft.coeffs[i] * invWeight : 0.0;
final rightCoeff = i < hrirRight.coeffs.length ? hrirRight.coeffs[i] * azimuthWeight : 0.0;
outputBuf[i] += (leftCoeff + rightCoeff) * evWeight;
}
var maxLength = maxI(hrirLeft.coeffs.length, hrirRight.coeffs.length);
if (maxLength > hrirLength) {
hrirLength = maxLength;
}
}
// Delay is stored in samples relative to the HRTF sample rate, convert
// to the current output sample rate (multiply by target rate over source rate)
final sampleRateFactor = Aura.sampleRate / this.sampleRate;
outDelay.set(delay * sampleRateFactor);
outImpulseLength.set(hrirLength);
}
}
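/**
A minimal calling sketch for `getInterpolatedHRIR`, assuming `hrtf` is a
fully loaded `HRTF` instance and Aura is initialized:
```haxe
final outBuf = new Float32Array(hrtf.hrirSize);
final outLength = new Pointer<Int>(0);
final outDelay = new Pointer<FastFloat>(0.0);
// Elevation 90 = horizontal plane, azimuth 90 = to the right of the listener
hrtf.getInterpolatedHRIR(90.0, 90.0, outBuf, outLength, outDelay);
```
**/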
/**
A field represents the entirety of HRIRs (head related impulse responses)
for a given distance to the listener. Imagine this as one layer of a sphere
around the listener.
**/
class Field {
/**
Distance to the listener, in millimeters (in the range 50mm-2500mm).
**/
public var distance: Int;
/**
Total HRIR count (for all elevations combined).
**/
public var hrirCount: Int;
/**
Number of elevations in this field. Elevations start at -90 degrees
(bottom) and go up to 90 degrees.
**/
public var evCount: Int;
/**
Number of azimuths (and HRIRs) per elevation. Azimuths construct a full
circle (360 degrees), starting at the front of the listener and going
clockwise.
**/
public var azCount: Vector<Int>;
/**
The offset into the `hrirs` array per elevation. The stored offset index
starts at the HRIR with azimuth 0 (front of the listener).
**/
public var evHRIROffsets: Vector<Int>;
/**
All HRIRs in this field.
**/
public var hrirs: Vector<HRIR>;
public function new() {}
}
/**
A single HRIR (head related impulse response)
**/
class HRIR {
/**
The impulse response coefficients. If the HRIR is stereo, the
coefficients are interleaved (left/right).
**/
public var coeffs: Float32Array;
/**
Delay of the impulse response per channel in samples.
**/
// TODO: Don't forget to also change this when resampling!
public var delays: Vector<Float>;
public function new() {}
}

View File

@ -0,0 +1,112 @@
package aura.types;
import haxe.ds.Vector;
import kha.arrays.Float32Array;
import aura.Types.AtomicInt;
// TODO: Make generic in some way
@:nullSafety(StrictThreaded)
class SwapBuffer {
static final ROW_COUNT = 2;
public final length: Int;
// https://www.usenix.org/legacy/publications/library/proceedings/usenix02/full_papers/huang/huang_html/node8.html
public final data: Vector<Vector<Float32Array>>;
final readerCount: Vector<AtomicInt>;
final newerBuf: Vector<AtomicInt>;
var latestWriteRow: AtomicInt = 0;
var curWriteBufIdx: AtomicInt = 0;
var curWriteRowIdx: AtomicInt = 0;
var curReadRowIdx: AtomicInt = 0;
public function new(length: Int) {
this.length = length;
this.data = new Vector(ROW_COUNT);
for (i in 0...ROW_COUNT) {
data[i] = new Vector(ROW_COUNT);
for (j in 0...ROW_COUNT) {
data[i][j] = new Float32Array(length);
}
}
this.readerCount = new Vector(ROW_COUNT);
for (i in 0...ROW_COUNT) {
readerCount[i] = 0;
}
this.newerBuf = new Vector(ROW_COUNT);
for (i in 0...ROW_COUNT) {
newerBuf[i] = 0;
}
}
public inline function beginRead() {
curReadRowIdx = latestWriteRow;
#if cpp
readerCount[curReadRowIdx] = AtomicInt.atomicInc(readerCount[curReadRowIdx].toPtr());
#else
readerCount[curReadRowIdx]++;
#end
}
public inline function endRead() {
#if cpp
readerCount[curReadRowIdx] = AtomicInt.atomicDec(readerCount[curReadRowIdx].toPtr());
#else
readerCount[curReadRowIdx]--;
#end
}
public inline function read(dst: Float32Array, dstStart: Int, srcStart: Int, length: Int) {
final bufIdx = newerBuf[curReadRowIdx];
for (i in 0...length) {
dst[dstStart + i] = data[curReadRowIdx][bufIdx][srcStart + i];
}
}
public inline function beginWrite() {
for (i in 0...ROW_COUNT) {
if (readerCount[i] == 0) {
curWriteRowIdx = i;
break;
}
}
// Select the less recently written of the two buffers in this row
curWriteBufIdx = 1 - newerBuf[curWriteRowIdx];
}
public inline function endWrite() {
newerBuf[curWriteRowIdx] = curWriteBufIdx;
latestWriteRow = curWriteRowIdx;
}
public inline function write(src: Float32Array, srcStart: Int, dstStart: Int, length: Int = -1) {
if (length == -1) {
length = src.length - srcStart;
}
for (i in 0...length) {
data[curWriteRowIdx][curWriteBufIdx][dstStart + i] = src[srcStart + i]; // TODO: Investigate possible memory leaks through allocating
}
}
public inline function writeVecF(src: Vector<Float>, srcStart: Int, dstStart: Int, length: Int = -1) {
if (length == -1) {
length = src.length - srcStart;
}
for (i in 0...length) {
data[curWriteRowIdx][curWriteBufIdx][dstStart + i] = src[srcStart + i];
}
}
public inline function writeZero(dstStart: Int, dstEnd: Int) {
for (i in dstStart...dstEnd) {
data[curWriteRowIdx][curWriteBufIdx][i] = 0;
}
}
}
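/**
A sketch of the intended single-writer/multi-reader protocol, assuming
writer and readers run on different threads (sizes are illustrative):
```haxe
final swap = new SwapBuffer(512);
// Writer thread:
swap.beginWrite();
swap.write(sourceArray, 0, 0);
swap.endWrite();
// Any reader thread:
swap.beginRead();
swap.read(destArray, 0, 0, 512);
swap.endRead();
```
**/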

View File

@ -0,0 +1,126 @@
package aura.utils;
import haxe.Exception;
import haxe.PosInfos;
import haxe.exceptions.PosException;
import haxe.macro.Context;
import haxe.macro.Expr;
using haxe.macro.ExprTools;
class Assert {
/**
Checks whether the given expression evaluates to true. If this is not
the case, a `AuraAssertionException` with additional information is
thrown.
The assert level describes the severity of the assertion. If the
severity is lower than the level stored in the `AURA_ASSERT_LEVEL` flag,
the assertion is omitted from the code so that it doesn't decrease the
runtime performance.
@param level The severity of this assertion.
@param condition The conditional expression to test.
@param message Optional message to display when the assertion fails.
**/
public static macro function assert(level: ExprOf<AssertLevel>, condition: ExprOf<Bool>, ?message: ExprOf<String>): Expr {
final levelVal: AssertLevel = AssertLevel.fromExpr(level);
final assertThreshold = AssertLevel.fromString(Context.definedValue("AURA_ASSERT_LEVEL"));
if (levelVal < assertThreshold) {
return macro {};
}
return macro {
if (!$condition) {
#if AURA_ASSERT_QUIT kha.System.stop(); #end
@:pos(condition.pos)
final exception = new aura.utils.Assert.AuraAssertionException($v{condition.toString()}, ${message});
#if AURA_ASSERT_START_DEBUGGER
@:privateAccess aura.utils.Assert.logError(exception.details());
@:privateAccess aura.utils.Assert.logError("An assertion error was triggered, starting debugger...");
aura.utils.Debug.startDebugger();
#else
@:pos(condition.pos)
@:privateAccess aura.utils.Assert.throwAssertionError(exception);
#end
}
};
}
/**
Helper function to prevent Haxe "bug" that actually throws an error
even when using `macro throw` (inlining this method also does not work).
**/
static function throwAssertionError(exp: AuraAssertionException, ?pos: PosInfos) {
throw exp;
}
static function logError(str: String, ?infos: PosInfos) {
#if sys
Sys.stderr().writeString(str + "\n");
#elseif kha_krom
Krom.log(str + "\n");
#elseif kha_js
js.html.Console.error(str);
#else
haxe.Log.trace(str, infos);
#end
}
}
/**
Exception that is thrown when an assertion fails.
@see `Assert`
**/
class AuraAssertionException extends PosException {
/**
@param exprString The string representation of the failed assert condition.
@param message Custom error message, use `null` to omit printing the message.
**/
public function new(exprString: String, message: Null<String>, ?previous: Exception, ?pos: Null<PosInfos>) {
final optMsg = message != null ? '\n\tMessage: $message' : "";
super('\n[Aura] Failed assertion:$optMsg\n\tExpression: ($exprString)', previous, pos);
}
}
enum abstract AssertLevel(Int) from Int to Int {
var Debug: AssertLevel;
var Warning: AssertLevel;
var Error: AssertLevel;
var Critical: AssertLevel;
// Don't use this level in assert() calls!
var NoAssertions: AssertLevel;
public static function fromExpr(e: ExprOf<AssertLevel>): AssertLevel {
switch (e.expr) {
case EConst(CIdent(v)): return fromString(v);
default: throw new Exception('Unsupported expression: $e');
};
}
/**
Converts a string into an `AssertLevel`, the string must be spelled
exactly as the assert level. `null` defaults to `AssertLevel.Critical`.
**/
public static function fromString(s: String): AssertLevel {
return switch (s) {
case "Debug": Debug;
case "Warning": Warning;
case "Error": Error;
case "Critical" | null: Critical;
case "NoAssertions": NoAssertions;
default: throw 'Could not convert "$s" to AssertLevel';
}
}
@:op(A < B) static function lt(a:AssertLevel, b:AssertLevel):Bool;
@:op(A > B) static function gt(a:AssertLevel, b:AssertLevel):Bool;
}
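/**
A usage sketch for the assert macro, assuming the project is compiled
with `-D AURA_ASSERT_LEVEL=Debug` so that all assertion levels are
compiled in:
```haxe
import aura.utils.Assert.*;
function applyVolume(volume: Float) {
assert(Debug, volume >= 0.0, "volume must not be negative");
assert(Error, !Math.isNaN(volume));
// ...
}
```
**/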

View File

@ -0,0 +1,103 @@
package aura.utils;
import haxe.ds.Vector;
import kha.FastFloat;
import kha.arrays.Float32Array;
inline function fillBuffer(buffer: Float32Array, value: FastFloat, length: Int = -1) {
for (i in 0...(length == -1 ? buffer.length : length)) {
buffer[i] = value;
}
}
inline function clearBuffer(buffer: Float32Array) {
#if hl
hl_fillByteArray(buffer, 0);
#else
fillBuffer(buffer, 0);
#end
}
inline function initZeroesVecI(vector: Vector<Int>) {
#if (haxe_ver >= "4.300")
vector.fill(0);
#else
for (i in 0...vector.length) {
vector[i] = 0;
}
#end
}
inline function initZeroesF64(vector: Vector<Float>) {
#if (haxe_ver >= "4.300")
vector.fill(0);
#else
for (i in 0...vector.length) {
vector[i] = 0;
}
#end
}
inline function initZeroesF32(vector: Vector<FastFloat>) {
#if (haxe_ver >= "4.300")
vector.fill(0);
#else
for (i in 0...vector.length) {
vector[i] = 0;
}
#end
}
/**
Creates an integer vector with the given length. It is guaranteed to be
filled with 0 on every target.
**/
inline function createEmptyVecI(length: Int): Vector<Int> {
#if target.static
return new Vector<Int>(length);
#else
// On dynamic targets, vectors hold `null` after creation instead of 0
final vec = new Vector<Int>(length);
inline initZeroesVecI(vec);
return vec;
#end
}
/**
Creates a float vector with the given length. It is guaranteed to be
filled with 0 on every target.
**/
inline function createEmptyVecF64(length: Int): Vector<Float> {
#if target.static
return new Vector<Float>(length);
#else
final vec = new Vector<Float>(length);
inline initZeroesF64(vec);
return vec;
#end
}
inline function createEmptyVecF32(length: Int): Vector<FastFloat> {
#if target.static
return new Vector<FastFloat>(length);
#else
final vec = new Vector<FastFloat>(length);
inline initZeroesF32(vec);
return vec;
#end
}
inline function createEmptyF32Array(length: Int): Float32Array {
final out = new Float32Array(length);
#if !js
clearBuffer(out);
#end
return out;
}
#if hl
inline function hl_fillByteArray(a: kha.arrays.ByteArray, byteValue: Int) {
(a.buffer: hl.Bytes).fill(0, a.byteLength, byteValue);
}
#end
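/**
A short sketch of why the helpers above exist: on dynamic targets a plain
`new Vector<Int>(n)` holds `null` entries until initialized, so the
helpers guarantee zeroed storage on every target:
```haxe
final counts = createEmptyVecI(8);       // eight zeroes, on all targets
final window = createEmptyF32Array(256); // zeroed Float32Array
```
**/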

View File

@ -0,0 +1,48 @@
package aura.utils;
import kha.FastFloat;
import kha.arrays.Float32Array;
import aura.utils.BufferUtils;
class CircularBuffer {
final data: Float32Array;
var readHead: Int;
var writeHead: Int;
public var length(get, null): Int;
public var delay = 0;
public inline function new(size: Int) {
assert(Warning, size > 0);
this.data = createEmptyF32Array(size);
this.length = size;
this.writeHead = 0;
this.readHead = 1;
}
public inline function setDelay(delaySamples: Int) {
delay = delaySamples;
readHead = writeHead - delaySamples;
if (readHead < 0) {
readHead += length;
}
}
public inline function get_length(): Int {
return data.length;
}
public inline function get(): FastFloat {
return data[readHead];
}
public inline function set(value: FastFloat) {
data[writeHead] = value;
}
public inline function increment() {
if (++readHead >= length) readHead = 0;
if (++writeHead >= length) writeHead = 0;
}
}
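/**
A minimal delay-line sketch built on `CircularBuffer` (`input` and
`output` are assumed to be Float32Arrays of equal length):
```haxe
final delayLine = new CircularBuffer(1024);
delayLine.setDelay(480); // ~10 ms at 48 kHz
for (i in 0...input.length) {
output[i] = delayLine.get(); // read the delayed sample
delayLine.set(input[i]);     // write the current sample
delayLine.increment();       // advance both heads
}
```
**/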

View File

@ -0,0 +1,130 @@
package aura.utils;
import kha.Image;
import kha.arrays.Float32Array;
import kha.graphics2.Graphics;
import aura.utils.MathUtils;
using StringTools;
class Debug {
static var id = 0;
public static inline function startDebugger() {
#if js
js.Syntax.code("debugger");
#end
}
/**
Generates GraphViz/dot code to draw the channel tree for debugging. On
html5 this code is copied to the clipboard; on other targets it is
written to the console, where it might be cut off (so html5 is the
better choice for this).
**/
public static function debugTreeViz() {
#if AURA_DEBUG
final content = new StringBuf();
content.add("digraph Aura_Tree_Snapshot {\n");
content.add('\tranksep=equally;\n');
content.add('\trankdir=BT;\n');
content.add('\tnode [fontname = "helvetica"];\n');
addTreeToViz(content, Aura.masterChannel);
content.add("}");
copyToClipboard(content.toString());
#else
trace("Please build with 'AURA_DEBUG' flag!");
#end
}
#if AURA_DEBUG
static function addTreeToViz(buf: StringBuf, channelHandle: Handle) {
buf.add('\t${id++} [\n');
buf.add('\t\tshape=plaintext,\n');
buf.add('\t\tlabel=<<table border="1" cellborder="0" style="rounded">\n');
buf.add('\t\t\t<tr><td colspan="2"><b>${Type.getClassName(Type.getClass(channelHandle))}</b></td></tr>\n');
buf.add('\t\t\t<tr><td colspan="2">${Type.getClassName(Type.getClass(@:privateAccess channelHandle.channel))}</td></tr>\n');
buf.add('\t\t\t<hr/>\n');
buf.add('\t\t\t<tr><td><i>Tree level</i></td><td>${@:privateAccess channelHandle.channel.treeLevel}</td></tr>\n');
buf.add('\t\t\t<hr/>\n');
for (key => val in channelHandle.getDebugAttrs()) {
buf.add('\t\t\t<tr><td><i>$key</i></td><td>$val</td></tr>');
}
buf.add('\t\t</table>>\n');
buf.add('\t];\n');
final thisID = id - 1;
if (Std.isOfType(channelHandle, MixChannelHandle)) {
var mixHandle: MixChannelHandle = cast channelHandle;
for (inputHandle in mixHandle.inputHandles) {
final inputID = id;
addTreeToViz(buf, inputHandle);
buf.add('\t${inputID} -> ${thisID};\n');
}
}
}
#end
static function copyToClipboard(text: String) {
#if (kha_html5 || kha_debug_html5)
js.Browser.navigator.clipboard.writeText(text)
.then(
(_) -> { trace("Debug tree code has been copied to clipboard."); },
(err) -> {
trace('Debug tree code could not be copied to clipboard, writing to console instead. Reason: $err');
trace(text);
}
);
#else
trace(text);
#end
}
public static function drawWaveform(buffer: Float32Array, g: Graphics, x: Float, y: Float, w: Float, h: Float) {
g.begin(false);
g.opacity = 1.0;
g.color = kha.Color.fromFloats(0.176, 0.203, 0.223);
g.fillRect(x, y, w, h);
final borderSize = 2;
g.color = kha.Color.fromFloats(0.099, 0.099, 0.099);
g.drawRect(x + borderSize * 0.5, y + borderSize * 0.5, w - borderSize, h - borderSize, borderSize);
g.color = kha.Color.fromFloats(0.898, 0.411, 0.164);
final deinterleavedLength = Std.int(buffer.length / 2);
final numLines = buffer.length - 1;
final stepSize = w / numLines;
final innerHeight = h - 2 * borderSize;
for (c in 0...2) {
if ( c == 1 ) g.color = kha.Color.fromFloats(0.023, 0.443, 0.796);
for (i in 0...deinterleavedLength - 1) {
final idx = i + c * deinterleavedLength;
final y1 = y + borderSize + (1 - clampF(buffer[idx] * 0.5 + 0.5, 0, 1)) * innerHeight;
final y2 = y + borderSize + (1 - clampF(buffer[idx + 1] * 0.5 + 0.5, 0, 1)) * innerHeight;
g.drawLine(x + idx * stepSize, y1, x + (idx + 1) * stepSize, y2);
}
}
g.color = kha.Color.fromFloats(0.023, 0.443, 0.796);
g.opacity = 0.5;
// g.drawLine(x + w / 2, y, x + w / 2, y + h, 2);
g.end();
}
public static function createRenderTarget(w: Int, h: Int): Image {
return Image.createRenderTarget(Std.int(w), Std.int(h), null, NoDepthAndStencil, 1);
}
// #end
}

View File

@ -0,0 +1,22 @@
package aura.utils;
/**
The decibel (dB) is a relative unit of measurement equal to one tenth of a bel (B).
It expresses the ratio of two values of a power or root-power quantity on a logarithmic scale.
The number of decibels is ten times the logarithm to base 10 of the ratio of two power quantities.
A change in power by a factor of 10 corresponds to a 10 dB change in level.
At the half power point an audio circuit or an antenna exhibits an attenuation of approximately 3 dB.
A change in amplitude by a factor of 10 results in a change in power by a factor of 100, which corresponds to a 20 dB change in level.
A change in amplitude ratio by a factor of 2 (equivalently factor of 4 in power change) approximately corresponds to a 6 dB change in level.
**/
class Decibel {
@:pure public static inline function toDecibel(volume: Float): Float {
return 20 * MathUtils.log10(volume);
}
@:pure public static inline function toLinear(db: Float): Float {
return Math.pow(10, db / 20);
}
}
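/**
Two worked examples for the conversions above (values rounded):
```haxe
Decibel.toDecibel(0.5); // ~= -6.02 dB (half amplitude)
Decibel.toLinear(-20);  // ~= 0.1 (one tenth of the amplitude)
```
**/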

View File

@ -0,0 +1,27 @@
package aura.utils;
import aura.utils.Assert.*;
@:pure inline function frequencyToFactor(freq: Hertz, maxFreq: Hertz): Float {
assert(Debug, freq <= maxFreq);
return freq / maxFreq;
}
@:pure inline function factorToFrequency(factor: Float, maxFreq: Hertz): Hertz {
assert(Debug, 0.0 <= factor && factor <= 1.0);
return Std.int(factor * maxFreq);
}
@:pure inline function sampleRateToMaxFreq(sampleRate: Hertz): Hertz {
return Std.int(sampleRate / 2.0);
}
@:pure inline function msToSamples(sampleRate: Hertz, milliseconds: Millisecond): Int {
return Math.ceil((milliseconds * 0.001) * sampleRate);
}
@:pure inline function samplesToMs(sampleRate: Hertz, samples: Int): Millisecond {
return (samples / sampleRate) * 1000;
}
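/**
Worked examples, assuming a sample rate of 44100 Hz:
```haxe
sampleRateToMaxFreq(44100); // 22050 (Nyquist frequency)
msToSamples(44100, 1000);   // 44100 samples (one second)
samplesToMs(44100, 22050);  // 500.0 milliseconds
```
**/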

View File

@ -0,0 +1,53 @@
package aura.utils;
import kha.FastFloat;
import kha.simd.Float32x4;
import aura.types.AudioBuffer.AudioBufferChannelView;
class LinearInterpolator {
public var lastValue: FastFloat;
public var targetValue: FastFloat;
public var currentValue: FastFloat;
public inline function new(targetValue: FastFloat) {
this.targetValue = this.currentValue = this.lastValue = targetValue;
}
public inline function updateLast() {
this.lastValue = this.currentValue = this.targetValue;
}
public inline function getLerpStepSize(numSteps: Int): FastFloat {
return (this.targetValue - this.lastValue) / numSteps;
}
/**
Return a 32x4 SIMD register where each value contains the step size times
its index for efficient usage in `LinearInterpolator.applySIMD32x4()`.
**/
public inline function getLerpStepSizes32x4(numSteps: Int): Float32x4 {
final stepSize = getLerpStepSize(numSteps);
return Float32x4.mul(Float32x4.loadAllFast(stepSize), Float32x4.loadFast(1.0, 2.0, 3.0, 4.0));
}
/**
Applies four consecutive interpolation steps to `samples` (multiplicative)
using Kha's 32x4 SIMD API, starting at index `i`. `stepSizes32x4` must
be a SIMD register filled with `LinearInterpolator.getLerpStepSizes32x4()`.
There is no bound checking in place! It is assumed that 4 samples can
be accessed starting at `i`.
**/
public inline function applySIMD32x4(samples: AudioBufferChannelView, i: Int, stepSizes32x4: Float32x4) {
var rampValues = Float32x4.add(Float32x4.loadAllFast(currentValue), stepSizes32x4);
currentValue = Float32x4.getFast(rampValues, 3);
var signalValues = Float32x4.loadFast(samples[i], samples[i + 1], samples[i + 2], samples[i + 3]);
var res = Float32x4.mul(signalValues, rampValues);
samples[i + 0] = Float32x4.getFast(res, 0);
samples[i + 1] = Float32x4.getFast(res, 1);
samples[i + 2] = Float32x4.getFast(res, 2);
samples[i + 3] = Float32x4.getFast(res, 3);
}
}
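/**
A block-processing sketch: ramp a channel's gain towards a new target
over one block. `samples` is assumed to be an `AudioBufferChannelView`
and `numSteps` a block length that is a multiple of 4:
```haxe
final interp = new LinearInterpolator(1.0);
interp.targetValue = 0.5; // fade towards half gain
final steps32x4 = interp.getLerpStepSizes32x4(numSteps);
var i = 0;
while (i < numSteps) {
interp.applySIMD32x4(samples, i, steps32x4);
i += 4;
}
interp.updateLast(); // commit the target as the new starting point
```
**/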

View File

@ -0,0 +1,13 @@
package aura.utils;
/**
Merges the contents of `from` into `to` and returns the latter (`to` is
modified).
**/
@:generic
inline function mergeIntoThis<K, V>(to: Map<K, V>, from: Map<K, V>): Map<K, V> {
for (key => val in from) {
to[key] = val;
}
return to;
}

View File

@ -0,0 +1,131 @@
/**
Various math helper functions.
**/
package aura.utils;
import kha.FastFloat;
import aura.math.Vec3;
/** 1.0 / ln(10) in double precision **/
inline var LN10_INV_DOUBLE: Float = 0.43429448190325181666793241674895398318767547607421875;
/** 1.0 / ln(10) in single precision **/
inline var LN10_INV_SINGLE: kha.FastFloat = 0.4342944920063018798828125;
/** 1.0 / e (Euler's number) **/
inline var E_INV: kha.FastFloat = 0.367879441171442321595523770161460867;
@:pure inline function maxI(a: Int, b: Int): Int {
return a > b ? a : b;
}
@:pure inline function minI(a: Int, b: Int): Int {
return a < b ? a : b;
}
@:pure inline function maxF(a: Float, b: Float): Float {
return a > b ? a : b;
}
@:pure inline function minF(a: Float, b: Float): Float {
return a < b ? a : b;
}
@:pure inline function lerp(valA: Float, valB: Float, fac: Float): Float {
return valA * (1 - fac) + valB * fac;
}
@:pure inline function lerpF32(valA: FastFloat, valB: FastFloat, fac: FastFloat): FastFloat {
return valA * (1 - fac) + valB * fac;
}
@:pure inline function clampI(val: Int, min: Int = 0, max: Int = 1): Int {
return maxI(min, minI(max, val));
}
@:pure inline function clampF(val: Float, min: Float = 0.0, max: Float = 1.0): Float {
return maxF(min, minF(max, val));
}
/**
Returns the base-10 logarithm of a number.
**/
@:pure inline function log10(v: Float): Float {
return Math.log(v) * LN10_INV_DOUBLE;
}
/**
Calculate the counterclockwise angle of the rotation of `vecOther` relative
to `vecBase` around the rotation axis of `vecNormal`. All input vectors
*must* be normalized!
**/
@:pure inline function getFullAngleDegrees(vecBase: Vec3, vecOther: Vec3, vecNormal: Vec3): Float {
final dot = vecBase.dot(vecOther);
final det = determinant3x3(vecBase, vecOther, vecNormal);
var radians = Math.atan2(det, dot);
// Move [-PI, 0) to [PI, 2 * PI]
if (radians < 0) {
radians += 2 * Math.PI;
}
return radians * 180 / Math.PI;
}
@:pure inline function determinant3x3(col1: Vec3, col2: Vec3, col3: Vec3): Float {
return (
col1.x * col2.y * col3.z
+ col2.x * col3.y * col1.z
+ col3.x * col1.y * col2.z
- col1.z * col2.y * col3.x
- col2.z * col3.y * col1.x
- col3.z * col1.y * col2.x
);
}
/**
Projects the given point to a plane described by its normal vector. The
origin of the plane is assumed to be at (0, 0, 0).
**/
@:pure inline function projectPointOntoPlane(point: Vec3, planeNormal: Vec3): Vec3 {
return point.sub(planeNormal.mult(planeNormal.dot(point)));
}
@:pure inline function isPowerOf2(val: Int): Bool {
return (val & (val - 1)) == 0;
}
@:pure inline function getNearestIndexF(value: Float, stepSize: Float): Int {
final quotient: Int = Std.int(value / stepSize);
final remainder: Float = value % stepSize;
return (remainder > stepSize / 2) ? (quotient + 1) : (quotient);
}
/**
Calculates the logarithm of base 2 for the given unsigned(!) integer `n`,
which is the position of the most significant bit set.
**/
@:pure inline function log2Unsigned(n: Int): Int {
// TODO: optimize? See https://graphics.stanford.edu/~seander/bithacks.html#IntegerLog
var res = 0;
var tmp = n >>> 1; // Workaround for https://github.com/HaxeFoundation/haxe/issues/10783
while (tmp != 0) {
res++;
tmp >>>= 1;
}
return res;
}
/** Calculates 2^n for a given unsigned integer `n`. **/
@:pure inline function exp2(n: Int): Int {
return 1 << n;
}
@:pure inline function div4(n: Int): Int {
return n >>> 2;
}
@:pure inline function mod4(n: Int): Int {
return n & 3;
}
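/**
Worked examples for the bit helpers above:
```haxe
log2Unsigned(4096); // 12 (position of the most significant set bit)
exp2(12);           // 4096
div4(13);           // 3
mod4(13);           // 1
```
**/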

View File

@ -0,0 +1,37 @@
package aura.utils;
@:generic
class Pointer<T> {
public var value: Null<T>;
public inline function new(value: Null<T> = null) {
set(value);
}
public inline function set(value: Null<T>) {
this.value = value;
}
public inline function get(): Null<T> {
return this.value;
}
/**
Return the pointer's value typed as not-nullable. Use at your own risk.
**/
public inline function getSure(): T {
return @:nullSafety(Off) (this.value: T);
}
}
/**
Workaround for covariance issues when using generics. Use `PointerType<T>`
instead of `Pointer<T>` when using generic pointers as function parameters.
**/
@:generic
typedef PointerType<T> = {
public var value: Null<T>;
public function set(value: Null<T>): Void;
public function get(): Null<T>;
}

View File

@ -0,0 +1,24 @@
package aura.utils;
#if (cpp && AURA_WITH_OPTICK)
@:cppInclude('optick.h')
#end
class Profiler {
public static inline function frame(threadName: String) {
#if (cpp && AURA_WITH_OPTICK)
untyped __cpp__("OPTICK_FRAME({0})", threadName);
#end
}
public static inline function event() {
#if (cpp && AURA_WITH_OPTICK)
untyped __cpp__("OPTICK_EVENT()");
#end
}
public static inline function shutdown() {
#if (cpp && AURA_WITH_OPTICK)
untyped __cpp__("OPTICK_SHUTDOWN()");
#end
}
}

View File

@ -0,0 +1,79 @@
package aura.utils;
import kha.arrays.Float32Array;
import aura.utils.MathUtils;
/**
Various utilities for resampling (i.e. changing the sample rate) of signals.
Terminology used in this class for a resampling process:
- **Source data** describes the data prior to resampling.
- **Target data** describes the resampled data.
**/
class Resampler {
/**
Return the amount of samples required for storing the result of
resampling data with the given `sourceDataLength` to the
`targetSampleRate`.
**/
public static inline function getResampleLength(sourceDataLength: Int, sourceSampleRate: Hertz, targetSampleRate: Hertz): Int {
return Math.ceil(sourceDataLength * (targetSampleRate / sourceSampleRate));
}
/**
Transform a position (in samples) relative to the source's sample rate
into a position (in samples) relative to the target's sample rate and
return the transformed position.
**/
public static inline function sourceSamplePosToTargetPos(sourceSamplePos: Float, sourceSampleRate: Hertz, targetSampleRate: Hertz): Float {
return sourceSamplePos * (targetSampleRate / sourceSampleRate);
}
/**
Transform a position (in samples) relative to the target's sample rate
into a position (in samples) relative to the source's sample rate and
return the transformed position.
**/
public static inline function targetSamplePosToSourcePos(targetSamplePos: Float, sourceSampleRate: Hertz, targetSampleRate: Hertz): Float {
return targetSamplePos * (sourceSampleRate / targetSampleRate);
}
/**
Resample the given `sourceData` from `sourceSampleRate` to
`targetSampleRate` and write the resampled data into `targetData`.
It is expected that
`targetData.length == Resampler.getResampleLength(sourceData.length, sourceSampleRate, targetSampleRate)`,
otherwise this method may fail (there are no safety checks in place)!
**/
public static inline function resampleFloat32Array(sourceData: Float32Array, sourceSampleRate: Hertz, targetData: Float32Array, targetSampleRate: Hertz) {
for (i in 0...targetData.length) {
targetData[i] = sampleAtTargetPositionLerp(sourceData, i, sourceSampleRate, targetSampleRate);
}
}
/**
Sample the given `sourceData` at `targetSamplePos` (position in samples
relative to the target data) using linear interpolation for values
between source samples.
@param sourceSampleRate The sample rate of the source data
@param targetSampleRate The sample rate of the target data
**/
public static function sampleAtTargetPositionLerp(sourceData: Float32Array, targetSamplePos: Float, sourceSampleRate: Hertz, targetSampleRate: Hertz): Float {
assert(Critical, targetSamplePos >= 0.0);
final sourceSamplePos = targetSamplePosToSourcePos(targetSamplePos, sourceSampleRate, targetSampleRate);
final maxPos = sourceData.length - 1;
final pos1 = Math.floor(sourceSamplePos);
final pos2 = pos1 + 1;
final value1 = (pos1 > maxPos) ? sourceData[maxPos] : sourceData[pos1];
final value2 = (pos2 > maxPos) ? sourceData[maxPos] : sourceData[pos2];
return lerp(value1, value2, sourceSamplePos - Math.floor(sourceSamplePos));
}
}
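/**
A worked example: resampling one second of audio from 22050 Hz to
44100 Hz (`source` is assumed to hold 22050 samples):
```haxe
final dstLen = Resampler.getResampleLength(22050, 22050, 44100); // 44100
final target = new Float32Array(dstLen);
Resampler.resampleFloat32Array(source, 22050, target, 44100);
```
**/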

View File

@ -0,0 +1,32 @@
package aura.utils;
/**
Use this as a static extension:
```haxe
using ReverseIterator;
for (i in (0...10).reversed()) {
// Do something...
}
```
**/
inline function reversed(iter: IntIterator, step: Int = 1) {
// Iterate from (max - 1) down to min, mirroring the `min...max` range
return @:privateAccess new ReverseIterator(iter.max - 1, iter.min, step);
}
private class ReverseIterator {
var currentIndex: Int;
var end: Int;
var step: Int;
public inline function new(start: Int, end: Int, step: Int) {
this.currentIndex = start;
this.end = end;
this.step = step;
}
public inline function hasNext() return currentIndex >= end;
public inline function next() return (currentIndex -= step) + step;
}

View File

@ -0,0 +1,36 @@
// =============================================================================
// Adapted from
// https://code.haxe.org/category/data-structures/step-iterator.html
// =============================================================================
package aura.utils;
/**
Use this as a static extension:
```haxe
using aura.utils.StepIterator;
for (i in (0...10).step(2)) {
// Do something...
}
```
**/
inline function step(iter: IntIterator, step: Int) {
return @:privateAccess new StepIterator(iter.min, iter.max, step);
}
private class StepIterator {
var currentIndex: Int;
final end: Int;
final step: Int;
public inline function new(start: Int, end: Int, step: Int) {
this.currentIndex = start;
this.end = end;
this.step = step;
}
public inline function hasNext() return currentIndex < end;
public inline function next() return (currentIndex += step) - step;
}

View File

@ -0,0 +1,23 @@
package aura.utils;
import kha.arrays.Float32Array;
class TestSignals {
/**
Fill the given `array` with a signal that represents a DC (0 Hz) signal:
every sample is 1.0, except for the very first one, which is 0.0.
**/
public static inline function fillDC(array: Float32Array) {
for (i in 0...array.length) {
array[i] = (i == 0) ? 0.0 : 1.0;
}
}
/**
Fill the given `array` with a single unit impulse.
**/
public static inline function fillUnitImpulse(array: Float32Array) {
for (i in 0...array.length) {
array[i] = (i == 0) ? 1.0 : 0.0;
}
}
}

View File

@ -0,0 +1,136 @@
package aura.utils.macro;
import haxe.macro.Context;
import haxe.macro.Expr;
import haxe.macro.Type.ClassType;
/**
This macro implements integer enum types that can extend from others, at the
cost of some limitations.
## Usage
```haxe
@:autoBuild(aura.utils.macro.ExtensibleEnumBuilder.build())
@:build(aura.utils.macro.ExtensibleEnumBuilder.build())
class BaseEnum {
var ABaseEnumValue;
}
class ExtendingEnum extends BaseEnum {
var AnExtendingEnumValue;
}
```
This macro transforms the variables in the above example into the static
inline variables `BaseEnum.ABaseEnumValue = 0` and `ExtendingEnum.AnExtendingEnumValue = 1`.
The compiler dump after the macro looks as follows:
```haxe
// BaseEnum.dump
@:used @:autoBuild(aura.utils.macro.ExtensibleEnumBuilder.build()) @:build(aura.utils.macro.ExtensibleEnumBuilder.build())
class BaseEnum {
@:value(0)
public static inline var ABaseEnumValue:Int = 0;
@:value(ABaseEnumValue + 1)
static inline var _SubtypeOffset:Int = 1;
}
// ExtendingEnum.dump
@:used @:build(aura.utils.macro.ExtensibleEnumBuilder.build()) @:autoBuild(aura.utils.macro.ExtensibleEnumBuilder.build())
class ExtendingEnum extends BaseEnum {
@:value(@:privateAccess Main.BaseEnum._SubtypeOffset)
public static inline var AnExtendingEnumValue:Int = 1;
@:value(AnExtendingEnumValue + 1)
static inline var _SubtypeOffset:Int = 2;
}
```
## Limitations
- Only integer types are supported.
- The enums are stored in classes instead of `enum abstract` types.
- Actual values are typed as plain `Int`, so there is no auto-completion and switch/case statements are less intelligent.
- No actual OOP-like inheritance (which wouldn't work with enums since enum inheritance would need to be contravariant).
More importantly, only the values of the variables are extended; extending enums _do not inherit the variables_
of their superclass enums.
- A small amount of complexity and compile time is added by using a macro.
**/
class ExtensibleEnumBuilder {
@:persistent static final SUBTYPE_VARNAME = "_SubtypeOffset";
public static macro function build(): Array<Field> {
final fields = Context.getBuildFields();
final newFields = new Array<Field>();
final cls = Context.getLocalClass().get();
final superClass = cls.superClass;
final isExtending = superClass != null;
var lastField: Null<Field> = null;
for (field in fields) {
switch (field.kind) {
case FVar(complexType, expr):
var newExpr: Expr;
if (lastField == null) {
if (isExtending) {
final path = classTypeToStringPath(superClass.t.get());
newExpr = macro @:pos(Context.currentPos()) @:privateAccess ${strPathToExpr(path)}.$SUBTYPE_VARNAME;
}
else {
newExpr = macro 0;
}
}
else {
newExpr = macro $i{lastField.name} + 1;
}
newFields.push({
name: field.name,
access: [APublic, AStatic, AInline],
kind: FVar(complexType, newExpr),
meta: field.meta,
doc: field.doc,
pos: Context.currentPos()
});
lastField = field;
default:
newFields.push(field);
}
}
newFields.push({
name: SUBTYPE_VARNAME,
access: [APrivate, AStatic, AInline],
kind: FVar(macro: Int, lastField != null ? macro $i{lastField.name} + 1 : macro 0),
pos: Context.currentPos()
});
return newFields;
}
static function classTypeToStringPath(classType: ClassType): String {
var moduleName = classType.module.split(".").pop();
final name = moduleName + "." + classType.name;
return classType.pack.length == 0 ? name : classType.pack.join(".") + "." + name;
}
static inline function strPathToExpr(path: String): Expr {
// final pathArray = path.split(".");
// final first = EConst(CIdent(pathArray.shift()));
// var expr = { expr: first, pos: Context.currentPos() };
// for (item in pathArray) {
// expr = { expr: EField(expr, item), pos: Context.currentPos() };
// }
// return expr;
return macro $p{path.split(".")};
}
}

80
lib/aura/Tests/.vscode/tasks.json vendored Normal file
View File

@ -0,0 +1,80 @@
{
// See https://go.microsoft.com/fwlink/?LinkId=733558
// for the documentation about the tasks.json format
"version": "2.0.0",
"tasks": [
{
"label": "Aura: Run unit tests",
"type": "process",
"group": {
"kind": "test",
"isDefault": true
},
"presentation": {
"reveal": "always",
"panel": "dedicated",
},
"command": "node",
"args": [
"run.js"
],
"options": {
"cwd": "${workspaceFolder}",
"env": {
"KHA_PATH": "${command:kha.findKha}",
"ELECTRON_BIN": "${command:kha.findKhaElectron}",
// "ELECTRON_NO_ATTACH_CONSOLE": "true",
}
},
"problemMatcher": [
"$haxe", // Default Haxe matcher
{
// Electron
"owner": "custom",
"fileLocation": ["relative", "${workspaceFolder}"],
"pattern": [
// {
// "regexp": "^\\s*Error:\\s+(.*)\\s*$",
// "message": 1
// },
// {
// "regexp": "^\\s*Stack:\\s*$",
// },
// {
// "regexp": "^\\s*$",
// },
// {
// "regexp": "^\\s*Called from\\s+(.*)\\s+\\(file:\\/\\/\\/(.*)\\s+line\\s+(\\d+)\\s+column\\s+(\\d+)\\)\\s*$",
// "code": 1,
// "file": 2,
// "line": 3,
// "column": 4
// }
{
"regexp": "^\\s*Error:\\s+Uncaught\\s+(.*):(\\d+):\\s+(.*)\\s*$",
"file": 1,
"line": 2,
"message": 3
},
]
},
{
// Also catch exceptions thrown by test failures in nodejs
"owner": "haxe",
"fileLocation": ["relative", "${workspaceFolder}"],
"pattern": [
{
"regexp": "^\\<ref\\s+\\*1\\>\\s+Error:\\s+(.*):(\\d+):(.*)$",
"file": 1,
"line": 2,
"message": 3,
},
]
}
],
},
]
}

View File

@ -0,0 +1,18 @@
electron.ipcMain.on('log-main', (event, type, text) => {
switch (type) {
case "log":
console.log(text);
break;
case "info":
console.info(text);
break;
case "warn":
console.warn(text);
break;
case "error":
console.error(text);
break;
default:
throw "Unreachable";
}
});

View File

@ -0,0 +1,20 @@
<!--
Custom index.html to prevent a wrong layout of test results due to the Kha canvas.
-->
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8"/>
<title>Aura Unit Tests</title>
</head>
<body style="margin: 0; padding: 0;">
<!--
We still need to include the canvas to prevent Kha errors, but it's
at size (0, 0).
-->
<canvas id="khanvas" width="0" height="0" tabindex="-1"></canvas>
<script src="kha.js"></script>
</body>
</html>

96
lib/aura/Tests/Main.hx Normal file
View File

@ -0,0 +1,96 @@
package;
import utest.Runner;
import utest.ui.Report;
#if instrument
import instrument.Instrumentation;
#end
class Main {
static function main() {
kha.System.start({title: "Aura Unit Tests", width: 1024, height: 768}, (window: kha.Window) -> {
replaceConsoleFunctions();
#if (AURA_ASSERT_LEVEL!="Debug")
trace("Warning: Running tests below highest assertion level, some tests might erroneously succeed");
#end
kha.Assets.loadEverything(() -> {
kha.audio2.Audio.samplesPerSecond = 44100;
aura.Aura.init();
var runner = new Runner();
runner.addCases(auratests, true);
// addCases() only allows one class per file (https://github.com/haxe-utest/utest/blob/f759c0aa257aa723b3dd607cf7cb53d16194d13f/src/utest/Runner.hx#L171),
// so we manually add classes here where this is not the case
runner.addCase(new auratests.dsp.TestSparseConvolver.TestSparseImpulseBuffer());
runner.onComplete.add((_) -> {
#if instrument
Instrumentation.endInstrumentation(Coverage);
#end
});
Report.create(runner);
// new utest.ui.text.PrintReport(runner);
runner.run();
});
});
}
/**
In Kha applications, the `console.log()` calls issued by `trace` happen
within the renderer process, which prevents them from showing up in the
terminal (instead they only show up in the devtools console).
A possible workaround is to run electron with `--enable-logging`,
but this buries the traces in a lot of irrelevant and noisy debug
output, and on Windows a bunch of terminal windows are opened if
electron is not called directly from the shell. So instead, we send
traces to the main process and then log them there.
**See:**
- Log in main process/renderer process:
- https://stackoverflow.com/a/31759944/9985959
- Overriding console functions:
- https://stackoverflow.com/a/30197398/9985959
- Electron opening multiple empty terminals on Windows:
- https://github.com/electron/electron/issues/3846
- https://github.com/electron/electron/issues/4582
- https://github.com/electron-userland/spectron/issues/60#issuecomment-482070086
**/
static function replaceConsoleFunctions() {
#if kha_debug_html5
final oldConsole: Dynamic = js.Syntax.code("window.console");
function log(text: Dynamic) {
oldConsole.log(text);
js.Syntax.code("window.electron.logToMainProcess('log', {0})", text);
}
function info(text: Dynamic) {
oldConsole.info(text);
js.Syntax.code("window.electron.logToMainProcess('info', {0})", text);
}
function warn(text: Dynamic) {
oldConsole.warn(text);
js.Syntax.code("window.electron.logToMainProcess('warn', {0})", text);
}
function error(text: Dynamic) {
oldConsole.error(text);
js.Syntax.code("window.electron.logToMainProcess('error', {0})", text);
}
js.Syntax.code("window.console = {log: {0}, info: {1}, warn: {2}, error: {3}}", log, info, warn, error);
#end
}
}

View File

@ -0,0 +1,2 @@
-lib utest:1.13.2
-lib instrument:git:https://github.com/AlexHaxe/haxe-instrument.git#92a5691c7e77a696532e2e13ac1f717841f43015

48
lib/aura/Tests/Utils.hx Normal file
View File

@ -0,0 +1,48 @@
package;
import haxe.PosInfos;
import utest.Assert;
import kha.arrays.Float32Array;
import aura.Aura;
import aura.channels.UncompBufferChannel;
inline function createDummyHandle(): BaseChannelHandle {
final data = new kha.arrays.Float32Array(8);
final channel = new UncompBufferChannel(data, false);
return new BaseChannelHandle(channel);
}
inline function int32ToBytesString(i: Int): String {
var str = "";
for (j in 0...32) {
final mask = 1 << (31 - j);
str += (i & mask) == 0 ? "0" : "1";
}
return str;
}
inline function assertRaisesAssertion(func: Void->Void) {
#if (AURA_ASSERT_LEVEL!="NoAssertions")
Assert.raises(func, aura.utils.Assert.AuraAssertionException);
#else
Assert.pass();
#end
}
function assertEqualsFloat32Array(expected: Float32Array, have: Float32Array, ?pos: PosInfos) {
if (expected.length != have.length) {
Assert.fail('Expected Float32Array of length ${expected.length}, but got length ${have.length}', pos);
return;
}
for (i in 0...expected.length) {
if (!@:privateAccess Assert._floatEquals(expected[i], have[i])) {
Assert.fail('Expected value at index $i to be ${expected[i]}, but got ${have[i]} (only first difference reported)', pos);
return;
}
}
Assert.pass(null, pos);
}

View File

@ -0,0 +1,121 @@
package auratests;
import utest.Assert;
import kha.arrays.Float32Array;
import aura.Aura;
import aura.Types.Hertz;
import aura.types.AudioBuffer;
import aura.utils.BufferUtils;
import Utils;
class StaticValueGenerator extends aura.channels.generators.BaseGenerator {
public var counter = 0;
inline function new() {}
public static function create(): BaseChannelHandle {
return new BaseChannelHandle(new StaticValueGenerator());
}
function nextSamples(requestedSamples: AudioBuffer, sampleRate: Hertz) {
for (i in 0...requestedSamples.channelLength) {
for (c in 0...requestedSamples.numChannels) {
requestedSamples.getChannelView(c)[i] = (++counter) / 4096;
}
}
}
}
@:access(aura.Aura)
class TestAura extends utest.Test {
final staticInput = StaticValueGenerator.create();
function setup() {
staticInput.play();
@:privateAccess (cast staticInput.channel: StaticValueGenerator).counter = 0;
Aura.blockBufPos = 0;
}
function teardown() {
staticInput.setMixChannel(null);
}
function test_audioCallback_zeroIfNoInput() {
final compareArray = createEmptyF32Array(Aura.BLOCK_SIZE);
final buffer = new kha.audio2.Buffer(Aura.BLOCK_SIZE, 2, 44100);
fillBuffer(buffer.data, -1.0); // Poison buffer
Aura.audioCallback(new kha.internal.IntBox(Aura.BLOCK_SIZE), buffer);
assertEqualsFloat32Array(compareArray, buffer.data);
}
function test_audioCallback_zeroIfNoSampleCache() {
final compareArray = createEmptyF32Array(Aura.BLOCK_SIZE);
staticInput.setMixChannel(Aura.masterChannel);
// Force sampleCache to be null
Aura.p_samplesBuffer.set(null);
kha.audio2.Audio.disableGcInteractions = true;
final buffer = new kha.audio2.Buffer(Aura.BLOCK_SIZE, 2, 44100);
fillBuffer(buffer.data, -1.0); // Poison buffer
Aura.audioCallback(new kha.internal.IntBox(Aura.BLOCK_SIZE), buffer);
assertEqualsFloat32Array(compareArray, buffer.data);
}
function test_audioCallback_contiguouslyWritesBlocksToOutput() {
final numRequestedSamples = Aura.BLOCK_SIZE * 2 + 2;
final compareArray = new Float32Array(numRequestedSamples);
for (i in 0...compareArray.length) {
compareArray[i] = (i + 1) / 4096;
}
staticInput.setMixChannel(Aura.masterChannel);
final buffer = new kha.audio2.Buffer(numRequestedSamples, 2, 44100);
fillBuffer(buffer.data, -1.0); // Poison buffer
Aura.audioCallback(new kha.internal.IntBox(numRequestedSamples), buffer);
assertEqualsFloat32Array(compareArray, buffer.data);
}
function test_audioCallback_splitLargeBlockOverMultipleCallbacks() {
final numRequestedSamples = Std.int(Aura.BLOCK_SIZE / 2) - 2;
final compareArray = new Float32Array(3 * numRequestedSamples);
for (i in 0...compareArray.length) {
compareArray[i] = (i + 1) / 4096;
}
staticInput.setMixChannel(Aura.masterChannel);
final buffer = new kha.audio2.Buffer(numRequestedSamples, 2, 44100);
fillBuffer(buffer.data, -1.0); // Poison buffer
Aura.audioCallback(new kha.internal.IntBox(numRequestedSamples), buffer);
assertEqualsFloat32Array(compareArray.subarray(0, numRequestedSamples), buffer.data);
fillBuffer(buffer.data, -1.0); // Poison buffer
Aura.audioCallback(new kha.internal.IntBox(numRequestedSamples), buffer);
assertEqualsFloat32Array(compareArray.subarray(numRequestedSamples, numRequestedSamples * 2), buffer.data);
fillBuffer(buffer.data, -1.0); // Poison buffer
Aura.audioCallback(new kha.internal.IntBox(numRequestedSamples), buffer);
assertEqualsFloat32Array(compareArray.subarray(numRequestedSamples * 2, numRequestedSamples * 3), buffer.data);
}
// TODO
// function test_audioCallback_synchronizesMasterChannel() {}
// function test_audioCallback_updatesTime() {}
// function test_audioCallback_numChannelsOtherThanNUM_OUTPUT_CHANNELS() {
// TODO this needs changes in the audio callback. Too dynamic? But Kha might request this...
// }
}

View File

@ -0,0 +1,95 @@
package auratests;
import utest.Assert;
import aura.Aura;
import aura.Time;
import aura.Listener;
import aura.math.Vec3;
import Utils;
@:access(aura.Listener)
class TestListener extends utest.Test {
var listener: Listener;
function setup() {
listener = new Listener();
}
function teardown() {
Time.overrideTime = null;
}
function test_setLocation_multipleCallsOnFirstTimestep() {
Time.overrideTime = 0.0;
listener.setLocation(new Vec3(0.5, 0.6, 0.7));
Assert.floatEquals(0.5, listener.location.x);
Assert.floatEquals(0.6, listener.location.y);
Assert.floatEquals(0.7, listener.location.z);
Assert.floatEquals(0.0, listener.velocity.x);
Assert.floatEquals(0.0, listener.velocity.y);
Assert.floatEquals(0.0, listener.velocity.z);
Time.overrideTime = 0.0;
listener.setLocation(new Vec3(1.0, 2.0, 3.0));
Assert.floatEquals(1.0, listener.location.x);
Assert.floatEquals(2.0, listener.location.y);
Assert.floatEquals(3.0, listener.location.z);
Assert.floatEquals(0.0, listener.velocity.x);
Assert.floatEquals(0.0, listener.velocity.y);
Assert.floatEquals(0.0, listener.velocity.z);
}
function test_setLocation_firstCall_timeDeltaZero() {
Time.overrideTime = 0.0;
listener.setLocation(new Vec3(0.5, 0.6, 0.7));
Assert.floatEquals(0.5, listener.location.x);
Assert.floatEquals(0.6, listener.location.y);
Assert.floatEquals(0.7, listener.location.z);
Assert.floatEquals(0.0, listener.velocity.x);
Assert.floatEquals(0.0, listener.velocity.y);
Assert.floatEquals(0.0, listener.velocity.z);
}
function test_setLocation_firstCall_timeDeltaPositive() {
Time.overrideTime = 2.0;
listener.setLocation(new Vec3(0.5, 0.6, 0.7));
Assert.floatEquals(0.5, listener.location.x);
Assert.floatEquals(0.6, listener.location.y);
Assert.floatEquals(0.7, listener.location.z);
Assert.floatEquals(0.0, listener.velocity.x);
Assert.floatEquals(0.0, listener.velocity.y);
Assert.floatEquals(0.0, listener.velocity.z);
}
function test_setLocation_subsequentCalls_timeDeltaZero() {
// Regression test for https://github.com/MoritzBrueckner/aura/pull/8
Time.overrideTime = 1.0;
listener.setLocation(new Vec3(0.0, 0.0, 0.0));
Time.overrideTime = 3.0;
listener.setLocation(new Vec3(1.0, 2.0, 3.0));
Time.overrideTime = 3.0;
listener.setLocation(new Vec3(2.0, 4.0, 6.0));
Assert.floatEquals(2.0, listener.location.x);
Assert.floatEquals(4.0, listener.location.y);
Assert.floatEquals(6.0, listener.location.z);
// Compute velocity based on timestep 1.0
Assert.floatEquals(1.0, listener.velocity.x);
Assert.floatEquals(2.0, listener.velocity.y);
Assert.floatEquals(3.0, listener.velocity.z);
}
}

View File

@ -0,0 +1,95 @@
package auratests.channels;
import utest.Assert;
import aura.Aura;
import aura.channels.MixChannel;
import aura.channels.UncompBufferResamplingChannel;
class TestBaseChannelHandle extends utest.Test {
var handle: BaseChannelHandle;
var channel: UncompBufferResamplingChannel;
var data = new kha.arrays.Float32Array(8);
function setup() {
channel = new UncompBufferResamplingChannel(data, false, 44100);
handle = new BaseChannelHandle(channel);
}
function teardown() {}
function test_setMixChannelAddsInputIfNotYetExisting() {
final handle1 = new MixChannelHandle(new MixChannel());
final handle2 = new MixChannelHandle(new MixChannel());
Assert.equals(0, handle2.getNumInputs());
Assert.isTrue(handle1.setMixChannel(handle2));
Assert.equals(1, handle2.getNumInputs());
}
function test_setMixChannelDoesntAddAlreadyExistingInput() {
final handle1 = new MixChannelHandle(new MixChannel());
final handle2 = new MixChannelHandle(new MixChannel());
Assert.isTrue(handle1.setMixChannel(handle2));
Assert.equals(1, handle2.getNumInputs());
Assert.isTrue(handle1.setMixChannel(handle2));
Assert.equals(1, handle2.getNumInputs());
}
function test_setMixChannelNullRemovesInputIfExisting() {
final handle1 = new MixChannelHandle(new MixChannel());
final handle2 = new MixChannelHandle(new MixChannel());
Assert.equals(0, handle2.getNumInputs());
Assert.isTrue(handle1.setMixChannel(null));
Assert.equals(0, handle2.getNumInputs());
Assert.isTrue(handle1.setMixChannel(handle2));
Assert.equals(1, handle2.getNumInputs());
Assert.isTrue(handle1.setMixChannel(null));
Assert.equals(0, handle2.getNumInputs());
}
function test_setMixChannelSwitchingMixChannelCorrectlyChangesInputs() {
final handle1 = new MixChannelHandle(new MixChannel());
final handle2 = new MixChannelHandle(new MixChannel());
final handle3 = new MixChannelHandle(new MixChannel());
Assert.equals(0, handle2.getNumInputs());
Assert.equals(0, handle3.getNumInputs());
Assert.isTrue(handle1.setMixChannel(handle2));
Assert.equals(1, handle2.getNumInputs());
Assert.equals(0, handle3.getNumInputs());
Assert.isTrue(handle1.setMixChannel(handle3));
Assert.equals(0, handle2.getNumInputs());
Assert.equals(1, handle3.getNumInputs());
}
function test_setMixChannelSelfReferenceReturnsFalseAndRemovesInput() {
final handle1 = new MixChannelHandle(new MixChannel());
final handle2 = new MixChannelHandle(new MixChannel());
Assert.isTrue(handle1.setMixChannel(handle2));
Assert.equals(1, handle2.getNumInputs());
Assert.isFalse(handle1.setMixChannel(handle1));
Assert.equals(0, handle2.getNumInputs());
}
function test_setMixChannelCircularDependencyReturnsFalseAndRemovesInput() {
final handle1 = new MixChannelHandle(new MixChannel());
final handle2 = new MixChannelHandle(new MixChannel());
final handle3 = new MixChannelHandle(new MixChannel());
final handle4 = new MixChannelHandle(new MixChannel());
Assert.isTrue(handle3.setMixChannel(handle4));
Assert.isTrue(handle1.setMixChannel(handle2));
Assert.isTrue(handle2.setMixChannel(handle3));
Assert.isFalse(handle3.setMixChannel(handle1));
Assert.equals(0, handle4.getNumInputs());
}
}

View File

@ -0,0 +1,41 @@
package auratests.channels;
import utest.Assert;
import aura.channels.MixChannel;
@:access(aura.channels.MixChannel)
class TestMixChannel extends utest.Test {
var mixChannel: MixChannel;
var mixChannelHandle: MixChannelHandle;
function setupClass() {}
function setup() {
mixChannel = new MixChannel();
mixChannelHandle = new MixChannelHandle(mixChannel);
}
function teardown() {}
function test_startUnpausedAndUnfinished() {
// Regression test for https://github.com/MoritzBrueckner/aura/issues/7
final inputHandle = new MixChannelHandle(new MixChannel());
Assert.isFalse(mixChannel.paused);
Assert.isFalse(mixChannel.finished);
}
function test_isNotPlayable_ifNoInputChannelExists() {
Assert.isFalse(mixChannel.isPlayable());
}
function test_isPlayable_ifInputChannelExists() {
final inputHandle = new MixChannelHandle(new MixChannel());
inputHandle.setMixChannel(mixChannelHandle);
Assert.isTrue(mixChannel.isPlayable());
}
}

View File

@ -0,0 +1,218 @@
package auratests.channels;
import utest.Assert;
import kha.arrays.Float32Array;
import aura.Types.Balance;
import aura.channels.UncompBufferChannel;
import aura.dsp.sourcefx.SourceEffect;
import aura.types.AudioBuffer;
@:access(aura.channels.UncompBufferChannel)
class TestUncompBufferChannel extends utest.Test {
static inline var channelLength = 16;
var audioChannel: UncompBufferChannel;
var sourceFX1: SourceFXDummy;
var sourceFX2: SourceFXDummy;
final data = new Float32Array(2 * channelLength);
function setupClass() {}
function setup() {
audioChannel = new UncompBufferChannel(data, false);
sourceFX1 = new SourceFXDummy();
sourceFX2 = new SourceFXDummy();
audioChannel.addSourceEffect(sourceFX1);
audioChannel.addSourceEffect(sourceFX2);
}
function teardown() {}
function test_optionallyApplySourceEffects_isAppliedOnFirstPlay_ifNoEffectIsConfiguredToApplyOnReplay() {
sourceFX1.applyOnReplay.store(false);
sourceFX2.applyOnReplay.store(false);
Assert.isFalse(sourceFX1.wasProcessCalled);
Assert.isFalse(sourceFX2.wasProcessCalled);
audioChannel.play(false);
Assert.isTrue(sourceFX1.wasProcessCalled);
Assert.isTrue(sourceFX2.wasProcessCalled);
}
function test_optionallyApplySourceEffects_isAppliedOnFirstPlay_ifAnyEffectIsConfiguredToApplyOnReplay() {
sourceFX1.applyOnReplay.store(false);
sourceFX2.applyOnReplay.store(true);
Assert.isFalse(sourceFX1.wasProcessCalled);
Assert.isFalse(sourceFX2.wasProcessCalled);
audioChannel.play(false);
Assert.isTrue(sourceFX1.wasProcessCalled);
Assert.isTrue(sourceFX2.wasProcessCalled);
}
function test_optionallyApplySourceEffects_isNotAppliedOnSecondPlayAfterFinish_ifNoEffectIsConfiguredToApplyOnReplay() {
sourceFX1.applyOnReplay.store(false);
sourceFX2.applyOnReplay.store(false);
audioChannel.play(false);
audioChannel.stop();
sourceFX1.wasProcessCalled = false;
sourceFX2.wasProcessCalled = false;
audioChannel.play(false);
Assert.isFalse(sourceFX1.wasProcessCalled);
Assert.isFalse(sourceFX2.wasProcessCalled);
}
function test_optionallyApplySourceEffects_isAppliedOnSecondPlayAfterFinish_ifAnyEffectIsConfiguredToApplyOnReplay() {
sourceFX1.applyOnReplay.store(false);
sourceFX2.applyOnReplay.store(true);
audioChannel.play(false);
audioChannel.stop();
sourceFX1.wasProcessCalled = false;
sourceFX2.wasProcessCalled = false;
audioChannel.play(false);
Assert.isTrue(sourceFX1.wasProcessCalled);
Assert.isTrue(sourceFX2.wasProcessCalled);
}
function test_optionallyApplySourceEffects_isNotAppliedOnPlayAfterPause_ifNoEffectIsConfiguredToApplyOnReplay() {
sourceFX1.applyOnReplay.store(false);
sourceFX2.applyOnReplay.store(false);
audioChannel.play(false);
audioChannel.pause();
sourceFX1.wasProcessCalled = false;
sourceFX2.wasProcessCalled = false;
audioChannel.play(false);
Assert.isFalse(sourceFX1.wasProcessCalled);
Assert.isFalse(sourceFX2.wasProcessCalled);
}
function test_optionallyApplySourceEffects_isNotAppliedOnPlayAfterPause_ifAnyEffectIsConfiguredToApplyOnReplay() {
sourceFX1.applyOnReplay.store(false);
sourceFX2.applyOnReplay.store(true);
audioChannel.play(false);
audioChannel.pause();
sourceFX1.wasProcessCalled = false;
sourceFX2.wasProcessCalled = false;
audioChannel.play(false);
Assert.isFalse(sourceFX1.wasProcessCalled);
Assert.isFalse(sourceFX2.wasProcessCalled);
}
function test_optionallyApplySourceEffects_isNotAppliedOnRetrigger_ifNoEffectIsConfiguredToApplyOnReplay() {
sourceFX1.applyOnReplay.store(false);
sourceFX2.applyOnReplay.store(false);
audioChannel.play(false);
sourceFX1.wasProcessCalled = false;
sourceFX2.wasProcessCalled = false;
audioChannel.play(true);
Assert.isFalse(sourceFX1.wasProcessCalled);
Assert.isFalse(sourceFX2.wasProcessCalled);
}
function test_optionallyApplySourceEffects_isAppliedOnRetrigger_ifAnyEffectIsConfiguredToApplyOnReplay() {
sourceFX1.applyOnReplay.store(false);
sourceFX2.applyOnReplay.store(true);
audioChannel.play(false);
sourceFX1.wasProcessCalled = false;
sourceFX2.wasProcessCalled = false;
audioChannel.play(true);
Assert.isTrue(sourceFX1.wasProcessCalled);
Assert.isTrue(sourceFX2.wasProcessCalled);
}
function test_optionallyApplySourceEffects_isAppliedOnConsecutivePlays_ifEffectsHaveChanged() {
sourceFX1.applyOnReplay.store(false);
sourceFX2.applyOnReplay.store(false);
audioChannel.play(false);
audioChannel.stop();
sourceFX1.wasProcessCalled = false;
sourceFX2.wasProcessCalled = false;
final tempSourceFX = new SourceFXDummy();
audioChannel.addSourceEffect(tempSourceFX);
audioChannel.play(false);
Assert.isTrue(sourceFX1.wasProcessCalled);
Assert.isTrue(sourceFX2.wasProcessCalled);
audioChannel.stop();
sourceFX1.wasProcessCalled = false;
sourceFX2.wasProcessCalled = false;
audioChannel.removeSourceEffect(tempSourceFX);
audioChannel.play(false);
Assert.isTrue(sourceFX1.wasProcessCalled);
Assert.isTrue(sourceFX2.wasProcessCalled);
}
function test_nextSamples_onLoop_ApplySourceEffectsOnce() {
audioChannel.looping = true;
Assert.equals(0, sourceFX1.numProcessCalled);
Assert.equals(0, sourceFX2.numProcessCalled);
final outBuffer = new AudioBuffer(2, channelLength + 1); // One sample longer than the source, so the channel must loop within this single call
audioChannel.nextSamples(outBuffer, 1000);
// Make sure process is only called once for _all_ channels
Assert.equals(1, sourceFX1.numProcessCalled);
Assert.equals(1, sourceFX2.numProcessCalled);
}
}
private class SourceFXDummy extends SourceEffect {
public var wasProcessCalled = false;
public var numProcessCalled = 0;
public function new() {}
function calculateRequiredChannelLength(srcChannelLength: Int): Int {
return srcChannelLength;
}
function process(srcBuffer: AudioBuffer, srcChannelLength: Int, dstBuffer: AudioBuffer): Int {
wasProcessCalled = true;
numProcessCalled++;
return srcChannelLength;
}
}
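
Taken together, the cases above spell out when source effects are (re)applied: always on the first play and whenever the effect set has changed, on replays after stop() and on retriggers only if at least one effect stores `true` in its `applyOnReplay` flag, and never when merely resuming from pause(). For illustration, here is a minimal `SourceEffect` in the spirit of the dummy above that actually touches the samples, a plain gain. It is a sketch: the `numChannels` field on `AudioBuffer` is an assumption, since only `getChannelView()` and the two overridden methods appear in the tests themselves.

class GainSourceFX extends SourceEffect {
	final gain: Float;

	public function new(gain: Float) {
		this.gain = gain;
	}

	function calculateRequiredChannelLength(srcChannelLength: Int): Int {
		return srcChannelLength; // A gain does not lengthen the source, unlike e.g. a reverb tail
	}

	function process(srcBuffer: AudioBuffer, srcChannelLength: Int, dstBuffer: AudioBuffer): Int {
		for (c in 0...srcBuffer.numChannels) { // numChannels is assumed, see the note above
			final src = srcBuffer.getChannelView(c);
			final dst = dstBuffer.getChannelView(c);
			for (i in 0...srcChannelLength) {
				dst[i] = src[i] * gain;
			}
		}
		return srcChannelLength;
	}
}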

View File

@ -0,0 +1,126 @@
package auratests.channels;
import utest.Assert;
import kha.arrays.Float32Array;
import aura.Types.Balance;
import aura.channels.UncompBufferResamplingChannel;
import aura.dsp.sourcefx.SourceEffect;
import aura.types.AudioBuffer;
@:access(aura.channels.UncompBufferResamplingChannel)
class TestUncompBufferResamplingChannel extends utest.Test {
static inline var channelLength = 16;
var audioChannel: UncompBufferResamplingChannel;
final rampLeft = new Array<Float>();
final rampRight = new Array<Float>();
final data = new Float32Array(2 * channelLength); // interleaved stereo
function setupClass() {
rampLeft.resize(channelLength);
rampRight.resize(channelLength);
for (i in 0...channelLength) { // Fill data with a value ramp
final val = (i + 1) / channelLength;
data[i * 2 + 0] = rampLeft[i] = val;
data[i * 2 + 1] = rampRight[i] = -val;
}
}
function setup() {
audioChannel = new UncompBufferResamplingChannel(data, false, 1000);
}
function teardown() {}
function test_dataConversion() {
for (i in 0...channelLength) {
Assert.floatEquals(rampLeft[i], audioChannel.data.getChannelView(0)[i]);
Assert.floatEquals(rampRight[i], audioChannel.data.getChannelView(1)[i]);
}
}
function test_nextSamples() {
final outBuffer = new AudioBuffer(2, channelLength);
audioChannel.nextSamples(outBuffer, 1000);
for (i in 0...channelLength) {
Assert.floatEquals(rampLeft[i], outBuffer.getChannelView(0)[i]);
Assert.floatEquals(rampRight[i], outBuffer.getChannelView(1)[i]);
}
// Now the channel has processed all data and will reset position to 0
Assert.equals(0, audioChannel.playbackPosition);
Assert.floatEquals(0.0, audioChannel.floatPosition);
// Not looping, but request more samples than the source contains; the extra samples must be silence
final longOutBuffer = new AudioBuffer(2, channelLength + 4);
audioChannel.nextSamples(longOutBuffer, 1000);
for (i in 0...channelLength) {
Assert.floatEquals(rampLeft[i], longOutBuffer.getChannelView(0)[i]);
Assert.floatEquals(rampRight[i], longOutBuffer.getChannelView(1)[i]);
}
for (i in channelLength...channelLength + 4) {
Assert.floatEquals(0.0, longOutBuffer.getChannelView(0)[i]);
Assert.floatEquals(0.0, longOutBuffer.getChannelView(1)[i]);
}
// Now request output at half the sample rate: the source is consumed twice as fast, so the second half of the output should be zero
audioChannel.playbackPosition = 0;
audioChannel.floatPosition = 0.0;
audioChannel.nextSamples(outBuffer, 500);
for (i in Std.int(channelLength / 2)...channelLength) {
Assert.floatEquals(0.0, outBuffer.getChannelView(0)[i]);
Assert.floatEquals(0.0, outBuffer.getChannelView(1)[i]);
}
// Now with looping
audioChannel.playbackPosition = 0;
audioChannel.floatPosition = 0.0;
audioChannel.looping = true;
audioChannel.nextSamples(outBuffer, 500);
final halfChannelLength = Std.int(channelLength / 2);
for (i in 0...halfChannelLength) {
Assert.floatEquals(outBuffer.getChannelView(0)[i], outBuffer.getChannelView(0)[halfChannelLength + i], null, '$i');
Assert.floatEquals(outBuffer.getChannelView(1)[i], outBuffer.getChannelView(1)[halfChannelLength + i], null, '$i');
}
// TODO: check sample-precise looping without gaps at unusual sample rates?
}
function test_nextSamples_onLoop_ApplySourceEffectsOnce() {
audioChannel.looping = true;
final sourceFX = new SourceFXDummy();
audioChannel.addSourceEffect(sourceFX);
Assert.equals(0, sourceFX.numProcessCalled);
final outBuffer = new AudioBuffer(2, channelLength + 1); // One sample longer than the source, so the channel must loop within this single call
audioChannel.nextSamples(outBuffer, 1000);
// Make sure process is only called once for _all_ channels
Assert.equals(1, sourceFX.numProcessCalled);
}
}
private class SourceFXDummy extends SourceEffect {
public var numProcessCalled = 0;
public function new() {}
function calculateRequiredChannelLength(srcChannelLength: Int): Int {
return srcChannelLength;
}
function process(srcBuffer: AudioBuffer, srcChannelLength: Int, dstBuffer: AudioBuffer): Int {
numProcessCalled++;
return srcChannelLength;
}
}
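
A worked note on the resampling arithmetic checked by test_nextSamples: the channel data is recorded at 1000 Hz, so for output requested at 500 Hz every output sample advances the read position by 1000 / 500 = 2 source samples. The 16 source samples are therefore exhausted after 8 output samples, which is exactly why the second half of the output buffer is expected to be silent. A self-contained sketch of that bookkeeping follows; the real channel keeps this state in the floatPosition/playbackPosition fields poked by the test, and all names here are illustrative.

class ResampleStepSketch {
	// Advance the fractional read position by one output sample.
	static function advance(floatPosition: Float, dataRate: Float, outRate: Float): Float {
		return floatPosition + dataRate / outRate;
	}

	static function main() {
		var pos = 0.0;
		for (_ in 0...8) {
			pos = advance(pos, 1000, 500);
		}
		trace(pos); // 16.0: all 16 source samples consumed after 8 output samples
	}
}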

View File

@ -0,0 +1,126 @@
package auratests.dsp;
import utest.Assert;
import kha.arrays.Float32Array;
import aura.Aura;
import aura.dsp.FFTConvolver;
import aura.types.AudioBuffer;
import aura.types.Complex;
import aura.utils.MathUtils;
import aura.utils.TestSignals;
@:access(aura.dsp.FFTConvolver)
class TestFFTConvolver extends utest.Test {
var audioBuffer: AudioBuffer;
var fftConvolver: FFTConvolver;
function setup() {
audioBuffer = new AudioBuffer(2, FFTConvolver.FFT_SIZE);
fftConvolver = new FFTConvolver();
}
function test_process_noFadeIfTemporalInterpLengthIsZero() {
fftConvolver.temporalInterpolationLength = 0;
for (i in 0...audioBuffer.channelLength) {
audioBuffer.getChannelView(0)[i] = Math.sin(i * 4 * Math.PI / audioBuffer.channelLength);
audioBuffer.getChannelView(1)[i] = Math.sin(i * 4 * Math.PI / audioBuffer.channelLength);
}
setImpulseFreqsToConstant(new Complex(1.0, 0.0));
fftConvolver.process(audioBuffer);
discardOverlapForNextProcess();
for (i in 0...FFTConvolver.FFT_SIZE) {
Assert.floatEquals(Math.sin(i * 4 * Math.PI / audioBuffer.channelLength), audioBuffer.getChannelView(0)[i]);
Assert.floatEquals(Math.sin(i * 4 * Math.PI / audioBuffer.channelLength), audioBuffer.getChannelView(1)[i]);
}
setImpulseFreqsToConstant(new Complex(0.0, 0.0));
fftConvolver.process(audioBuffer);
for (i in 0...FFTConvolver.FFT_SIZE) {
Assert.floatEquals(0, audioBuffer.getChannelView(0)[i]);
Assert.floatEquals(0, audioBuffer.getChannelView(1)[i]);
}
}
function test_process_crossfadeIfTemporalInterpLengthIsLargerZero() {
fftConvolver.temporalInterpolationLength = 20;
for (i in 0...audioBuffer.channelLength) {
audioBuffer.getChannelView(0)[i] = Math.sin(i * 4 * Math.PI / audioBuffer.channelLength);
audioBuffer.getChannelView(1)[i] = Math.sin(i * 4 * Math.PI / audioBuffer.channelLength);
}
setImpulseFreqsToConstant(new Complex(1.0, 0.0));
fftConvolver.process(audioBuffer);
discardOverlapForNextProcess();
for (i in 0...FFTConvolver.FFT_SIZE) {
final t = minF(i, fftConvolver.temporalInterpolationLength) / fftConvolver.temporalInterpolationLength;
Assert.floatEquals(lerp(0.0, Math.sin(i * 4 * Math.PI / audioBuffer.channelLength), t), audioBuffer.getChannelView(0)[i]);
Assert.floatEquals(lerp(0.0, Math.sin(i * 4 * Math.PI / audioBuffer.channelLength), t), audioBuffer.getChannelView(1)[i]);
}
for (i in 0...audioBuffer.channelLength) {
audioBuffer.getChannelView(0)[i] = Math.sin(i * 8 * Math.PI / audioBuffer.channelLength);
audioBuffer.getChannelView(1)[i] = Math.sin(i * 8 * Math.PI / audioBuffer.channelLength);
}
setImpulseFreqsToConstant(new Complex(0.0, 0.0));
fftConvolver.process(audioBuffer);
for (i in 0...FFTConvolver.FFT_SIZE) {
final t = minF(i, fftConvolver.temporalInterpolationLength) / fftConvolver.temporalInterpolationLength;
Assert.floatEquals(lerp(Math.sin(i * 8 * Math.PI / audioBuffer.channelLength), 0.0, t), audioBuffer.getChannelView(0)[i]);
Assert.floatEquals(lerp(Math.sin(i * 8 * Math.PI / audioBuffer.channelLength), 0.0, t), audioBuffer.getChannelView(1)[i]);
}
}
function test_process_crossfadeEntireChunkSize() {
fftConvolver.temporalInterpolationLength = -1;
for (i in 0...audioBuffer.channelLength) {
audioBuffer.getChannelView(0)[i] = Math.sin(i * 4 * Math.PI / audioBuffer.channelLength);
audioBuffer.getChannelView(1)[i] = Math.sin(i * 4 * Math.PI / audioBuffer.channelLength);
}
setImpulseFreqsToConstant(new Complex(1.0, 0.0));
fftConvolver.process(audioBuffer);
discardOverlapForNextProcess();
for (i in 0...FFTConvolver.FFT_SIZE) {
final t = minF(i, FFTConvolver.CHUNK_SIZE) / FFTConvolver.CHUNK_SIZE;
Assert.floatEquals(lerp(0.0, Math.sin(i * 4 * Math.PI / audioBuffer.channelLength), t), audioBuffer.getChannelView(0)[i]);
Assert.floatEquals(lerp(0.0, Math.sin(i * 4 * Math.PI / audioBuffer.channelLength), t), audioBuffer.getChannelView(1)[i]);
}
for (i in 0...audioBuffer.channelLength) {
audioBuffer.getChannelView(0)[i] = Math.sin(i * 8 * Math.PI / audioBuffer.channelLength);
audioBuffer.getChannelView(1)[i] = Math.sin(i * 8 * Math.PI / audioBuffer.channelLength);
}
setImpulseFreqsToConstant(new Complex(0.0, 0.0));
fftConvolver.process(audioBuffer);
for (i in 0...FFTConvolver.FFT_SIZE) {
final t = minF(i, FFTConvolver.CHUNK_SIZE) / FFTConvolver.CHUNK_SIZE;
Assert.floatEquals(lerp(Math.sin(i * 8 * Math.PI / audioBuffer.channelLength), 0.0, t), audioBuffer.getChannelView(0)[i]);
Assert.floatEquals(lerp(Math.sin(i * 8 * Math.PI / audioBuffer.channelLength), 0.0, t), audioBuffer.getChannelView(1)[i]);
}
}
function setImpulseFreqsToConstant(value: Complex) {
for (i in 0...FFTConvolver.FFT_SIZE) {
fftConvolver.impulseFFT.getOutput(0 + fftConvolver.currentImpulseAlternationIndex)[i] = value;
fftConvolver.impulseFFT.getOutput(2 + fftConvolver.currentImpulseAlternationIndex)[i] = value;
}
fftConvolver.currentImpulseAlternationIndex = 1 - fftConvolver.currentImpulseAlternationIndex;
fftConvolver.overlapLength[0] = FFTConvolver.CHUNK_SIZE;
fftConvolver.overlapLength[1] = FFTConvolver.CHUNK_SIZE;
fftConvolver.prevImpulseLengths[0] = FFTConvolver.CHUNK_SIZE;
fftConvolver.prevImpulseLengths[1] = FFTConvolver.CHUNK_SIZE;
}
function discardOverlapForNextProcess() {
for (c in 0...FFTConvolver.NUM_CHANNELS) {
for (i in 0...fftConvolver.overlapPrev[c].length) {
fftConvolver.overlapPrev[c][i] = 0.0;
}
}
}
}
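
The three cases above differ only in the crossfade ramp used for temporal interpolation: each output sample blends the convolution using the previous impulse response into the one using the current response with the weight t = min(i, L) / L, where L is temporalInterpolationLength (0 disables the fade and -1 substitutes CHUNK_SIZE). A tiny sketch of that ramp, mirroring the minF/lerp arithmetic in the expected values:

class CrossfadeRampSketch {
	// Linear fade-in weight: ramps from 0 to 1 over interpLength samples,
	// then stays at 1 for the rest of the block.
	static function fadeWeight(i: Int, interpLength: Int): Float {
		return Math.min(i, interpLength) / interpLength;
	}

	static function main() {
		trace(fadeWeight(0, 20));  // 0.0: entirely the previous response
		trace(fadeWeight(10, 20)); // 0.5: equal mix
		trace(fadeWeight(30, 20)); // 1.0: entirely the current response
	}
}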

View File

@ -0,0 +1,67 @@
package auratests.dsp;
import utest.Assert;
import aura.Aura;
import aura.dsp.FractionalDelayLine;
import aura.types.AudioBuffer;
import aura.utils.TestSignals;
@:access(aura.dsp.FractionalDelayLine)
class TestFractionalDelayLine extends utest.Test {
var audioBuffer: AudioBuffer;
var delayLine: FractionalDelayLine;
function setup() {
audioBuffer = new AudioBuffer(2, 8);
delayLine = new FractionalDelayLine(2, 8);
}
function test_zeroDelayTime_noDelay() {
TestSignals.fillUnitImpulse(audioBuffer.getChannelView(0));
TestSignals.fillUnitImpulse(audioBuffer.getChannelView(1));
delayLine.at_setDelayLength(Left, 0.0);
delayLine.at_setDelayLength(Right, 0.0);
delayLine.process(audioBuffer);
Assert.floatEquals(1.0, audioBuffer.getChannelView(0)[0]);
Assert.floatEquals(0.0, audioBuffer.getChannelView(0)[1]);
Assert.floatEquals(1.0, audioBuffer.getChannelView(1)[0]);
Assert.floatEquals(0.0, audioBuffer.getChannelView(1)[1]);
}
function test_integralDelayTime_independentChannels() {
TestSignals.fillUnitImpulse(audioBuffer.getChannelView(0));
TestSignals.fillUnitImpulse(audioBuffer.getChannelView(1));
delayLine.at_setDelayLength(Left, 1.0);
delayLine.at_setDelayLength(Right, 3.0);
delayLine.process(audioBuffer);
Assert.floatEquals(0.0, audioBuffer.getChannelView(0)[0]);
Assert.floatEquals(1.0, audioBuffer.getChannelView(0)[1]);
Assert.floatEquals(0.0, audioBuffer.getChannelView(1)[0]);
Assert.floatEquals(1.0, audioBuffer.getChannelView(1)[3]);
}
function test_floatDelayTime_independentChannels() {
TestSignals.fillUnitImpulse(audioBuffer.getChannelView(0));
TestSignals.fillUnitImpulse(audioBuffer.getChannelView(1));
delayLine.at_setDelayLength(Left, 0.8);
delayLine.at_setDelayLength(Right, 3.4);
delayLine.process(audioBuffer);
Assert.floatEquals(0.2, audioBuffer.getChannelView(0)[0]);
Assert.floatEquals(0.8, audioBuffer.getChannelView(0)[1]);
Assert.floatEquals(0.6, audioBuffer.getChannelView(1)[3]);
Assert.floatEquals(0.4, audioBuffer.getChannelView(1)[4]);
}
}
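
The expected values above follow from linear interpolation between the two neighboring integer delays: a delay of d = i + f samples contributes the weight 1 - f at offset i and f at offset i + 1. A unit impulse delayed by 0.8 therefore appears as 0.2 at index 0 and 0.8 at index 1, and a delay of 3.4 as 0.6 at index 3 and 0.4 at index 4. A minimal sketch of that read, illustrative only since it reads from a plain array rather than the delay line's internal buffer:

class FractionalDelaySketch {
	static function delayedSample(x: Array<Float>, n: Int, d: Float): Float {
		final i = Std.int(d); // Integer part of the delay
		final f = d - i;      // Fractional part
		final a = (n - i >= 0 && n - i < x.length) ? x[n - i] : 0.0;
		final b = (n - i - 1 >= 0 && n - i - 1 < x.length) ? x[n - i - 1] : 0.0;
		return (1 - f) * a + f * b; // Linear interpolation between neighbors
	}

	static function main() {
		final impulse = [1.0, 0, 0, 0, 0, 0, 0, 0];
		trace(delayedSample(impulse, 0, 0.8)); // 0.2
		trace(delayedSample(impulse, 1, 0.8)); // 0.8
		trace(delayedSample(impulse, 3, 3.4)); // 0.6
		trace(delayedSample(impulse, 4, 3.4)); // 0.4
	}
}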

View File

@ -0,0 +1,102 @@
package auratests.dsp;
import utest.Assert;
import kha.arrays.Float32Array;
import aura.Aura;
import aura.dsp.SparseConvolver;
import aura.types.AudioBuffer;
import aura.utils.TestSignals;
@:access(aura.dsp.SparseConvolver)
class TestSparseConvolver extends utest.Test {
var audioBuffer: AudioBuffer;
var sparseConvolver: SparseConvolver;
function setup() {
audioBuffer = new AudioBuffer(2, 512);
sparseConvolver = new SparseConvolver(1, 4);
}
function test_simpleDelay() {
for (i in 0...audioBuffer.channelLength) {
audioBuffer.getChannelView(0)[i] = Math.sin(i * 2 * Math.PI / audioBuffer.channelLength);
audioBuffer.getChannelView(1)[i] = Math.cos(i * 2 * Math.PI / audioBuffer.channelLength);
}
final impulse = sparseConvolver.impulseBuffer;
impulse.setImpulsePos(0, 3);
impulse.setImpulseMagnitude(0, 1.0);
sparseConvolver.process(audioBuffer);
final wanted = new AudioBuffer(2, audioBuffer.channelLength);
for (i in 0...wanted.channelLength) {
wanted.getChannelView(0)[i] = Math.sin((i - 3) * 2 * Math.PI / wanted.channelLength);
wanted.getChannelView(1)[i] = Math.cos((i - 3) * 2 * Math.PI / wanted.channelLength);
}
for (i in 0...3) {
Assert.floatEquals(0, audioBuffer.getChannelView(0)[i]);
Assert.floatEquals(0, audioBuffer.getChannelView(1)[i]);
}
for (i in 3...wanted.channelLength) {
Assert.floatEquals(wanted.getChannelView(0)[i], audioBuffer.getChannelView(0)[i]);
Assert.floatEquals(wanted.getChannelView(1)[i], audioBuffer.getChannelView(1)[i]);
}
// Overlap: the delayed tail of the previous block must appear at the start of this one
audioBuffer.clear();
sparseConvolver.process(audioBuffer);
for (i in 0...3) {
Assert.floatEquals(wanted.getChannelView(0)[i], audioBuffer.getChannelView(0)[i]);
Assert.floatEquals(wanted.getChannelView(1)[i], audioBuffer.getChannelView(1)[i]);
}
for (i in 3...wanted.channelLength) {
Assert.floatEquals(0, audioBuffer.getChannelView(0)[i]);
Assert.floatEquals(0, audioBuffer.getChannelView(1)[i]);
}
}
}
@:access(aura.dsp.SparseConvolver.SparseImpulseBuffer)
class TestSparseImpulseBuffer extends utest.Test {
var buffer: SparseImpulseBuffer;
function setup() {
buffer = new SparseImpulseBuffer(4);
}
function test_length() {
Assert.equals(1, new SparseImpulseBuffer(1).length);
Assert.equals(2, new SparseImpulseBuffer(2).length);
Assert.equals(3, new SparseImpulseBuffer(3).length);
Assert.equals(1024, new SparseImpulseBuffer(1024).length);
}
function test_impulsePos_notOverwrittenByOtherImpulses() {
buffer.setImpulsePos(0, 3);
buffer.setImpulsePos(1, 9);
buffer.setImpulsePos(2, 17);
buffer.setImpulsePos(3, 42);
Assert.equals(3, buffer.getImpulsePos(0));
Assert.equals(9, buffer.getImpulsePos(1));
Assert.equals(17, buffer.getImpulsePos(2));
Assert.equals(42, buffer.getImpulsePos(3));
}
function test_impulseMagnitude_notOverwrittenByOtherImpulses() {
buffer.setImpulseMagnitude(0, 0.3);
buffer.setImpulseMagnitude(1, 0.9);
buffer.setImpulseMagnitude(2, 0.17);
buffer.setImpulseMagnitude(3, 0.42);
Assert.floatEquals(0.3, buffer.getImpulseMagnitude(0));
Assert.floatEquals(0.9, buffer.getImpulseMagnitude(1));
Assert.floatEquals(0.17, buffer.getImpulseMagnitude(2));
Assert.floatEquals(0.42, buffer.getImpulseMagnitude(3));
}
}
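
For reference, the expectation in test_simpleDelay is sparse convolution written out: the output is a sum of scaled, delayed copies of the input, y[n] = sum_k mag[k] * x[n - pos[k]], and whatever extends past the end of the block has to come back as overlap at the start of the next call, which is what the second half of the test verifies. A naive single-block sketch follows; it is illustrative only, since the real SparseConvolver additionally carries that overlap tail across process() calls.

class SparseConvolutionSketch {
	static function convolveSparse(x: Array<Float>, pos: Array<Int>, mag: Array<Float>): Array<Float> {
		final y = [for (_ in 0...x.length) 0.0];
		for (k in 0...pos.length) {
			for (n in pos[k]...x.length) {
				y[n] += mag[k] * x[n - pos[k]];
			}
		}
		return y;
	}

	static function main() {
		// A single impulse at position 3 with magnitude 1.0 delays the input by 3 samples
		trace(convolveSparse([1.0, 0, 0, 0, 0], [3], [1.0])); // [0, 0, 0, 1, 0]
	}
}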

View File

@ -0,0 +1,283 @@
package auratests.dsp.panner;
import utest.Assert;
import aura.Aura;
import aura.Time;
import aura.dsp.panner.Panner;
import aura.math.Vec3;
import aura.types.AudioBuffer;
import Utils;
private class NonAbstractPanner extends Panner {
public function process(buffer: AudioBuffer) {}
}
@:access(aura.channels.BaseChannel)
@:access(aura.channels.BaseChannelHandle)
@:access(aura.dsp.panner.Panner)
class TestPanner extends utest.Test {
var handle: BaseChannelHandle;
var panner: Panner;
function setup() {
handle = Utils.createDummyHandle();
panner = new NonAbstractPanner(handle);
}
function teardown() {
Time.overrideTime = null;
}
function test_setLocation_multipleCallsOnFirstTimestep() {
Time.overrideTime = 0.0;
panner.setLocation(new Vec3(0.5, 0.6, 0.7));
Assert.floatEquals(0.5, panner.location.x);
Assert.floatEquals(0.6, panner.location.y);
Assert.floatEquals(0.7, panner.location.z);
Assert.floatEquals(0.0, panner.velocity.x);
Assert.floatEquals(0.0, panner.velocity.y);
Assert.floatEquals(0.0, panner.velocity.z);
Time.overrideTime = 0.0;
panner.setLocation(new Vec3(1.0, 2.0, 3.0));
Assert.floatEquals(1.0, panner.location.x);
Assert.floatEquals(2.0, panner.location.y);
Assert.floatEquals(3.0, panner.location.z);
Assert.floatEquals(0.0, panner.velocity.x);
Assert.floatEquals(0.0, panner.velocity.y);
Assert.floatEquals(0.0, panner.velocity.z);
}
function test_setLocation_firstCall_timeDeltaZero() {
Time.overrideTime = 0.0;
panner.setLocation(new Vec3(0.5, 0.6, 0.7));
Assert.floatEquals(0.5, panner.location.x);
Assert.floatEquals(0.6, panner.location.y);
Assert.floatEquals(0.7, panner.location.z);
Assert.floatEquals(0.0, panner.velocity.x);
Assert.floatEquals(0.0, panner.velocity.y);
Assert.floatEquals(0.0, panner.velocity.z);
}
function test_setLocation_firstCall_timeDeltaPositive() {
Time.overrideTime = 2.0;
panner.setLocation(new Vec3(0.5, 0.6, 0.7));
Assert.floatEquals(0.5, panner.location.x);
Assert.floatEquals(0.6, panner.location.y);
Assert.floatEquals(0.7, panner.location.z);
Assert.floatEquals(0.0, panner.velocity.x);
Assert.floatEquals(0.0, panner.velocity.y);
Assert.floatEquals(0.0, panner.velocity.z);
}
function test_setLocation_subsequentCalls_timeDeltaZero() {
// Regression test for https://github.com/MoritzBrueckner/aura/pull/8
Time.overrideTime = 1.0;
panner.setLocation(new Vec3(0.0, 0.0, 0.0));
Time.overrideTime = 3.0;
panner.setLocation(new Vec3(1.0, 2.0, 3.0));
Time.overrideTime = 3.0;
panner.setLocation(new Vec3(2.0, 4.0, 6.0));
Assert.floatEquals(2.0, panner.location.x);
Assert.floatEquals(4.0, panner.location.y);
Assert.floatEquals(6.0, panner.location.z);
// Compute velocity based on timestep 1.0
Assert.floatEquals(1.0, panner.velocity.x);
Assert.floatEquals(2.0, panner.velocity.y);
Assert.floatEquals(3.0, panner.velocity.z);
}
function test_update3D_noDopplerJumpIfLocationWasUninitialized() {
Time.overrideTime = 0.0;
panner.setLocation(new Vec3(20.0, 0.0, 0.0));
panner.update3D();
handle.channel.synchronize();
Assert.floatEquals(1.0, handle.channel.pDopplerRatio.targetValue);
}
function test_noDopplerEffect_ifNoMovement() {
Time.overrideTime = 0.0;
aura.Aura.listener.setLocation(new Vec3(0.0, 0.0, 0.0));
panner.setLocation(new Vec3(5.0, 4.0, 3.0));
panner.update3D();
Time.overrideTime = 0.5;
aura.Aura.listener.setLocation(new Vec3(0.0, 0.0, 0.0));
panner.setLocation(new Vec3(5.0, 4.0, 3.0));
panner.update3D();
handle.channel.synchronize();
Assert.floatEquals(1.0, handle.channel.pDopplerRatio.targetValue);
}
function test_calculateDoppler_physicallyCorrectValues_pannerMovesAway() {
Time.overrideTime = 0.0;
aura.Aura.listener.setLocation(new Vec3(0.0, 0.0, 0.0));
panner.setLocation(new Vec3(0.0, 0.0, 0.0));
panner.update3D();
Time.overrideTime = 0.5;
aura.Aura.listener.setLocation(new Vec3(0.0, 0.0, 0.0));
panner.setLocation(new Vec3(2.0, 0.0, 0.0));
panner.update3D();
Assert.floatEquals(4.0, @:privateAccess panner.velocity.length);
Assert.floatEquals(0.0, @:privateAccess aura.Aura.listener.velocity.length);
handle.channel.synchronize();
// Values calculated at
// https://www.omnicalculator.com/physics/doppler-effect?c=EUR&v=f0:5000!Hz,v:343.4!ms,vs:2!ms,vr:0!ms
// Assuming that their implementation is correct
Assert.floatEquals(4942.43 / 5000, handle.channel.pDopplerRatio.targetValue);
}
function test_calculateDoppler_physicallyCorrectValues_listenerMovesAway() {
Time.overrideTime = 0.0;
aura.Aura.listener.setLocation(new Vec3(0.0, 0.0, 0.0));
panner.setLocation(new Vec3(0.0, 0.0, 0.0));
panner.update3D();
Time.overrideTime = 0.5;
aura.Aura.listener.setLocation(new Vec3(2.0, 0.0, 0.0));
panner.setLocation(new Vec3(0.0, 0.0, 0.0));
panner.update3D();
Assert.floatEquals(4.0, @:privateAccess aura.Aura.listener.velocity.length);
Assert.floatEquals(0.0, @:privateAccess panner.velocity.length);
handle.channel.synchronize();
Assert.floatEquals(4941.76 / 5000, handle.channel.pDopplerRatio.targetValue);
}
function test_calculateDoppler_physicallyCorrectValues_pannerMovesCloser() {
Time.overrideTime = 0.0;
aura.Aura.listener.setLocation(new Vec3(0.0, 0.0, 0.0));
panner.setLocation(new Vec3(4.0, 0.0, 0.0));
panner.update3D();
Time.overrideTime = 0.5;
aura.Aura.listener.setLocation(new Vec3(0.0, 0.0, 0.0));
panner.setLocation(new Vec3(2.0, 0.0, 0.0));
panner.update3D();
Assert.floatEquals(4.0, @:privateAccess panner.velocity.length);
Assert.floatEquals(0.0, @:privateAccess aura.Aura.listener.velocity.length);
handle.channel.synchronize();
Assert.floatEquals(5058.93 / 5000, handle.channel.pDopplerRatio.targetValue);
}
function test_calculateDoppler_physicallyCorrectValues_listenerMovesCloser() {
Time.overrideTime = 0.0;
aura.Aura.listener.setLocation(new Vec3(4.0, 0.0, 0.0));
panner.setLocation(new Vec3(0.0, 0.0, 0.0));
panner.update3D();
Time.overrideTime = 0.5;
aura.Aura.listener.setLocation(new Vec3(2.0, 0.0, 0.0));
panner.setLocation(new Vec3(0.0, 0.0, 0.0));
panner.update3D();
Assert.floatEquals(4.0, @:privateAccess aura.Aura.listener.velocity.length);
Assert.floatEquals(0.0, @:privateAccess panner.velocity.length);
handle.channel.synchronize();
Assert.floatEquals(5058.24 / 5000, handle.channel.pDopplerRatio.targetValue);
}
function test_calculateDoppler_noDopplerEffectIfNoRadialVelocity() {
Time.overrideTime = 0.0;
aura.Aura.listener.setLocation(new Vec3(0.0, 0.0, 0.0));
panner.setLocation(new Vec3(2.0, 0.0, 0.0));
panner.update3D();
Time.overrideTime = 0.5;
aura.Aura.listener.setLocation(new Vec3(0.0, 0.0, 0.0));
panner.setLocation(new Vec3(0.0, 0.0, 0.0));
panner.update3D();
handle.channel.synchronize();
Assert.floatEquals(1, handle.channel.pDopplerRatio.targetValue);
}
function test_calculateDoppler_noDopplerEffectIfNoRadialVelocity2() {
Time.overrideTime = 0.0;
aura.Aura.listener.setLocation(new Vec3(5.0, 0.0, 0.0));
panner.setLocation(new Vec3(5.0, 0.0, 0.0));
panner.update3D();
Time.overrideTime = 0.5;
aura.Aura.listener.setLocation(new Vec3(5.0, 0.0, 0.0));
panner.setLocation(new Vec3(5.0, 0.0, 0.0));
panner.update3D();
handle.channel.synchronize();
Assert.floatEquals(1, handle.channel.pDopplerRatio.targetValue);
}
function test_calculateDoppler_noDopplerEffectIfNoRadialVelocity3() {
Time.overrideTime = 0.0;
aura.Aura.listener.setLocation(new Vec3(0.0, 0.0, 0.0));
panner.setLocation(new Vec3(2.0, 2.0, 0.0));
panner.update3D();
Time.overrideTime = 0.5;
aura.Aura.listener.setLocation(new Vec3(0.0, 0.0, 0.0));
panner.setLocation(new Vec3(0.0, 2.0, 0.0));
panner.update3D();
handle.channel.synchronize();
Assert.floatEquals(1, handle.channel.pDopplerRatio.targetValue);
}
function test_dopplerEffect_isZeroIfPannerMovesCloserAtSpeedOfSound() {
Time.overrideTime = 0.0;
panner.setLocation(new Vec3(Panner.SPEED_OF_SOUND + 1, 0.0, 0.0));
panner.update3D();
Time.overrideTime = 1.0;
panner.setLocation(new Vec3(1, 0.0, 0.0));
panner.update3D();
handle.channel.synchronize();
Assert.floatEquals(0, handle.channel.pDopplerRatio.targetValue);
}
function test_dopplerEffect_pannerMovesCloserAboveSpeedOfSound() {
Time.overrideTime = 0.0;
panner.setLocation(new Vec3(Panner.SPEED_OF_SOUND + 5, 0.0, 0.0));
panner.update3D();
Time.overrideTime = 1.0;
panner.setLocation(new Vec3(1, 0.0, 0.0));
panner.update3D();
handle.channel.synchronize();
Assert.floatEquals(-85.85, handle.channel.pDopplerRatio.targetValue);
}
}
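
The magic numbers in the Doppler tests are the textbook formula evaluated at c = 343.4 m/s, the speed of sound used by the calculator linked above: f'/f = (c + v_listener) / (c - v_source), with both velocities taken radially and positive toward the other party. A panner receding at 4 m/s gives 343.4 / 347.4, about 0.98849, hence 4942.43 Hz out of 5000 Hz. The two edge-case tests fall out of the same expression: the denominator reaches 0 exactly at the speed of sound (where the test expects a ratio of 0) and turns negative above it (343.4 / -4 = -85.85). A small sketch of just that arithmetic; the class and function names are made up, and only SPEED_OF_SOUND mirrors the Panner constant used above.

class DopplerSketch {
	static inline var SPEED_OF_SOUND = 343.4; // m/s, matching the calculator linked above

	// vListener/vSource are radial velocities, positive toward the other party.
	static function dopplerRatio(vListener: Float, vSource: Float): Float {
		return (SPEED_OF_SOUND + vListener) / (SPEED_OF_SOUND - vSource);
	}

	static function main() {
		trace(dopplerRatio(0, -4)); // panner recedes:      0.98849 -> 4942.43 Hz
		trace(dopplerRatio(-4, 0)); // listener recedes:    0.98835 -> 4941.76 Hz
		trace(dopplerRatio(0, 4));  // panner approaches:   1.01179 -> 5058.93 Hz
		trace(dopplerRatio(4, 0));  // listener approaches: 1.01165 -> 5058.24 Hz
	}
}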

View File

@ -0,0 +1,66 @@
package auratests.format;
import haxe.Int64;
import haxe.io.Bytes;
import haxe.io.BytesInput;
import utest.Assert;
using aura.format.InputExtension;
class TestInputExtension extends utest.Test {
var bytes: Bytes;
var inp: BytesInput;
// 10000000 01000000 00100000 00010000 - 00001000 00000100 00000010 00000001
var inputValue = Int64.make(
1 << 31 | 1 << 22 | 1 << 13 | 1 << 4,
1 << 27 | 1 << 18 | 1 << 9 | 1
);
// 00000001 00000010 00000100 00001000 - 00010000 00100000 01000000 10000000
var inputValueInverted = Int64.make(
1 << 24 | 1 << 17 | 1 << 10 | 1 << 3,
1 << 28 | 1 << 21 | 1 << 14 | 1 << 7
);
function setup() {
bytes = Bytes.alloc(8);
inp = new BytesInput(bytes);
}
function test_readInt64_littleEndian_correctRead() {
bytes.setInt64(0, inputValue); // setInt64 is little-endian
inp.bigEndian = false;
assertI64Equals(inputValue, inp.readInt64());
}
function test_readInt64_bigEndian_correctRead() {
bytes.setInt64(0, inputValue);
inp.bigEndian = true;
assertI64Equals(inputValueInverted, inp.readInt64());
}
function test_readUint32_isUnsigned() {
bytes.setInt32(0, 1 << 31);
inp.bigEndian = false;
assertI64Equals(Int64.make(0, -2147483648 /* -2^31; the sign bit carries no meaning in the low part */), inp.readUInt32());
}
function test_readUint32_littleEndian_correctRead() {
bytes.setInt32(0, inputValue.high);
inp.bigEndian = false;
assertI64Equals(Int64.make(0, inputValue.high), inp.readUInt32());
}
function test_readUint32_bigEndian_correctRead() {
bytes.setInt32(0, inputValue.high);
inp.bigEndian = true;
assertI64Equals(Int64.make(0, inputValueInverted.low), inp.readUInt32());
}
function assertI64Equals(want: Int64, have: Int64, ?pos: haxe.PosInfos) {
final errorMessage = 'Expected (high: ${want.high}, low: ${want.low}), have (high: ${have.high}, low: ${have.low}).';
Assert.isTrue(want.low == have.low && want.high == have.high, errorMessage, pos);
}
}
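
The two bit-pattern comments above are byte-reversals of each other, which is the point of the test pair: reading the same eight bytes with the opposite endianness reverses their order, so a value written little-endian reads back byte-reversed in big-endian mode. readUInt32 additionally widens the result into the low half of an Int64, so the top bit cannot be misinterpreted as a sign. A short sketch of the 32-bit byte reversal; names are illustrative.

class ByteOrderSketch {
	// Reverse the byte order of a 32-bit value, e.g. 0x80402010 <-> 0x10204080.
	static function swap32(v: Int): Int {
		return ((v >>> 24) & 0xFF)
			| ((v >>> 8) & 0xFF00)
			| ((v << 8) & 0xFF0000)
			| ((v & 0xFF) << 24);
	}

	static function main() {
		trace(StringTools.hex(swap32(0x80402010), 8)); // 10204080
	}
}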

Some files were not shown because too many files have changed in this diff.