Update Files

This commit is contained in:
2025-01-22 16:18:30 +01:00
parent ed4603cf95
commit a36294b518
16718 changed files with 2960346 additions and 0 deletions

View File

@ -0,0 +1,160 @@
package kha.audio2.ogg.vorbis;
import haxe.io.BytesOutput;
import haxe.io.Output;
import haxe.io.StringInput;
import kha.audio2.ogg.tools.Mdct;
import kha.audio2.ogg.vorbis.data.Floor;
import kha.audio2.ogg.vorbis.data.Mapping;
import kha.audio2.ogg.vorbis.data.Mode;
import kha.audio2.ogg.vorbis.data.Header;
import kha.audio2.ogg.vorbis.VorbisDecodeState;
import haxe.ds.Vector;
import haxe.io.Bytes;
import haxe.io.BytesInput;
import haxe.io.Eof;
import haxe.io.Input;
import haxe.PosInfos;
#if sys
import sys.FileSystem;
import sys.io.File;
import sys.io.FileInput;
#end
/**
 * Public domain ogg reader: high-level front end over VorbisDecoder that
 * adds sample/millisecond position accessors, seeking and loop points.
 * @author shohei909
 */
class Reader {
    // the underlying vorbis decoder; exposed read-only
    public var decoder(default, null):VorbisDecoder;

    // vorbis stream header (channel count, sample rate, comments, ...)
    public var header(get, never):Header;
    function get_header():Header {
        return decoder.header;
    }

    // total stream length, in samples
    public var totalSample(get, never):Int;
    function get_totalSample():Int {
        return decoder.totalSample;
    }

    // total stream length, in milliseconds
    public var totalMillisecond(get, never):Float;
    function get_totalMillisecond():Float {
        return sampleToMillisecond(decoder.totalSample);
    }

    // current position in samples; setting it seeks the decoder
    public var currentSample(get, set):Int;
    function get_currentSample():Int {
        return decoder.currentSample;
    }
    function set_currentSample(value:Int):Int {
        decoder.seek(seekFunc, inputLength, value);
        return decoder.currentSample;
    }

    // current position in milliseconds; setting it seeks the decoder
    public var currentMillisecond(get, set):Float;
    function get_currentMillisecond():Float {
        return sampleToMillisecond(currentSample);
    }
    function set_currentMillisecond(value:Float):Float {
        currentSample = millisecondToSample(value);
        return currentMillisecond;
    }

    // loop points read from the vorbis comment block (null when absent)
    public var loopStart:Null<Int>;
    public var loopLength:Null<Int>;

    var seekFunc:Int->Void;
    var inputLength:Int;

    // private: use openFromBytes / openFromFile
    function new (input:Input, seekFunc:Int->Void, inputLength:Int) {
        this.seekFunc = seekFunc;
        this.inputLength = inputLength;
        decoder = VorbisDecoder.start(input);
        // resolves totalSample immediately by probing the last ogg page
        decoder.setupSampleNumber(seekFunc, inputLength);
        loopStart = header.comment.loopStart;
        loopLength = header.comment.loopLength;
    }

    /** Opens a reader over in-memory ogg data. */
    public static function openFromBytes(bytes:Bytes) {
        var input = new BytesInput(bytes);
        return new Reader(input, seekBytes.bind(input), bytes.length);
    }

    static function seekBytes(bytes:BytesInput, pos:Int) {
        bytes.position = pos;
    }

    #if sys
    /** Opens a reader over an ogg file on disk (sys targets only). */
    public static function openFromFile(fileName:String):Reader {
        var file = File.read(fileName, true);
        var stat = FileSystem.stat(fileName);
        return new Reader(file, file.seek.bind(_, SeekBegin), stat.size);
    }
    #end

    /**
     * Decodes the entire stream in `bytes`, writing interleaved floats to
     * `output`, and returns the stream header.
     * @param useFloat passed through to VorbisDecoder.read()
     */
    public static function readAll(bytes:Bytes, output:Output, useFloat:Bool = false):Header {
        var input = new BytesInput(bytes);
        var decoder = VorbisDecoder.start(input);
        decoder.setupSampleNumber(seekBytes.bind(input), bytes.length);
        var header = decoder.header;
        var bufferSize = 4096;
        var buffer = new kha.arrays.Float32Array(bufferSize * header.channel);
        // removed an unused `count` accumulator that was maintained here
        while (true) {
            var n = decoder.read(buffer, bufferSize, header.channel, header.sampleRate, useFloat);
            for (i in 0...n * header.channel) {
                output.writeFloat(buffer[i]);
            }
            // read() returns 0 once the stream is exhausted
            if (n == 0) { break; }
        }
        return decoder.header;
    }

    /**
     * Decodes up to `samples` samples into `output`. Omitted arguments
     * default to the remaining stream length and the stream's own format.
     */
    public function read(output:kha.arrays.Float32Array, ?samples:Int, ?channels:Int, ?sampleRate:Int, useFloat:Bool = false) {
        // realign the shared input with this reader's logical position
        decoder.ensurePosition(seekFunc);
        if (samples == null) {
            samples = decoder.totalSample;
        }
        if (channels == null) {
            channels = header.channel;
        }
        if (sampleRate == null) {
            sampleRate = header.sampleRate;
        }
        return decoder.read(output, samples, channels, sampleRate, useFloat);
    }

    /** Returns an independent copy of this reader (shares the same input). */
    public function clone():Reader {
        var reader = Type.createEmptyInstance(Reader);
        reader.seekFunc = seekFunc;
        reader.inputLength = inputLength;
        reader.decoder = decoder.clone(seekFunc);
        reader.loopStart = loopStart;
        reader.loopLength = loopLength;
        return reader;
    }

    /** Converts a sample count to milliseconds using the stream rate. */
    public inline function sampleToMillisecond(samples:Int) {
        return samples / header.sampleRate * 1000;
    }

    /** Converts milliseconds to a (floored) sample count. */
    public inline function millisecondToSample(milliseconds:Float) {
        return Math.floor(milliseconds / 1000 * header.sampleRate);
    }
}
// NOTE(review): this typedef appears unused within this file — possibly a
// leftover from an earlier factoring of the Reader constructor arguments;
// confirm no macro/reflection use before removing.
private typedef InitData = {
input:Input,
seekFunc:Int->Void,
inputLength:Int,
}

View File

@ -0,0 +1,857 @@
package kha.audio2.ogg.vorbis;
import haxe.ds.Vector;
import haxe.Int64;
import haxe.io.Bytes;
import haxe.io.Eof;
import haxe.io.Input;
import haxe.io.Output;
import kha.audio2.ogg.tools.Crc32;
import kha.audio2.ogg.tools.MathTools;
import kha.audio2.ogg.vorbis.data.Codebook;
import kha.audio2.ogg.vorbis.data.Floor.Floor1;
import kha.audio2.ogg.vorbis.data.Header;
import kha.audio2.ogg.vorbis.data.Mode;
import kha.audio2.ogg.vorbis.data.Page;
import kha.audio2.ogg.vorbis.data.ProbedPage;
import kha.audio2.ogg.vorbis.data.ReaderError;
import kha.audio2.ogg.vorbis.data.Page;
import kha.audio2.ogg.vorbis.data.Residue;
import kha.audio2.ogg.vorbis.data.Setting;
import kha.audio2.ogg.vorbis.VorbisDecoder.DecodeInitialResult;
/**
 * Low-level Ogg/Vorbis decode state: bit-level reader, ogg page/packet
 * framing, page scanning and sample-position probing.
 * Haxe port of the corresponding machinery in stb_vorbis.
 * @author shohei909
 */
class VorbisDecodeState
{
    // validBits takes this value once the current packet is exhausted
    public static inline var INVALID_BITS = -1;

    public var page(default, null):Page;
    public var eof(default, null):Bool;
    public var pFirst(default, null):ProbedPage;
    public var pLast(default, null):ProbedPage;
    public var validBits(default, null):Int = 0;
    public var inputPosition(default, null):Int;
    public var input(default, null):Input;
    public var discardSamplesDeferred:Int;
    public var segments(default, null):Vector<Int>;
    public var bytesInSeg:Int = 0; // uint8

    // decode buffer
    public var channelBuffers:Vector<Vector<Float>>; //var *[STB_VORBIS_MAX_CHANNELS];
    public var channelBufferStart:Int;
    public var channelBufferEnd:Int;
    public var currentSample(default, null):Int;
    public var previousWindow:Vector<Vector<Float>>; //var *[STB_VORBIS_MAX_CHANNELS];
    public var previousLength:Int;
    public var finalY:Vector<Array<Int>>; // [STB_VORBIS_MAX_CHANNELS];

    var firstDecode:Bool = false;
    var nextSeg:Int = 0;
    var acc:UInt; // bit accumulator for readBits/huffman decode
    var lastSeg:Bool; // flag that we're on the last segment
    var lastSegWhich:Int; // what was the segment number of the last seg?
    var endSegWithKnownLoc:Int;
    var knownLocForPacket:Int;
    var error:ReaderError;
    var currentLoc:Int; //uint32 sample location of next frame to decode
    var currentLocValid:Int;
    var firstAudioPageOffset:UInt;

    public function new(input:Input)
    {
        this.input = input;
        inputPosition = 0;
        page = new Page();
        Crc32.init();
    }

    /**
     * Reads the segment (lacing) table of the page that has just been
     * started and records, when the page carries a granule position,
     * which segment ends at a known sample location.
     */
    public function setup(loc0:Int, loc1:Int) {
        var segmentCount = readByte();
        this.segments = read(segmentCount);
        // assume we don't know the sample position of any segments
        this.endSegWithKnownLoc = -2;
        if (loc0 != 0xFFFFFFFF || loc1 != 0xFFFFFFFF) {
            // find the last segment that ends a packet (lacing value < 255):
            // that packet ends at the page's granule position
            var i:Int = segmentCount - 1;
            while (i >= 0) {
                if (segments.get(i) < 255) {
                    break;
                }
                i--;
            }
            // BUGFIX: the assignment used to live inside the loop before the
            // break, so the terminating segment itself (the common case: the
            // page's last lacing value < 255) was never recorded; assign after
            // the scan, matching stb_vorbis
            if (i >= 0) {
                this.endSegWithKnownLoc = i;
                this.knownLocForPacket = loc0;
            }
        }
        if (firstDecode) {
            // build the ProbedPage describing the very first audio page
            var len:Int = 0;
            var p = new ProbedPage();
            for (i in 0...segmentCount) {
                len += segments.get(i);
            }
            len += 27 + segmentCount; // 27-byte header + lacing table
            p.pageStart = firstAudioPageOffset;
            p.pageEnd = p.pageStart + len;
            p.firstDecodedSample = 0;
            p.lastDecodedSample = loc0;
            pFirst = p;
        }
        nextSeg = 0;
    }

    /**
     * Returns an independent copy of this state reading from the same input.
     * `seekFunc` is used to realign the shared input to this state's position.
     */
    public function clone(seekFunc:Int->Void)
    {
        var state = Type.createEmptyInstance(VorbisDecodeState);
        seekFunc(inputPosition);
        state.input = input;
        // primitive
        state.eof = eof;
        state.validBits = validBits;
        state.discardSamplesDeferred = discardSamplesDeferred;
        state.firstDecode = firstDecode;
        state.nextSeg = nextSeg;
        state.bytesInSeg = bytesInSeg;
        state.acc = acc; // BUGFIX: was `state.acc = state.acc`, leaving the copy's accumulator uninitialized
        state.lastSeg = lastSeg;
        state.lastSegWhich = lastSegWhich;
        state.endSegWithKnownLoc = endSegWithKnownLoc; // BUGFIX: previously not copied
        state.knownLocForPacket = knownLocForPacket; // BUGFIX: previously not copied
        state.currentLoc = currentLoc;
        state.currentLocValid = currentLocValid;
        state.inputPosition = inputPosition;
        state.firstAudioPageOffset = firstAudioPageOffset;
        // shallow copy
        state.error = error;
        state.segments = segments;
        state.pFirst = pFirst;
        state.pLast = pLast;
        // deep copy
        state.page = page.clone();
        return state;
    }

    // nextSegment: advances to the next segment of the current packet,
    // starting a new page when the lacing table is exhausted.
    // Returns the segment length (0 on end of packet/stream).
    public function next():Int {
        if (lastSeg) {
            return 0;
        }
        if (nextSeg == -1) {
            lastSegWhich = segments.length - 1; // in case startPage fails
            try {
                page.start(this);
            } catch(e:ReaderError) {
                lastSeg = true;
                error = e;
                return 0;
            }
            // a packet continuing across pages must have the flag set
            if ((page.flag & PageFlag.CONTINUED_PACKET) == 0) {
                throw new ReaderError(ReaderErrorType.CONTINUED_PACKET_FLAG_INVALID);
            }
        }
        var len = segments.get(nextSeg++);
        if (len < 255) {
            // a lacing value < 255 terminates the packet
            lastSeg = true;
            lastSegWhich = nextSeg - 1;
        }
        if (nextSeg >= segments.length) {
            nextSeg = -1;
        }
        VorbisTools.assert(bytesInSeg == 0);
        bytesInSeg = len;
        return len;
    }

    /**
     * Positions the reader at the first segment of the next packet,
     * starting new pages as needed.
     */
    public function startPacket() {
        while (nextSeg == -1) {
            page.start(this);
            if ((page.flag & PageFlag.CONTINUED_PACKET) != 0) {
                // a fresh packet must not begin on a continued page.
                // BUGFIX: this previously threw MISSING_CAPTURE_PATTERN, but the
                // capture pattern was already consumed by page.start(); the
                // invalid thing here is the continued-packet flag (stb_vorbis
                // reports continued_packet_flag_invalid)
                throw new ReaderError(ReaderErrorType.CONTINUED_PACKET_FLAG_INVALID);
            }
        }
        lastSeg = false;
        validBits = 0;
        bytesInSeg = 0;
    }

    /**
     * Like startPacket(), but returns false instead of failing when the
     * stream ends exactly at a page boundary.
     */
    public function maybeStartPacket():Bool
    {
        if (nextSeg == -1) {
            var eof = false;
            var x = try {
                readByte();
            } catch (e:Eof) {
                eof = true;
                0;
            }
            if (eof) {
                return false; // EOF at page boundary is not an error!
            }
            // expect the "OggS" capture pattern
            if (x != 0x4f || readByte() != 0x67 || readByte() != 0x67 || readByte() != 0x53) {
                throw new ReaderError(ReaderErrorType.MISSING_CAPTURE_PATTERN);
            }
            page.startWithoutCapturePattern(this);
        }
        startPacket();
        return true;
    }

    // public inline function readBits(n:Int):Int
    /**
     * Reads `n` bits (LSB-first) from the packet bitstream.
     * Returns 0 once the packet is exhausted (validBits == INVALID_BITS).
     */
    public function readBits(n:Int):Int // Kha: reduce output size
    {
        if (validBits < 0) {
            return 0;
        } else if (validBits < n) {
            if (n > 24) {
                // the accumulator technique below would not work correctly in this case
                return readBits(24) + ((readBits(n - 24) << 24));
            } else {
                if (validBits == 0) {
                    acc = 0;
                }
                // refill the accumulator a byte at a time
                do {
                    if (bytesInSeg == 0 && (lastSeg || next() == 0)) {
                        validBits = INVALID_BITS;
                        break;
                    } else {
                        bytesInSeg--;
                        acc += (readByte() << validBits);
                        validBits += 8;
                    }
                } while (validBits < n);
                if (validBits < 0) {
                    return 0;
                } else {
                    var z = acc & ((1 << n) - 1);
                    acc >>>= n;
                    validBits -= n;
                    return z;
                }
            }
        } else {
            // fast path: enough bits already buffered
            var z = acc & ((1 << n) - 1);
            acc >>>= n;
            validBits -= n;
            return z;
        }
    }

    // Reads one payload byte, or VorbisTools.EOP at end of packet.
    inline function readPacketRaw():Int {
        return if (bytesInSeg == 0 && (lastSeg || next() == 0)) { // CLANG!
            VorbisTools.EOP;
        } else {
            //VorbisTools.assert(bytesInSeg > 0);
            bytesInSeg--;
            readByte();
        }
    }

    // Byte-aligned packet read; discards any buffered bits.
    public inline function readPacket():Int
    {
        var x = readPacketRaw();
        validBits = 0;
        return x;
    }

    // Skips the remainder of the current packet.
    public inline function flushPacket():Void {
        while (bytesInSeg != 0 || (!lastSeg && next() != 0)) {
            bytesInSeg--;
            readByte();
        }
    }

    // Checks the 6-byte "vorbis" signature that follows each header packet's type byte.
    public inline function vorbisValidate() {
        var header = Bytes.alloc(6);
        for (i in 0...6) {
            header.set(i, readPacket());
        }
        if (header.toString() != "vorbis") {
            throw new ReaderError(ReaderErrorType.INVALID_SETUP, "vorbis header");
        }
    }

    // The identification header must be the only packet of the first page
    // and is always exactly 30 bytes long.
    public function firstPageValidate()
    {
        if (segments.length != 1) {
            throw new ReaderError(INVALID_FIRST_PAGE, "segmentCount");
        }
        if (segments.get(0) != 30) {
            throw new ReaderError(INVALID_FIRST_PAGE, "decodeState head");
        }
    }

    // Marks the current input position as the start of audio data.
    public function startFirstDecode()
    {
        firstAudioPageOffset = inputPosition;
        firstDecode = true;
    }

    // Consumes the "OggS" capture pattern or throws.
    public inline function capturePattern()
    {
        if (readByte() != 0x4f || readByte() != 0x67 || readByte() != 0x67 || readByte() != 0x53) {
            throw new ReaderError(ReaderErrorType.MISSING_CAPTURE_PATTERN);
        }
    }

    inline function skip(len:Int)
    {
        read(len);
    }

    // Tops up the accumulator so at least 25 bits are available (or the
    // packet ends), for the fast huffman table lookup in decodeRaw().
    function prepHuffman()
    {
        if (validBits <= 24) {
            if (validBits == 0) {
                acc = 0;
            }
            do {
                if (bytesInSeg == 0 && (lastSeg || next() == 0)) { // CLANG!
                    return;
                } else {
                    bytesInSeg--;
                    acc += readByte() << validBits;
                    validBits += 8;
                }
            } while (validBits <= 24);
        }
    }

    // Decodes one codeword; maps through sortedValues for sparse codebooks.
    public inline function decode(c:Codebook):Int {
        var val = decodeRaw(c);
        if (c.sparse) {
            val = c.sortedValues[val];
        }
        return val;
    }

    public inline function decodeRaw(c:Codebook)
    {
        if (validBits < Setting.FAST_HUFFMAN_LENGTH){
            prepHuffman();
        }
        // fast huffman table lookup
        var i = c.fastHuffman[acc & Setting.FAST_HUFFMAN_TABLE_MASK];
        return if (i >= 0) {
            var l = c.codewordLengths[i];
            acc >>>= l;
            validBits -= l;
            if (validBits < 0) {
                validBits = 0;
                -1;
            } else {
                i;
            }
        } else {
            // codeword longer than the fast-table prefix: slow path
            decodeScalarRaw(c);
        }
    }

    public inline function isLastByte()
    {
        return bytesInSeg == 0 && lastSeg;
    }

    /**
     * Computes the valid sample window of a decoded packet and advances the
     * running sample location (port of the tail of stb_vorbis'
     * vorbis_decode_packet).
     */
    public function finishDecodePacket(previousLength:Int, n:Int, r:DecodeInitialResult)
    {
        var left = r.left.start;
        // NOTE(review): this local shadows the `currentLocValid` field; the
        // `previousLength == 0 && currentLocValid` branch below therefore
        // tests the (false) local and is dead — kept as-is to preserve the
        // original port's behavior.
        var currentLocValid = false;
        var n2 = n >> 1;
        if (firstDecode) {
            // assume we start so first non-discarded sample is sample 0
            // this isn't to spec, but spec would require us to read ahead
            // and decode the size of all current frames--could be done,
            // but presumably it's not a commonly used feature
            currentLoc = -n2; // start of first frame is positioned for discard
            // we might have to discard samples "from" the next frame too,
            // if we're lapping a large block then a small at the start?
            discardSamplesDeferred = n - r.right.end;
            currentLocValid = true;
            firstDecode = false;
        } else if (discardSamplesDeferred != 0) {
            r.left.start += discardSamplesDeferred;
            left = r.left.start;
            discardSamplesDeferred = 0;
        } else if (previousLength == 0 && currentLocValid) {
            // we're recovering from a seek... that means we're going to discard
            // the samples from this packet even though we know our position from
            // the last page header, so we need to update the position based on
            // the discarded samples here
            // but wait, the code below is going to add this in itself even
            // on a discard, so we don't need to do it here...
        }
        // check if we have ogg information about the sample # for this packet
        if (lastSegWhich == endSegWithKnownLoc) {
            // if we have a valid current loc, and this is final:
            if (currentLocValid && (page.flag & PageFlag.LAST_PAGE) != 0) {
                var currentEnd = knownLocForPacket - (n - r.right.end);
                // then let's infer the size of the (probably) short final frame
                if (currentEnd < currentLoc + r.right.end) {
                    var len = if (currentEnd < currentLoc) {
                        // negative truncation, that's impossible!
                        0;
                    } else {
                        currentEnd - currentLoc;
                    }
                    len += r.left.start;
                    currentLoc += len;
                    return {
                        len : len,
                        left : left,
                        right : r.right.start,
                    }
                }
            }
            // otherwise, just set our sample loc
            // guess that the ogg granule pos refers to the middle of the
            // last frame?
            // set currentLoc to the position of leftStart
            currentLoc = knownLocForPacket - (n2 - r.left.start);
            currentLocValid = true;
        }
        if (currentLocValid) {
            currentLoc += (r.right.start - r.left.start);
        }
        return {
            len : r.right.end,
            left : left,
            right : r.right.start,
        }
    }

    public inline function readInt32():Int
    {
        inputPosition += 4;
        return input.readInt32();
    }

    public inline function readByte():Int
    {
        inputPosition += 1;
        return input.readByte();
    }

    // Reads n bytes into a Vector<Int>, tracking inputPosition.
    public inline function read(n:Int):Vector<Int> {
        inputPosition += n;
        var vec = new Vector(n);
        for (i in 0...n) {
            vec[i] = input.readByte();
        }
        return vec;
    }

    public inline function readBytes(n:Int):Bytes {
        inputPosition += n;
        return input.read(n);
    }

    public inline function readString(n:Int):String
    {
        inputPosition += n;
        return input.readString(n);
    }

    /**
     * Determines the stream's total sample count by locating the last ogg
     * page and reading its granule position. Restores the input position
     * before returning.
     */
    public function getSampleNumber(seekFunc:Int->Void, inputLength:UInt):Int {
        // first, store the current decode position so we can restore it
        var restoreOffset = inputPosition;
        // now we want to seek back 64K from the end (the last page must
        // be at most a little less than 64K, but let's allow a little slop)
        var previousSafe = if (inputLength >= 65536 && inputLength - 65536 >= firstAudioPageOffset) {
            inputLength - 65536;
        } else {
            firstAudioPageOffset;
        }
        setInputOffset(seekFunc, previousSafe);
        // previousSafe is now our candidate 'earliest known place that seeking
        // to will lead to the final page'
        var end = 0;
        var last = false;
        switch (findPage(seekFunc, inputLength)) {
            case Found(e, l):
                end = e;
                last = l;
            case NotFound:
                throw new ReaderError(ReaderErrorType.CANT_FIND_LAST_PAGE);
        }
        // check if there are more pages
        var lastPageLoc = inputPosition;
        // stop when the lastPage flag is set, not when we reach eof;
        // this allows us to stop short of a 'fileSection' end without
        // explicitly checking the length of the section
        while (!last) {
            setInputOffset(seekFunc, end);
            switch (findPage(seekFunc, inputLength)) {
                case Found(e, l):
                    end = e;
                    last = l;
                case NotFound:
                    // the last page we found didn't have the 'last page' flag
                    // set. whoops!
                    break;
            }
            previousSafe = lastPageLoc + 1;
            lastPageLoc = inputPosition;
        }
        setInputOffset(seekFunc, lastPageLoc);
        // skip the page header up to the granule-position field
        var vorbisHeader = read(6);
        // extract the absolute granule position
        var lo = readInt32();
        var hi = readInt32();
        if (lo == 0xffffffff && hi == 0xffffffff || hi > 0) {
            // NOTE(review): stb_vorbis saturates when hi != 0 instead of
            // failing; this port treats >32-bit sample counts as unsupported
            throw new ReaderError(ReaderErrorType.CANT_FIND_LAST_PAGE);
        }
        pLast = new ProbedPage();
        pLast.pageStart = lastPageLoc;
        pLast.pageEnd = end;
        pLast.lastDecodedSample = lo;
        pLast.firstDecodedSample = null;
        pLast.afterPreviousPageStart = previousSafe;
        setInputOffset(seekFunc, restoreOffset);
        return lo;
    }

    // Forces the next packet read to resynchronize on a page boundary.
    public inline function forcePageResync()
    {
        nextSeg = -1;
    }

    // Seeks the input and keeps inputPosition consistent with it.
    public inline function setInputOffset(seekFunc:Int->Void, n:Int)
    {
        seekFunc(inputPosition = n);
    }

    /**
     * Scans forward for the next ogg page with a valid CRC. On success the
     * input is left positioned at the page's first byte.
     */
    public function findPage(seekFunc:Int->Void, inputLength:Int):FindPageResult {
        try {
            while (true) {
                var n = readByte();
                if (n == 0x4f) { // page header candidate ('O')
                    var retryLoc = inputPosition;
                    // check if we're off the end of a fileSection stream
                    if (retryLoc - 25 > inputLength) {
                        return FindPageResult.NotFound;
                    }
                    if (readByte() != 0x67 || readByte() != 0x67 || readByte() != 0x53) {
                        continue;
                    }
                    var header = new Vector<UInt>(27);
                    header[0] = 0x4f;
                    header[1] = 0x67;
                    header[2] = 0x67;
                    header[3] = 0x53;
                    for (i in 4...27) {
                        header[i] = readByte();
                    }
                    if (header[4] != 0) {
                        // unknown stream structure version: not a real page
                        setInputOffset(seekFunc, retryLoc);
                        continue;
                    }
                    // stored CRC (little endian); zero the field for recomputation
                    var goal:UInt = header[22] + (header[23] << 8) + (header[24]<<16) + (header[25]<<24);
                    for (i in 22...26) {
                        header[i] = 0;
                    }
                    var crc:UInt = 0;
                    for (i in 0...27){
                        crc = Crc32.update(crc, header[i]);
                    }
                    var len = 0;
                    try {
                        for (i in 0...header[26]) {
                            var s = readByte();
                            crc = Crc32.update(crc, s);
                            len += s;
                        }
                        for (i in 0...len) {
                            crc = Crc32.update(crc, readByte());
                        }
                    } catch (e:Eof) {
                        return FindPageResult.NotFound;
                    }
                    // finished parsing probable page
                    if (crc == goal) {
                        // we could now check that it's either got the last
                        // page flag set, OR it's followed by the capture
                        // pattern, but I guess TECHNICALLY you could have
                        // a file with garbage between each ogg page and recover
                        // from it automatically? So even though that paranoia
                        // might decrease the chance of an invalid decode by
                        // another 2^32, not worth it since it would hose those
                        // invalid-but-useful files?
                        var end = inputPosition;
                        setInputOffset(seekFunc, retryLoc - 1);
                        return FindPageResult.Found(end, (header[5] & 0x04 != 0));
                    }
                }
            }
        } catch (e:Eof) {
            return FindPageResult.NotFound;
        }
    }

    /**
     * Probes the page at the current position: determines its byte extent
     * and first/last decoded sample numbers without consuming it (the input
     * is restored to the page start). Returns null when the page looks corrupt.
     */
    public function analyzePage(seekFunc:Int->Void, h:Header)
    {
        var z:ProbedPage = new ProbedPage();
        var packetType = new Vector<Bool>(255);
        // record where the page starts
        z.pageStart = inputPosition;
        // parse the header
        var pageHeader = read(27);
        VorbisTools.assert(pageHeader.get(0) == 0x4f && pageHeader.get(1) == 0x67 && pageHeader.get(2) == 0x67 && pageHeader.get(3) == 0x53);
        var lacing = read(pageHeader.get(26));
        // determine the length of the payload
        var len = 0;
        for (i in 0...pageHeader.get(26)){
            len += lacing.get(i);
        }
        // this implies where the page ends
        z.pageEnd = z.pageStart + 27 + pageHeader.get(26) + len;
        // read the last-decoded sample out of the data: low 32 bits of the
        // little-endian granule position (header bytes 6..9).
        // BUGFIX: byte 9 was shifted by 16 instead of 24
        z.lastDecodedSample = pageHeader.get(6) + (pageHeader.get(7) << 8) + (pageHeader.get(8) << 16) + (pageHeader.get(9) << 24);
        if ((pageHeader.get(5) & 4) != 0) {
            // if this is the last page, it's not possible to work
            // backwards to figure out the first sample! whoops!
            z.firstDecodedSample = null;
            setInputOffset(seekFunc, z.pageStart);
            return z;
        }
        // scan through the frames to determine the sample-count of each one...
        // our goal is the sample # of the first fully-decoded sample on the
        // page, which is the first decoded sample of the 2nd packet
        var numPacket = 0;
        var packetStart = ((pageHeader.get(5) & 1) == 0);
        var modeCount = h.modes.length;
        for (i in 0...pageHeader.get(26)) {
            if (packetStart) {
                if (lacing.get(i) == 0) {
                    setInputOffset(seekFunc, z.pageStart);
                    return null; // trying to read from zero-length packet
                }
                var n = readByte();
                // if bottom bit is non-zero, we've got corruption
                if (n & 1 != 0) {
                    setInputOffset(seekFunc, z.pageStart);
                    return null;
                }
                n >>= 1;
                var b = MathTools.ilog(modeCount - 1);
                n &= (1 << b) - 1;
                if (n >= modeCount) {
                    setInputOffset(seekFunc, z.pageStart);
                    return null;
                }
                packetType[numPacket++] = h.modes[n].blockflag;
                skip(lacing.get(i)-1);
            } else {
                skip(lacing.get(i));
            }
            packetStart = (lacing.get(i) < 255);
        }
        // now that we know the sizes of all the packets, we can start
        // determining how much sample data there is.
        var samples = 0;
        // for the last packet, we step by its whole length, because the definition
        // is that we encoded the end sample loc of the 'last packet completed',
        // where 'completed' refers to packets being split, and we are left to guess
        // what 'end sample loc' means. we assume it means ignoring the fact that
        // the last half of the data is useless without windowing against the next
        // packet... (so it's not REALLY complete in that sense)
        if (numPacket > 1) {
            samples += packetType[numPacket-1] ? h.blocksize1 : h.blocksize0;
        }
        // BUGFIX: the loop used to decrement `i` both at the top and the
        // bottom of the body, visiting only every other packet (and skipping
        // index numPacket-2 entirely), undercounting `samples`; each packet
        // from numPacket-2 down to 1 must be visited exactly once, matching
        // stb_vorbis
        var i = numPacket - 2;
        while (i >= 1) {
            // now, for this packet, how many samples do we have that
            // do not overlap the following packet?
            if (packetType[i]) {
                if (packetType[i + 1]) {
                    samples += h.blocksize1 >> 1;
                } else {
                    samples += ((h.blocksize1 - h.blocksize0) >> 2) + (h.blocksize0 >> 1);
                }
            } else {
                samples += h.blocksize0 >> 1;
            }
            i--;
        }
        // `samples` is now the distance from the page's granule position back
        // to the start of the second packet's first decoded sample; this is
        // the sample number a seek landing on this page can decode from after
        // discarding the (partial) first packet
        z.firstDecodedSample = z.lastDecodedSample - samples;
        // restore file state to where we were
        setInputOffset(seekFunc, z.pageStart);
        return z;
    }

    // Slow-path codeword decode: binary search over sorted codewords when
    // available, otherwise a linear scan over the codebook.
    function decodeScalarRaw(c:Codebook):Int
    {
        prepHuffman();
        VorbisTools.assert(c.sortedCodewords != null || c.codewords != null);
        // cases to use binary search: sortedCodewords && !codewords
        var codewordLengths = c.codewordLengths;
        var codewords = c.codewords;
        var sortedCodewords = c.sortedCodewords;
        if (c.entries > 8 ? (sortedCodewords != null) : codewords != null) {
            // binary search
            var code = VorbisTools.bitReverse(acc);
            var x = 0;
            var n = c.sortedEntries;
            while (n > 1) {
                // invariant: sc[x] <= code < sc[x+n]
                var m = x + (n >> 1);
                if (sortedCodewords[m] <= code) {
                    x = m;
                    n -= (n>>1);
                } else {
                    n >>= 1;
                }
            }
            // x is now the sorted index
            if (!c.sparse) {
                x = c.sortedValues[x];
            }
            // x is now sorted index if sparse, or symbol otherwise
            var len = codewordLengths[x];
            if (validBits >= len) {
                acc >>>= len;
                validBits -= len;
                return x;
            }
            validBits = 0;
            return -1;
        }
        // if small, linear search
        VorbisTools.assert(!c.sparse);
        for (i in 0...c.entries) {
            var cl = codewordLengths[i];
            if (cl == Codebook.NO_CODE) {
                continue;
            }
            if (codewords[i] == (acc & ((1 << cl)-1))) {
                if (validBits >= cl) {
                    acc >>>= cl;
                    validBits -= cl;
                    return i;
                }
                validBits = 0;
                return -1;
            }
        }
        error = new ReaderError(INVALID_STREAM);
        validBits = 0;
        return -1;
    }
}
// Result of VorbisDecodeState.findPage(): either a page was located
// (`end` = offset one past the page, `last` = last-page flag was set),
// or no further valid page exists in the stream.
private enum FindPageResult {
Found(end:Int, last:Bool);
NotFound;
}

View File

@ -0,0 +1,784 @@
package kha.audio2.ogg.vorbis;
import haxe.ds.Vector;
import haxe.io.Bytes;
import haxe.io.BytesOutput;
import haxe.io.Input;
import haxe.io.Output;
import kha.audio2.ogg.tools.MathTools;
import kha.audio2.ogg.tools.Mdct;
import kha.audio2.ogg.vorbis.data.Codebook;
import kha.audio2.ogg.vorbis.data.Floor.Floor1;
import kha.audio2.ogg.vorbis.data.Header;
import kha.audio2.ogg.vorbis.data.Mode;
import kha.audio2.ogg.vorbis.data.ProbedPage;
import kha.audio2.ogg.vorbis.data.ReaderError;
import kha.audio2.ogg.vorbis.VorbisDecodeState;
/**
* ...
* @author shohei909
*/
class VorbisDecoder
{
var previousWindow:Vector<Vector<Float>>; //var *[STB_VORBIS_MAX_CHANNELS];
var previousLength:Int;
var finalY:Vector<Array<Int>>; // [STB_VORBIS_MAX_CHANNELS];
// twiddle factors
var a:Vector<Vector<Float>>; // var * [2]
var b:Vector<Vector<Float>>; // var * [2]
var c:Vector<Vector<Float>>; // var * [2]
var window:Vector<Vector<Float>>; //var * [2];
var bitReverseData:Vector<Vector<Int>>; //uint16 * [2]
// decode buffer
var channelBuffers:Vector<Vector<Float>>; //var *[STB_VORBIS_MAX_CHANNELS];
var channelBufferStart:Int;
var channelBufferEnd:Int;
public var header(default, null):Header;
public var currentSample(default, null):Int;
public var totalSample(default, null):Null<Int>;
var decodeState:VorbisDecodeState;
// Private: instances are created via VorbisDecoder.start().
// Allocates the per-channel decode/overlap buffers and the twiddle-factor,
// window and bit-reverse tables for both block sizes.
function new(header:Header, decodeState:VorbisDecodeState) {
    this.header = header;
    this.decodeState = decodeState;
    totalSample = null; // unknown until setupSampleNumber() probes the last page
    currentSample = 0;
    // per-channel buffers
    previousLength = 0;
    channelBuffers = new Vector(header.channel);
    previousWindow = new Vector(header.channel);
    finalY = new Vector(header.channel);
    for (i in 0...header.channel) {
        channelBuffers[i] = VorbisTools.emptyFloatVector(header.blocksize1);
        previousWindow[i] = VorbisTools.emptyFloatVector(Std.int(header.blocksize1 / 2));
        finalY[i] = new Array();
    }
    // tables indexed by block-size selector (0 = short, 1 = long)
    a = new Vector(2);
    b = new Vector(2);
    c = new Vector(2);
    window = new Vector(2);
    bitReverseData = new Vector(2);
    initBlocksize(0, header.blocksize0);
    initBlocksize(1, header.blocksize1);
}
/**
 * Reads the vorbis headers from `input` and returns a decoder that has
 * already pumped (and discarded) its priming frame, ready to emit samples.
 */
public static function start(input:Input) {
    var state = new VorbisDecodeState(input);
    var streamHeader = Header.read(state);
    var dec = new VorbisDecoder(streamHeader, state);
    state.startFirstDecode();
    dec.pumpFirstFrame();
    return dec;
}
/**
 * Decodes up to `samples` interleaved output samples into `output`,
 * converting rate/channels by simple integer repetition. Only integer
 * multiples of the stream's native rate and channel count are supported.
 * Short reads at end-of-stream are zero-padded up to the clamped length.
 * Returns the number of output samples produced (per channel).
 */
public function read(output:kha.arrays.Float32Array, samples:Int, channels:Int, sampleRate:Int, useFloat:Bool) {
    if (sampleRate % header.sampleRate != 0) {
        throw 'Unsupported sampleRate : can\'t convert ${header.sampleRate} to $sampleRate';
    }
    if (channels % header.channel != 0) {
        throw 'Unsupported channels : can\'t convert ${header.channel} to $channels';
    }
    var sampleRepeat = Std.int(sampleRate / header.sampleRate);
    var channelRepeat = Std.int(channels / header.channel);
    var n = 0;
    // len = number of *source* samples needed; clamp to what remains
    var len = Math.floor(samples / sampleRepeat);
    if (totalSample != null && len > totalSample - currentSample) {
        len = totalSample - currentSample;
    }
    var index = 0;
    while (n < len) {
        // consume what is already decoded in the channel buffers
        var k = channelBufferEnd - channelBufferStart;
        if (k >= len - n) k = len - n;
        for (j in channelBufferStart...(channelBufferStart + k)) {
            for (sr in 0...sampleRepeat) {
                for (i in 0...header.channel) {
                    for (cr in 0...channelRepeat) {
                        var value = channelBuffers[i][j];
                        // clamp to [-1, 1]
                        if (value > 1) {
                            value = 1;
                        } else if (value < -1) {
                            value = -1;
                        }
                        if (useFloat) {
                            //output.writeFloat(value);
                            output[index] = value;
                            ++index;
                        } else {
                            //output.writeInt16(Math.floor(value * 0x7FFF));
                        }
                    }
                }
            }
        }
        n += k;
        channelBufferStart += k;
        // decode the next frame when the buffer runs dry; 0 means EOF
        if (n == len || getFrameFloat() == 0) {
            break;
        }
    }
    // zero-pad any shortfall so exactly `len` source samples were emitted
    for (j in n...len) {
        for (sr in 0...sampleRepeat) {
            for (i in 0...header.channel) {
                for (cr in 0...channelRepeat) {
                    if (useFloat) {
                        //output.writeFloat(0);
                        output[index] = 0;
                        ++index;
                    } else {
                        //output.writeInt16(0);
                    }
                }
            }
        }
    }
    currentSample += len;
    return len * sampleRepeat;
}
/**
 * Decodes and discards up to `len` samples (clamped to what remains in
 * the stream). Returns the clamped count.
 */
public function skipSamples(len:Int) {
    if (totalSample != null && len > totalSample - currentSample) {
        len = totalSample - currentSample;
    }
    var done = 0;
    while (done < len) {
        var buffered = channelBufferEnd - channelBufferStart;
        var chunk = (buffered < len - done) ? buffered : len - done;
        done += chunk;
        channelBufferStart += chunk;
        if (done == len) break;
        // refill; a zero-length frame means end of stream
        if (getFrameFloat() == 0) break;
    }
    currentSample += len;
    return len;
}
/**
 * Lazily determines the stream's total sample count by probing for the
 * last ogg page; no-op when already known.
 */
public function setupSampleNumber(seekFunc:Int->Void, inputLength:Int) {
    if (totalSample != null) {
        return;
    }
    totalSample = decodeState.getSampleNumber(seekFunc, inputLength);
}
/**
 * Seeks to `sampleNumber` using interpolated bisection over the ogg pages
 * (port of stb_vorbis' seek logic). The target is clamped into the
 * decodable range. Throws ReaderError(SEEK_FAILED) when page probing fails.
 */
public function seek(seekFunc:Int->Void, inputLength:UInt, sampleNumber:Int) {
    if (currentSample == sampleNumber) {
        return;
    }
    // do we know the location of the last page?
    if (totalSample == null) {
        setupSampleNumber(seekFunc, inputLength);
        if (totalSample == 0) {
            throw new ReaderError(ReaderErrorType.CANT_FIND_LAST_PAGE);
        }
    }
    // clamp the target into [0, lastDecodedSample)
    if (sampleNumber < 0) {
        sampleNumber = 0;
    }
    var p0 = decodeState.pFirst;
    var p1 = decodeState.pLast;
    if (sampleNumber >= p1.lastDecodedSample) {
        sampleNumber = p1.lastDecodedSample - 1;
    }
    if (sampleNumber < p0.lastDecodedSample) {
        // target lies on the first audio page: decode straight from there
        seekFrameFromPage(seekFunc, p0.pageStart, 0, sampleNumber);
    } else {
        var attempts = 0;
        // narrow [p0, p1] until the pages are adjacent
        while (p0.pageEnd < p1.pageStart) {
            // copy these into local variables so we can tweak them
            // if any are unknown
            var startOffset:UInt = p0.pageEnd;
            var endOffset:UInt = p1.afterPreviousPageStart; // an address known to seek to page p1
            var startSample = p0.lastDecodedSample;
            var endSample = p1.lastDecodedSample;
            // currently there is no such tweaking logic needed/possible?
            if (startSample == null || endSample == null) {
                throw new ReaderError(SEEK_FAILED);
            }
            // now we want to lerp between these for the target samples...
            // step 1: we need to bias towards the page start...
            if (startOffset + 4000 < endOffset) {
                endOffset -= 4000;
            }
            // now compute an interpolated search loc
            var probe:UInt = startOffset + Math.floor((endOffset - startOffset) / (endSample - startSample) * (sampleNumber - startSample));
            // next we need to bias towards binary search...
            // code is a little wonky to allow for full 32-bit unsigned values
            if (attempts >= 4) {
                var probe2:UInt = startOffset + ((endOffset - startOffset) >> 1);
                probe = if (attempts >= 8) {
                    probe2;
                } else if (probe < probe2) {
                    probe + ((probe2 - probe) >>> 1);
                } else {
                    probe2 + ((probe - probe2) >>> 1);
                }
            }
            ++attempts;
            decodeState.setInputOffset(seekFunc, probe);
            switch (decodeState.findPage(seekFunc, inputLength)) {
                case NotFound:
                    throw new ReaderError(SEEK_FAILED);
                case Found(_):
            }
            var q:ProbedPage = decodeState.analyzePage(seekFunc, header);
            if (q == null) {
                throw new ReaderError(SEEK_FAILED);
            }
            q.afterPreviousPageStart = probe;
            // it's possible we've just found the last page again
            if (q.pageStart == p1.pageStart) {
                p1 = q;
                continue;
            }
            // shrink the interval toward the target sample
            if (sampleNumber < q.lastDecodedSample) {
                p1 = q;
            } else {
                p0 = q;
            }
        }
        if (p0.lastDecodedSample <= sampleNumber && sampleNumber < p1.lastDecodedSample) {
            seekFrameFromPage(seekFunc, p1.pageStart, p0.lastDecodedSample, sampleNumber);
        } else {
            throw new ReaderError(SEEK_FAILED);
        }
    }
}
/**
 * Starting at `pageStart` (whose first fully-decoded sample is
 * `firstSample`), fake-decodes frames forward until the frame containing
 * `targetSample` is found, then rewinds one frame, re-primes the decoder
 * and skips the remaining samples to land exactly on the target.
 */
public function seekFrameFromPage(seekFunc:Int->Void, pageStart:Int, firstSample:Int, targetSample:Int) {
    var frame = 0;
    var frameStart:Int = firstSample;
    // firstSample is the sample # of the first sample that doesn't
    // overlap the previous page... note that this requires us to
    // partially discard the first packet! bleh.
    decodeState.setInputOffset(seekFunc, pageStart);
    decodeState.forcePageResync();
    // frame start is where the previous packet's last decoded sample
    // was, which corresponds to leftEnd... EXCEPT if the previous
    // packet was long and this packet is short? Probably a bug here.
    // now, we can start decoding frames... we'll only FAKE decode them,
    // until we find the frame that contains our sample; then we'll rewind,
    // and try again
    var leftEnd = 0;
    var leftStart = 0;
    var prevState = null;
    var lastState = null;
    while (true) {
        // snapshot the state so we can rewind one frame once we overshoot
        prevState = lastState;
        lastState = decodeState.clone(seekFunc);
        var initialResult = decodeInitial();
        if (initialResult == null) {
            // end of stream: fall back to the state before the last frame
            lastState = prevState;
            break;
        }
        leftStart = initialResult.left.start;
        leftEnd = initialResult.left.end;
        // the first frame's samples up to leftEnd overlap the previous page
        var start = if (frame == 0) {
            leftEnd;
        } else{
            leftStart;
        }
        // the window starts at leftStart; the last valid sample we generate
        // before the next frame's window start is rightStart-1
        if (targetSample < frameStart + initialResult.right.start - start) {
            break;
        }
        decodeState.flushPacket();
        frameStart += initialResult.right.start - start;
        ++frame;
    }
    // rewind to the frame containing the target and decode up to it
    decodeState = lastState;
    seekFunc(decodeState.inputPosition);
    previousLength = 0;
    pumpFirstFrame();
    currentSample = frameStart;
    skipSamples(targetSample - frameStart);
}
/**
 * Creates an independent copy of this decoder positioned at the same
 * stream location. Immutable setup data (twiddle factors, windows, header)
 * is shared; per-frame buffers and the input state are copied.
 */
public function clone(seekFunc:Int->Void) {
var decoder = Type.createEmptyInstance(VorbisDecoder);
decoder.currentSample = currentSample;
decoder.totalSample = totalSample;
decoder.previousLength = previousLength;
decoder.channelBufferStart = channelBufferStart;
decoder.channelBufferEnd = channelBufferEnd;
// shallow copy: these are read-only after setup, safe to share
decoder.a = a;
decoder.b = b;
decoder.c = c;
decoder.window = window;
decoder.bitReverseData = bitReverseData;
decoder.header = header;
// deep copy: mutable per-frame state must not be shared
decoder.decodeState = decodeState.clone(seekFunc);
decoder.channelBuffers = new Vector(header.channel);
decoder.previousWindow = new Vector(header.channel);
decoder.finalY = new Vector(header.channel);
for (i in 0...header.channel) {
decoder.channelBuffers[i] = VorbisTools.copyVector(channelBuffers[i]);
decoder.previousWindow[i] = VorbisTools.copyVector(previousWindow[i]);
decoder.finalY[i] = Lambda.array(finalY[i]);
}
return decoder;
}
/**
 * Re-seeks the underlying input to the decoder's recorded position,
 * e.g. after the input was moved by another reader of the same stream.
 */
public function ensurePosition(seekFunc:Int->Void) {
    var position = decodeState.inputPosition;
    seekFunc(position);
}
/**
 * Decodes the next frame into the channel buffers and returns the number
 * of valid samples produced (0 at end of stream).
 */
function getFrameFloat() {
    var decoded = decodePacket();
    if (decoded == null) {
        // end of stream: no valid region in the channel buffers
        channelBufferStart = 0;
        channelBufferEnd = 0;
        return 0;
    }
    var sampleCount = finishFrame(decoded);
    channelBufferStart = decoded.left;
    channelBufferEnd = decoded.left + sampleCount;
    return sampleCount;
}
/**
 * Decodes and discards one frame so that the overlap-add state
 * (previousWindow) is primed before real output begins.
 */
function pumpFirstFrame() {
    var packet = decodePacket();
    finishFrame(packet);
}
/**
 * Applies the overlap-add between this frame and the previous one, saves
 * this frame's right-window region for the next overlap, and returns how
 * many samples of `channelBuffers` are valid output (starting at `r.left`).
 * Returns 0 for the very first frame, which has nothing to overlap with.
 */
function finishFrame(r:DecodePacketResult):Int {
var len = r.len;
var right = r.right;
var left = r.left;
// we use right&left (the start of the right- and left-window sin()-regions)
// to determine how much to return, rather than inferring from the rules
// (same result, clearer code); 'left' indicates where our sin() window
// starts, therefore where the previous window's right edge starts, and
// therefore where to start mixing from the previous buffer. 'right'
// indicates where our sin() ending-window starts, therefore that's where
// we start saving, and where our returned-data ends.
// mixin from previous window
if (previousLength != 0) {
var n = previousLength;
var w = getWindow(n);
for (i in 0...header.channel) {
var cb = channelBuffers[i];
var pw = previousWindow[i];
// cross-fade: this frame ramps up with w[j] while the saved
// previous-frame tail ramps down with w[n-1-j]
for (j in 0...n) {
cb[left+j] = cb[left+j] * w[j] + pw[j] * w[n-1-j];
}
}
}
var prev = previousLength;
// last half of this data becomes previous window
previousLength = len - right;
// @OPTIMIZE: could avoid this copy by double-buffering the
// output (flipping previousWindow with channelBuffers), but
// then previousWindow would have to be 2x as large, and
// channelBuffers couldn't be temp mem (although they're NOT
// currently temp mem, they could be (unless we want to level
// performance by spreading out the computation))
for (i in 0...header.channel) {
var pw = previousWindow[i];
var cb = channelBuffers[i];
for (j in 0...(len - right)) {
pw[j] = cb[right + j];
}
}
if (prev == 0) {
// there was no previous packet, so this data isn't valid...
// this isn't entirely true, only the would-have-overlapped data
// isn't valid, but this seems to be what the spec requires
return 0;
}
// truncate a short frame
if (len < right) {
right = len;
}
return right - left;
}
/**
 * Returns the precomputed window whose full size is `len * 2`
 * (the caller passes the half-window length). Asserts if `len`
 * matches neither configured blocksize.
 */
function getWindow(len:Int)
{
    var fullLength = len << 1;
    if (fullLength == header.blocksize0) {
        return window[0];
    }
    if (fullLength == header.blocksize1) {
        return window[1];
    }
    VorbisTools.assert(false);
    return null;
}
/**
 * Allocates and fills the MDCT twiddle factors, window and bit-reverse
 * tables for blocksize slot `bs` (0 or 1) with transform size `n`.
 */
function initBlocksize(bs:Int, n:Int)
{
    var half = n >> 1;
    var quarter = n >> 2;
    var eighth = n >> 3;
    a[bs] = new Vector(half);
    b[bs] = new Vector(half);
    c[bs] = new Vector(quarter);
    window[bs] = new Vector(half);
    bitReverseData[bs] = new Vector(eighth);
    VorbisTools.computeTwiddleFactors(n, a[bs], b[bs], c[bs]);
    VorbisTools.computeWindow(n, window[bs]);
    VorbisTools.computeBitReverse(n, bitReverseData[bs]);
}
/**
 * Runs the inverse MDCT of size `n` in place on `buffer`, selecting the
 * precomputed tables for the short (false) or long (true) block type.
 */
function inverseMdct(buffer:Vector<Float>, n:Int, blocktype:Bool) {
    var tableIndex = blocktype ? 1 : 0;
    Mdct.inverseTransform(buffer, n, a[tableIndex], b[tableIndex], c[tableIndex], bitReverseData[tableIndex]);
}
/**
 * Decodes one complete audio packet: parses the mode/window header and,
 * if one was found, the packet body. Returns null at end of stream.
 */
function decodePacket():DecodePacketResult
{
    var initial = decodeInitial();
    return (initial == null) ? null : decodePacketRest(initial);
}
/**
 * Reads the start of the next audio packet: skips non-audio packets,
 * reads the mode number and (for long blocks) the previous/next window
 * flags, and computes the left/right window ranges for this frame.
 * Returns null when no further packet can be started.
 */
function decodeInitial():DecodeInitialResult
{
channelBufferStart = channelBufferEnd = 0;
do {
if (!decodeState.maybeStartPacket()) {
return null;
}
// check packet type
if (decodeState.readBits(1) != 0) {
// first bit set means this is not an audio packet: skip it whole
while (VorbisTools.EOP != decodeState.readPacket()) {};
continue;
}
break;
} while (true);
// mode number occupies ilog(modeCount - 1) bits
var i = decodeState.readBits(MathTools.ilog(header.modes.length - 1));
if (i == VorbisTools.EOP || i >= header.modes.length) {
throw new ReaderError(ReaderErrorType.SEEK_FAILED);
}
var m = header.modes[i];
var n, prev, next;
if (m.blockflag) {
// long block: flags tell whether the neighboring blocks are long too
n = header.blocksize1;
prev = decodeState.readBits(1);
next = decodeState.readBits(1);
} else {
prev = next = 0;
n = header.blocksize0;
}
// WINDOWING
// when a neighboring block is short, the overlap region shrinks to the
// short blocksize and is centered in this block's half
var windowCenter = n >> 1;
return {
mode : i,
left : if (m.blockflag && prev == 0) {
start : (n - header.blocksize0) >> 2,
end : (n + header.blocksize0) >> 2,
} else {
start : 0,
end : windowCenter,
},
right : if (m.blockflag && next == 0) {
start : (n * 3 - header.blocksize0) >> 2,
end : (n * 3 + header.blocksize0) >> 2,
} else {
start : windowCenter,
end : n,
},
}
}
/**
 * Decodes the body of an audio packet (after decodeInitial): floor curves,
 * residue, inverse channel coupling, floor application and inverse MDCT.
 * Fills `channelBuffers` and returns the packet result via the decode state.
 * Ported from stb_vorbis; statement order is significant throughout.
 */
function decodePacketRest(r:DecodeInitialResult):DecodePacketResult
{
var len = 0;
var m = header.modes[r.mode];
var zeroChannel = new Vector<Bool>(256);
var reallyZeroChannel = new Vector<Bool>(256);
// WINDOWING
var n = m.blockflag ? header.blocksize1 : header.blocksize0;
var map = header.mapping[m.mapping];
// FLOORS
var n2 = n >> 1;
VorbisTools.stbProf(1);
// range of floor1 y-values indexed by (floor1Multiplier - 1)
var rangeList = [256, 128, 86, 64];
var codebooks = header.codebooks;
for (i in 0...header.channel) {
var s = map.chan[i].mux;
zeroChannel[i] = false;
var floor = header.floorConfig[map.submapFloor[s]];
if (floor.type == 0) {
// floor type 0 decode is not implemented here
throw new ReaderError(INVALID_STREAM);
} else {
var g:Floor1 = floor.floor1;
if (decodeState.readBits(1) != 0) {
var fy = new Array<Int>();
var step2Flag = new Vector<Bool>(256);
var range = rangeList[g.floor1Multiplier-1];
var offset = 2;
fy = finalY[i];
// the first two y-values are coded directly
fy[0] = decodeState.readBits(MathTools.ilog(range)-1);
fy[1] = decodeState.readBits(MathTools.ilog(range)-1);
// remaining y-values come from the partition class codebooks
for (j in 0...g.partitions) {
var pclass = g.partitionClassList[j];
var cdim = g.classDimensions[pclass];
var cbits = g.classSubclasses[pclass];
var csub = (1 << cbits) - 1;
var cval = 0;
if (cbits != 0) {
var c = codebooks[g.classMasterbooks[pclass]];
cval = decodeState.decode(c);
}
var books = g.subclassBooks[pclass];
for (k in 0...cdim) {
var book = books[cval & csub];
cval >>= cbits;
fy[offset++] = if (book >= 0) {
decodeState.decode(codebooks[book]);
} else {
0;
}
}
}
if (decodeState.validBits == VorbisDecodeState.INVALID_BITS) {
// ran out of packet data mid-floor: treat channel as silent
zeroChannel[i] = true;
continue;
}
step2Flag[0] = step2Flag[1] = true;
// NOTE(review): despite its name, 'lowroom' below holds the
// *predicted* y value; this mirrors stb_vorbis where
// lowroom == pred. The arithmetic matches the reference.
var naighbors = g.neighbors;
var xlist = g.xlist;
for (j in 2...g.values) {
var low = naighbors[j][0];
var high = naighbors[j][1];
var lowroom = VorbisTools.predictPoint(xlist[j], xlist[low], xlist[high], fy[low], fy[high]);
var val = fy[j];
var highroom = range - lowroom;
var room = if (highroom < lowroom){
highroom * 2;
}else{
lowroom * 2;
}
if (val != 0) {
// a nonzero residual marks this point and its neighbors as real
step2Flag[low] = step2Flag[high] = true;
step2Flag[j] = true;
if (val >= room){
if (highroom > lowroom){
fy[j] = val - lowroom + lowroom;
}else{
fy[j] = lowroom - val + highroom - 1;
}
} else {
// small values alternate below/above the prediction
if (val & 1 != 0){
fy[j] = lowroom - ((val+1)>>1);
} else{
fy[j] = lowroom + (val>>1);
}
}
} else {
// zero residual: point sits exactly on the prediction
step2Flag[j] = false;
fy[j] = lowroom;
}
}
// defer final floor computation until _after_ residue
for (j in 0...g.values) {
if (!step2Flag[j]){
fy[j] = -1;
}
}
} else {
zeroChannel[i] = true;
}
// So we just defer everything else to later
// at this point we've decoded the floor into buffer
}
}
VorbisTools.stbProf(0);
// at this point we've decoded all floors
//if (alloc.allocBuffer) {
//    assert(alloc.allocBufferLengthInBytes == tempOffset);
//}
// re-enable coupled channels if necessary
for (i in 0...header.channel) {
reallyZeroChannel[i] = zeroChannel[i];
}
for (i in 0...map.couplingSteps) {
if (!zeroChannel[map.chan[i].magnitude] || !zeroChannel[map.chan[i].angle]) {
zeroChannel[map.chan[i].magnitude] = zeroChannel[map.chan[i].angle] = false;
}
}
// RESIDUE DECODE
for (i in 0...map.submaps) {
var residueBuffers = new Vector<Vector<Float>>(header.channel);
var doNotDecode = new Vector<Bool>(256);
var ch = 0;
for (j in 0...header.channel) {
if (map.chan[j].mux == i) {
if (zeroChannel[j]) {
doNotDecode[ch] = true;
residueBuffers[ch] = null;
} else {
doNotDecode[ch] = false;
residueBuffers[ch] = channelBuffers[j];
}
++ch;
}
}
var r = map.submapResidue[i];
var residue = header.residueConfig[r];
residue.decode(decodeState,header, residueBuffers, ch, n2, doNotDecode, channelBuffers);
}
// INVERSE COUPLING
VorbisTools.stbProf(14);
var i = map.couplingSteps;
var n2 = n >> 1;
// convert magnitude/angle pairs back into independent channels,
// iterating coupling steps in reverse order
while (--i >= 0) {
var m = channelBuffers[map.chan[i].magnitude];
var a = channelBuffers[map.chan[i].angle];
for (j in 0...n2) {
var a2, m2;
if (m[j] > 0) {
if (a[j] > 0) {
m2 = m[j];
a2 = m[j] - a[j];
} else {
a2 = m[j];
m2 = m[j] + a[j];
}
} else {
if (a[j] > 0) {
m2 = m[j];
a2 = m[j] + a[j];
} else {
a2 = m[j];
m2 = m[j] - a[j];
}
}
m[j] = m2;
a[j] = a2;
}
}
// finish decoding the floors
VorbisTools.stbProf(15);
for (i in 0...header.channel) {
if (reallyZeroChannel[i]) {
for(j in 0...n2) {
channelBuffers[i][j] = 0;
}
} else {
map.doFloor(header.floorConfig, i, n, channelBuffers[i], finalY[i], null);
}
}
// INVERSE MDCT
VorbisTools.stbProf(16);
for (i in 0...header.channel) {
inverseMdct(channelBuffers[i], n, m.blockflag);
}
VorbisTools.stbProf(0);
// this shouldn't be necessary, unless we exited on an error
// and want to flush to get to the next packet
decodeState.flushPacket();
return decodeState.finishDecodePacket(previousLength, n, r);
}
}
// Result of fully decoding one packet: `len` decoded samples, with the
// left/right window positions delimiting the usable output region.
typedef DecodePacketResult = {
var len : Int;
var left : Int;
var right : Int;
}
// Result of parsing a packet header: the mode index plus the left and
// right overlap-window ranges for this frame.
typedef DecodeInitialResult = {
var mode : Int;
var left : Range;
var right : Range;
}
// Half-open-style sample range within a frame window.
private typedef Range = {
var start : Int;
var end : Int;
}

View File

@ -0,0 +1,291 @@
package kha.audio2.ogg.vorbis;
import haxe.ds.Vector;
import haxe.io.Bytes;
import haxe.io.Input;
import haxe.PosInfos;
import kha.audio2.ogg.vorbis.data.IntPoint;
import kha.audio2.ogg.vorbis.data.ReaderError;
import kha.audio2.ogg.tools.MathTools;
/**
* ...
* @author shohei909
*/
class VorbisTools
{
// sentinel returned by packet readers at end-of-packet
static public inline var EOP = -1;
// lazily built table of Std.int(i / j) used by drawLine (see below)
static public var integerDivideTable:Vector<Vector<Int>>;
static inline var M__PI = 3.14159265358979323846264;
static inline var DIVTAB_NUMER = 32;
static inline var DIVTAB_DENOM = 64;
// lookup table mapping floor1 y-values to linear amplitude (from the
// Vorbis I specification / stb_vorbis inverse dB table)
static public var INVERSE_DB_TABLE = [
1.0649863e-07, 1.1341951e-07, 1.2079015e-07, 1.2863978e-07,
1.3699951e-07, 1.4590251e-07, 1.5538408e-07, 1.6548181e-07,
1.7623575e-07, 1.8768855e-07, 1.9988561e-07, 2.1287530e-07,
2.2670913e-07, 2.4144197e-07, 2.5713223e-07, 2.7384213e-07,
2.9163793e-07, 3.1059021e-07, 3.3077411e-07, 3.5226968e-07,
3.7516214e-07, 3.9954229e-07, 4.2550680e-07, 4.5315863e-07,
4.8260743e-07, 5.1396998e-07, 5.4737065e-07, 5.8294187e-07,
6.2082472e-07, 6.6116941e-07, 7.0413592e-07, 7.4989464e-07,
7.9862701e-07, 8.5052630e-07, 9.0579828e-07, 9.6466216e-07,
1.0273513e-06, 1.0941144e-06, 1.1652161e-06, 1.2409384e-06,
1.3215816e-06, 1.4074654e-06, 1.4989305e-06, 1.5963394e-06,
1.7000785e-06, 1.8105592e-06, 1.9282195e-06, 2.0535261e-06,
2.1869758e-06, 2.3290978e-06, 2.4804557e-06, 2.6416497e-06,
2.8133190e-06, 2.9961443e-06, 3.1908506e-06, 3.3982101e-06,
3.6190449e-06, 3.8542308e-06, 4.1047004e-06, 4.3714470e-06,
4.6555282e-06, 4.9580707e-06, 5.2802740e-06, 5.6234160e-06,
5.9888572e-06, 6.3780469e-06, 6.7925283e-06, 7.2339451e-06,
7.7040476e-06, 8.2047000e-06, 8.7378876e-06, 9.3057248e-06,
9.9104632e-06, 1.0554501e-05, 1.1240392e-05, 1.1970856e-05,
1.2748789e-05, 1.3577278e-05, 1.4459606e-05, 1.5399272e-05,
1.6400004e-05, 1.7465768e-05, 1.8600792e-05, 1.9809576e-05,
2.1096914e-05, 2.2467911e-05, 2.3928002e-05, 2.5482978e-05,
2.7139006e-05, 2.8902651e-05, 3.0780908e-05, 3.2781225e-05,
3.4911534e-05, 3.7180282e-05, 3.9596466e-05, 4.2169667e-05,
4.4910090e-05, 4.7828601e-05, 5.0936773e-05, 5.4246931e-05,
5.7772202e-05, 6.1526565e-05, 6.5524908e-05, 6.9783085e-05,
7.4317983e-05, 7.9147585e-05, 8.4291040e-05, 8.9768747e-05,
9.5602426e-05, 0.00010181521, 0.00010843174, 0.00011547824,
0.00012298267, 0.00013097477, 0.00013948625, 0.00014855085,
0.00015820453, 0.00016848555, 0.00017943469, 0.00019109536,
0.00020351382, 0.00021673929, 0.00023082423, 0.00024582449,
0.00026179955, 0.00027881276, 0.00029693158, 0.00031622787,
0.00033677814, 0.00035866388, 0.00038197188, 0.00040679456,
0.00043323036, 0.00046138411, 0.00049136745, 0.00052329927,
0.00055730621, 0.00059352311, 0.00063209358, 0.00067317058,
0.00071691700, 0.00076350630, 0.00081312324, 0.00086596457,
0.00092223983, 0.00098217216, 0.0010459992, 0.0011139742,
0.0011863665, 0.0012634633, 0.0013455702, 0.0014330129,
0.0015261382, 0.0016253153, 0.0017309374, 0.0018434235,
0.0019632195, 0.0020908006, 0.0022266726, 0.0023713743,
0.0025254795, 0.0026895994, 0.0028643847, 0.0030505286,
0.0032487691, 0.0034598925, 0.0036847358, 0.0039241906,
0.0041792066, 0.0044507950, 0.0047400328, 0.0050480668,
0.0053761186, 0.0057254891, 0.0060975636, 0.0064938176,
0.0069158225, 0.0073652516, 0.0078438871, 0.0083536271,
0.0088964928, 0.009474637, 0.010090352, 0.010746080,
0.011444421, 0.012188144, 0.012980198, 0.013823725,
0.014722068, 0.015678791, 0.016697687, 0.017782797,
0.018938423, 0.020169149, 0.021479854, 0.022875735,
0.024362330, 0.025945531, 0.027631618, 0.029427276,
0.031339626, 0.033376252, 0.035545228, 0.037855157,
0.040315199, 0.042935108, 0.045725273, 0.048696758,
0.051861348, 0.055231591, 0.058820850, 0.062643361,
0.066714279, 0.071049749, 0.075666962, 0.080584227,
0.085821044, 0.091398179, 0.097337747, 0.10366330,
0.11039993, 0.11757434, 0.12521498, 0.13335215,
0.14201813, 0.15124727, 0.16107617, 0.17154380,
0.18269168, 0.19456402, 0.20720788, 0.22067342,
0.23501402, 0.25028656, 0.26655159, 0.28387361,
0.30232132, 0.32196786, 0.34289114, 0.36517414,
0.38890521, 0.41417847, 0.44109412, 0.46975890,
0.50028648, 0.53279791, 0.56742212, 0.60429640,
0.64356699, 0.68538959, 0.72993007, 0.77736504,
0.82788260, 0.88168307, 0.9389798, 1.0
];
// Debug-only assertion: throws a ReaderError when `b` is false.
// Compiles to nothing outside -debug builds.
public static inline function assert(b:Bool, ?p:PosInfos) {
#if debug
if (!b) {
throw new ReaderError(ReaderErrorType.OTHER, "", p);
}
#end
}
// Among x[0..n-1], finds the indices of the closest values below and
// above x[n] (used for floor1 neighbor lookup during setup).
public static inline function neighbors(x:Vector<Int>, n:Int)
{
var low = -1;
var high = 65536;
var plow = 0;
var phigh = 0;
for (i in 0...n) {
if (x[i] > low && x[i] < x[n]) { plow = i; low = x[i]; }
if (x[i] < high && x[i] > x[n]) { phigh = i; high = x[i]; }
}
return {
low : plow,
high : phigh,
}
}
// Unpacks the Vorbis 32-bit packed float format (21-bit mantissa,
// 10-bit exponent biased by 788, sign bit) into a Float.
public static inline function floatUnpack(x:UInt):Float
{
// from the specification
var mantissa:Float = x & 0x1fffff;
var sign:Int = x & 0x80000000;
var exp:Int = (x & 0x7fe00000) >>> 21;
var res:Float = (sign != 0) ? -mantissa : mantissa;
return res * Math.pow(2, exp - 788);
}
// Reverses the bit order of a 32-bit value (classic mask-and-shuffle).
public static inline function bitReverse(n:UInt):UInt
{
n = ((n & 0xAAAAAAAA) >>> 1) | ((n & 0x55555555) << 1);
n = ((n & 0xCCCCCCCC) >>> 2) | ((n & 0x33333333) << 2);
n = ((n & 0xF0F0F0F0) >>> 4) | ((n & 0x0F0F0F0F) << 4);
n = ((n & 0xFF00FF00) >>> 8) | ((n & 0x00FF00FF) << 8);
return (n >>> 16) | (n << 16);
}
// Comparator ordering IntPoints by x coordinate.
public static inline function pointCompare(a:IntPoint, b:IntPoint) {
return if (a.x < b.x) -1 else if (a.x > b.x) 1 else 0;
}
// Ascending comparator for unsigned ints (safe for values >= 0x80000000,
// unlike plain subtraction).
public static function uintAsc(a:UInt, b:UInt) {
return if (a < b) {
-1;
} else if (a == b){
0;
} else {
1;
}
}
// Computes the largest r with r^dim <= entries (codebook VQ lookup1
// value count), compensating for floating-point rounding.
public static function lookup1Values(entries:Int, dim:Int)
{
var r = Std.int(Math.exp(Math.log(entries) / dim));
if (Std.int(Math.pow(r + 1, dim)) <= entries) {
r++;
}
assert(Math.pow(r+1, dim) > entries);
assert(Std.int(Math.pow(r, dim)) <= entries); // (int),floor() as above
return r;
}
// Fills `window` with the first half of the Vorbis window function
// sin(pi/2 * sin^2(...)) for transform size n.
public static function computeWindow(n:Int, window:Vector<Float>)
{
var n2 = n >> 1;
for (i in 0...n2) {
window[i] = Math.sin(0.5 * M__PI * square(Math.sin((i - 0 + 0.5) / n2 * 0.5 * M__PI)));
}
}
public static function square(f:Float) {
return f * f;
}
// Builds the bit-reversal index table used by the inverse MDCT
// (n/8 entries, each pre-shifted left by 2).
public static function computeBitReverse(n:Int, rev:Vector<Int>)
{
var ld = MathTools.ilog(n) - 1;
var n8 = n >> 3;
for (i in 0...n8) {
rev[i] = (bitReverse(i) >>> (32 - ld + 3)) << 2;
}
}
// Precomputes the A (n/2), B (n/2) and C (n/4) twiddle-factor tables
// for the inverse MDCT of size n (interleaved cos/sin pairs).
public static function computeTwiddleFactors(n:Int, af:Vector<Float>, bf:Vector<Float>, cf:Vector<Float>)
{
var n4 = n >> 2;
var n8 = n >> 3;
var k2 = 0;
for (k in 0...n4) {
af[k2] = Math.cos(4*k*M__PI/n);
af[k2 + 1] = -Math.sin(4*k*M__PI/n);
bf[k2] = Math.cos((k2+1)*M__PI/n/2) * 0.5;
bf[k2 + 1] = Math.sin((k2 + 1) * M__PI / n / 2) * 0.5;
k2 += 2;
}
var k2 = 0;
for (k in 0...n8) {
cf[k2 ] = Math.cos(2*(k2+1) * M__PI/n);
cf[k2+1] = -Math.sin(2*(k2+1) * M__PI/n);
k2 += 2;
}
}
// Multiplies `output[x0..x1)` by the floor curve: a Bresenham-style line
// from (x0,y0) to (x1,y1) mapped through INVERSE_DB_TABLE, clipped to n.
public static function drawLine(output:Vector<Float>, x0:Int, y0:Int, x1:Int, y1:Int, n:Int)
{
if (integerDivideTable == null) {
// build the small-division table on first use
integerDivideTable = new Vector(DIVTAB_NUMER);
for (i in 0...DIVTAB_NUMER) {
integerDivideTable[i] = new Vector(DIVTAB_DENOM);
for (j in 1...DIVTAB_DENOM) {
integerDivideTable[i][j] = Std.int(i / j);
}
}
}
var dy = y1 - y0;
var adx = x1 - x0;
var ady = dy < 0 ? -dy : dy;
var base:Int;
var x = x0;
var y = y0;
var err = 0;
// sy is the step used when the error term overflows; base otherwise
var sy = if (adx < DIVTAB_DENOM && ady < DIVTAB_NUMER) {
if (dy < 0) {
base = -integerDivideTable[ady][adx];
base - 1;
} else {
base = integerDivideTable[ady][adx];
base + 1;
}
} else {
base = Std.int(dy / adx);
if (dy < 0) {
base - 1;
} else {
base + 1;
}
}
ady -= (base < 0 ? -base : base) * adx;
if (x1 > n) {
x1 = n;
}
output[x] *= INVERSE_DB_TABLE[y];
for (i in (x + 1)...x1) {
err += ady;
if (err >= adx) {
err -= adx;
y += sy;
} else {
y += base;
}
output[i] *= INVERSE_DB_TABLE[y];
}
}
// Profiling hook kept from stb_vorbis; expands to nothing.
public macro static inline function stbProf(i:Int)
{
return macro null;// macro trace($v { i }, channelBuffers[0][0], channelBuffers[0][1]);
}
// Linearly interpolates/extrapolates the floor1 y-value at x from the
// neighboring points (x0,y0) and (x1,y1), using integer math that
// rounds toward y0 (matches the spec's low_neighbor prediction).
public static inline function predictPoint(x:Int, x0:Int, x1:Int, y0:Int, y1:Int):Int
{
var dy = y1 - y0;
var adx = x1 - x0;
// @OPTIMIZE: force int division to round in the right direction... is this necessary on x86?
var err = Math.abs(dy) * (x - x0);
var off = Std.int(err / adx);
return dy < 0 ? (y0 - off) : (y0 + off);
}
// Allocates a Float vector; on neko the elements must be zeroed
// explicitly because they are not default-initialized there.
public static inline function emptyFloatVector(len:Int) {
var vec = new Vector<Float>(len);
#if neko
for (i in 0...len) {
vec[i] = 0;
}
#end
return vec;
}
// Element-wise copy of a Float vector.
static public function copyVector(source:Vector<Float>):Vector<Float> {
var dest:Vector<Float> = new Vector<Float>(source.length);
for (i in 0...source.length) {
dest[i] = source[i];
}
return dest;
}
}

View File

@ -0,0 +1,594 @@
package kha.audio2.ogg.vorbis.data;
import haxe.ds.Vector;
import haxe.io.Bytes;
import haxe.io.Input;
import kha.audio2.ogg.tools.MathTools;
import kha.audio2.ogg.vorbis.data.ReaderError.ReaderErrorType;
import kha.audio2.ogg.vorbis.VorbisDecodeState;
/**
* ...
* @author shohei909
*/
class Codebook
{
static public inline var NO_CODE = 255;
public var dimensions:Int;
public var entries:Int;
public var codewordLengths:Vector<Int>; //uint8*
public var minimumValue:Float;
public var deltaValue:Float;
public var valueBits:Int; //uint8
public var lookupType:Int; //uint8
public var sequenceP:Bool; //uint8
public var sparse:Bool; //uint8
public var lookupValues:UInt; //uint32
public var multiplicands:Vector<Float>; // codetype *
public var codewords:Vector<UInt>; //uint32*
public var fastHuffman:Vector<Int>; //[FAST_HUFFMAN_TABLE_SIZE];
public var sortedCodewords:Array<UInt>; //uint32*
public var sortedValues:Vector<Int>;
public var sortedEntries:Int;
public function new () {
}
static public function read(decodeState:VorbisDecodeState):Codebook {
var c = new Codebook();
if (decodeState.readBits(8) != 0x42 || decodeState.readBits(8) != 0x43 || decodeState.readBits(8) != 0x56) {
throw new ReaderError(ReaderErrorType.INVALID_SETUP);
}
var x = decodeState.readBits(8);
c.dimensions = (decodeState.readBits(8) << 8) + x;
var x = decodeState.readBits(8);
var y = decodeState.readBits(8);
c.entries = (decodeState.readBits(8) << 16) + (y << 8) + x;
var ordered = decodeState.readBits(1);
c.sparse = (ordered != 0) ? false : (decodeState.readBits(1) != 0);
var lengths = new Vector(c.entries);
if (!c.sparse) {
c.codewordLengths = lengths;
}
var total = 0;
if (ordered != 0) {
var currentEntry = 0;
var currentLength = decodeState.readBits(5) + 1;
while (currentEntry < c.entries) {
var limit = c.entries - currentEntry;
var n = decodeState.readBits(MathTools.ilog(limit));
if (currentEntry + n > c.entries) {
throw new ReaderError(ReaderErrorType.INVALID_SETUP, "codebook entrys");
}
for (i in 0...n) {
lengths.set(currentEntry + i, currentLength);
}
currentEntry += n;
currentLength++;
}
} else {
for (j in 0...c.entries) {
var present = (c.sparse) ? decodeState.readBits(1) : 1;
if (present != 0) {
lengths.set(j, decodeState.readBits(5) + 1);
total++;
} else {
lengths.set(j, NO_CODE);
}
}
}
if (c.sparse && total >= (c.entries >> 2)) {
c.codewordLengths = lengths;
c.sparse = false;
}
c.sortedEntries = if (c.sparse) {
total;
} else {
var sortedCount = 0;
for (j in 0...c.entries) {
var l = lengths.get(j);
if (l > Setting.FAST_HUFFMAN_LENGTH && l != NO_CODE) {
++sortedCount;
}
}
sortedCount;
}
var values:Vector<UInt> = null;
if (!c.sparse) {
c.codewords = new Vector<UInt>(c.entries);
} else {
if (c.sortedEntries != 0) {
c.codewordLengths = new Vector(c.sortedEntries);
c.codewords = new Vector<UInt>(c.entries);
values = new Vector<UInt>(c.entries);
}
var size:Int = c.entries + (32 + 32) * c.sortedEntries;
}
if (!c.computeCodewords(lengths, c.entries, values)) {
throw new ReaderError(ReaderErrorType.INVALID_SETUP, "compute codewords");
}
if (c.sortedEntries != 0) {
// allocate an extra slot for sentinels
c.sortedCodewords = [];
// allocate an extra slot at the front so that sortedValues[-1] is defined
// so that we can catch that case without an extra if
c.sortedValues = new Vector<Int>(c.sortedEntries);
c.computeSortedHuffman(lengths, values);
}
if (c.sparse) {
values = null;
c.codewords = null;
lengths = null;
}
c.computeAcceleratedHuffman();
c.lookupType = decodeState.readBits(4);
if (c.lookupType > 2) {
throw new ReaderError(ReaderErrorType.INVALID_SETUP, "codebook lookup type");
}
if (c.lookupType > 0) {
c.minimumValue = VorbisTools.floatUnpack(decodeState.readBits(32));
c.deltaValue = VorbisTools.floatUnpack(decodeState.readBits(32));
c.valueBits = decodeState.readBits(4) + 1;
c.sequenceP = (decodeState.readBits(1) != 0);
if (c.lookupType == 1) {
c.lookupValues = VorbisTools.lookup1Values(c.entries, c.dimensions);
} else {
c.lookupValues = c.entries * c.dimensions;
}
var mults = new Vector<Int>(c.lookupValues);
for (j in 0...c.lookupValues) {
var q = decodeState.readBits(c.valueBits);
if (q == VorbisTools.EOP) {
throw new ReaderError(ReaderErrorType.INVALID_SETUP, "fail lookup");
}
mults[j] = q;
}
{
c.multiplicands = new Vector(c.lookupValues);
//STB_VORBIS_CODEBOOK_FLOATS = true
for (j in 0...c.lookupValues) {
c.multiplicands[j] = mults[j] * c.deltaValue + c.minimumValue;
}
}
//STB_VORBIS_CODEBOOK_FLOATS = true
if (c.lookupType == 2 && c.sequenceP) {
for (j in 1...c.lookupValues) {
c.multiplicands[j] = c.multiplicands[j - 1];
}
c.sequenceP = false;
}
}
return c;
}
inline function addEntry(huffCode:UInt, symbol:Int, count:Int, len:Int, values:Vector<UInt>)
{
if (!sparse) {
codewords[symbol] = huffCode;
} else {
codewords[count] = huffCode;
codewordLengths.set(count, len);
values[count] = symbol;
}
}
inline function includeInSort(len:Int)
{
return if (sparse) {
VorbisTools.assert(len != NO_CODE);
true;
} else if (len == NO_CODE) {
false;
} else if (len > Setting.FAST_HUFFMAN_LENGTH) {
true;
} else {
false;
}
}
function computeCodewords(len:Vector<Int>, n:Int, values:Vector<UInt>)
{
var available = new Vector<UInt>(32);
for (x in 0...32) available[x] = 0;
// find the first entry
var k = 0;
while (k < n) {
if (len.get(k) < NO_CODE) {
break;
}
k++;
}
if (k == n) {
VorbisTools.assert(sortedEntries == 0);
return true;
}
var m = 0;
// add to the list
addEntry(0, k, m++, len.get(k), values);
// add all available leaves
var i = 0;
while (++i <= len.get(k)) {
available[i] = (1:UInt) << ((32 - i):UInt);
}
// note that the above code treats the first case specially,
// but it's really the same as the following code, so they
// could probably be combined (except the initial code is 0,
// and I use 0 in available[] to mean 'empty')
i = k;
while (++i < n) {
var z = len.get(i);
if (z == NO_CODE) continue;
// find lowest available leaf (should always be earliest,
// which is what the specification calls for)
// note that this property, and the fact we can never have
// more than one free leaf at a given level, isn't totally
// trivial to prove, but it seems true and the assert never
// fires, so!
while (z > 0 && available[z] == 0) --z;
if (z == 0) {
return false;
}
var res:UInt = available[z];
available[z] = 0;
addEntry(VorbisTools.bitReverse(res), i, m++, len.get(i), values);
// propogate availability up the tree
if (z != len.get(i)) {
var y = len.get(i);
while (y > z) {
VorbisTools.assert(available[y] == 0);
available[y] = res + (1 << (32 - y));
y--;
}
}
}
return true;
}
function computeSortedHuffman(lengths:Vector<Int>, values:Vector<UInt>)
{
// build a list of all the entries
// OPTIMIZATION: don't include the short ones, since they'll be caught by FAST_HUFFMAN.
// this is kind of a frivolous optimization--I don't see any performance improvement,
// but it's like 4 extra lines of code, so.
if (!sparse) {
var k = 0;
for (i in 0...entries) {
if (includeInSort(lengths.get(i))) {
sortedCodewords[k++] = VorbisTools.bitReverse(codewords[i]);
}
}
VorbisTools.assert(k == sortedEntries);
} else {
for (i in 0...sortedEntries) {
sortedCodewords[i] = VorbisTools.bitReverse(codewords[i]);
}
}
sortedCodewords[sortedEntries] = 0xffffffff;
sortedCodewords.sort(VorbisTools.uintAsc);
var len = sparse ? sortedEntries : entries;
// now we need to indicate how they correspond; we could either
// #1: sort a different data structure that says who they correspond to
// #2: for each sorted entry, search the original list to find who corresponds
// #3: for each original entry, find the sorted entry
// #1 requires extra storage, #2 is slow, #3 can use binary search!
for (i in 0...len) {
var huffLen = sparse ? lengths.get(values[i]) : lengths.get(i);
if (includeInSort(huffLen)) {
var code = VorbisTools.bitReverse(codewords[i]);
var x = 0;
var n = sortedEntries;
while (n > 1) {
// invariant: sc[x] <= code < sc[x+n]
var m = x + (n >> 1);
if (sortedCodewords[m] <= code) {
x = m;
n -= (n>>1);
} else {
n >>= 1;
}
}
//VorbisTools.assert(sortedCodewords[x] == code);
if (sparse) {
sortedValues[x] = values[i];
codewordLengths.set(x, huffLen);
} else {
sortedValues[x] = i;
}
}
}
}
function computeAcceleratedHuffman()
{
fastHuffman = new Vector(Setting.FAST_HUFFMAN_TABLE_SIZE);
fastHuffman[0] = -1;
for (i in 0...(Setting.FAST_HUFFMAN_TABLE_SIZE)) {
fastHuffman[i] = -1;
}
var len = (sparse) ? sortedEntries : entries;
//STB_VORBIS_FAST_HUFFMAN_SHORT
//if (len > 32767) len = 32767; // largest possible value we can encode!
for (i in 0...len) {
if (codewordLengths[i] <= Setting.FAST_HUFFMAN_LENGTH) {
var z:Int = (sparse) ? VorbisTools.bitReverse(sortedCodewords[i]) : codewords[i];
// set table entries for all bit combinations in the higher bits
while (z < Setting.FAST_HUFFMAN_TABLE_SIZE) {
fastHuffman[z] = i;
z += 1 << codewordLengths[i];
}
}
}
}
function codebookDecode(decodeState:VorbisDecodeState, output:Vector<Float>, offset:Int, len:Int)
{
var z = decodeStart(decodeState);
var lookupValues = this.lookupValues;
var sequenceP = this.sequenceP;
var multiplicands = this.multiplicands;
var minimumValue = this.minimumValue;
if (z < 0) {
return false;
}
if (len > dimensions) {
len = dimensions;
}
// STB_VORBIS_DIVIDES_IN_CODEBOOK = true
if (lookupType == 1) {
var div = 1;
var last = 0.0;
for (i in 0...len) {
var off = Std.int(z / div) % lookupValues;
var val = multiplicands[off] + last;
output[offset + i] += val;
if (sequenceP) {
last = val + minimumValue;
}
div *= lookupValues;
}
return true;
}
z *= dimensions;
if (sequenceP) {
var last = 0.0;
for (i in 0...len) {
var val = multiplicands[z + i] + last;
output[offset + i] += val;
last = val + minimumValue;
}
} else {
var last = 0.0;
for (i in 0...len) {
output[offset + i] += multiplicands[z + i] + last;
}
}
return true;
}
function codebookDecodeStep(decodeState:VorbisDecodeState, output:Vector<Float>, offset:Int, len:Int, step:Int)
{
var z = decodeStart(decodeState);
var last = 0.0;
if (z < 0) {
return false;
}
if (len > dimensions) {
len = dimensions;
}
var lookupValues = this.lookupValues;
var sequenceP = this.sequenceP;
var multiplicands = this.multiplicands;
// STB_VORBIS_DIVIDES_IN_CODEBOOK = true
if (lookupType == 1) {
var div = 1;
for (i in 0...len) {
var off = Std.int(z / div) % lookupValues;
var val = multiplicands[off] + last;
output[offset + i * step] += val;
if (sequenceP) {
last = val;
}
div *= lookupValues;
}
return true;
}
z *= dimensions;
for (i in 0...len) {
var val = multiplicands[z + i] + last;
output[offset + i * step] += val;
if (sequenceP) {
last = val;
}
}
return true;
}
inline function decodeStart(decodeState:VorbisDecodeState)
{
return decodeState.decode(this);
//var z = -1;
//// type 0 is only legal in a scalar context
//if (lookupType == 0) {
// throw new ReaderError(INVALID_STREAM);
//} else {
// z = decodeState.decode(this);
// //if (sparse) VorbisTools.assert(z < sortedEntries);
// if (z < 0) { // check for VorbisTools.EOP
// if (decodeState.isLastByte()) {
// return z;
// } else {
// throw new ReaderError(INVALID_STREAM);
// }
// } else {
// return z;
// }
//}
}
static var delay = 0;
public function decodeDeinterleaveRepeat(decodeState:VorbisDecodeState, residueBuffers:Vector<Vector<Float>>, ch:Int, cInter:Int, pInter:Int, len:Int, totalDecode:Int)
{
var effective = dimensions;
// type 0 is only legal in a scalar context
if (lookupType == 0) {
throw new ReaderError(INVALID_STREAM);
}
var multiplicands = this.multiplicands;
var sequenceP = this.sequenceP;
var lookupValues = this.lookupValues;
while (totalDecode > 0) {
var last = 0.0;
var z = decodeState.decode(this);
if (z < 0) {
if (decodeState.isLastByte()) {
return null;
}
throw new ReaderError(INVALID_STREAM);
}
// if this will take us off the end of the buffers, stop short!
// we check by computing the length of the virtual interleaved
// buffer (len*ch), our current offset within it (pInter*ch)+(cInter),
// and the length we'll be using (effective)
if (cInter + pInter * ch + effective > len * ch) {
effective = len * ch - (pInter * ch - cInter);
}
if (lookupType == 1) {
var div = 1;
if (sequenceP) {
for (i in 0...effective) {
var off = Std.int(z / div) % lookupValues;
var val = multiplicands[off] + last;
residueBuffers[cInter][pInter] += val;
if (++cInter == ch) {
cInter = 0;
++pInter;
}
last = val;
div *= lookupValues;
}
} else {
for (i in 0...effective) {
var off = Std.int(z / div) % lookupValues;
var val = multiplicands[off] + last;
residueBuffers[cInter][pInter] += val;
if (++cInter == ch) {
cInter = 0;
++pInter;
}
div *= lookupValues;
}
}
} else {
z *= dimensions;
if (sequenceP) {
for (i in 0...effective) {
var val = multiplicands[z + i] + last;
residueBuffers[cInter][pInter] += val;
if (++cInter == ch) {
cInter = 0;
++pInter;
}
last = val;
}
} else {
for (i in 0...effective) {
var val = multiplicands[z + i] + last;
residueBuffers[cInter][pInter] += val;
if (++cInter == ch) {
cInter = 0;
++pInter;
}
}
}
}
totalDecode -= effective;
}
return {
cInter : cInter,
pInter : pInter
}
}
/**
 * Decodes residue values into `target` using this codebook.
 * `rtype` 0 uses the stepped (interleaved) layout; types 1 and 2 decode
 * contiguous runs of `dimensions` values. Returns false on end-of-packet.
 */
public function residueDecode(decodeState:VorbisDecodeState, target:Vector<Float>, offset:Int, n:Int, rtype:Int)
{
    if (rtype == 0) {
        // Stepped layout: each decoded vector is spread across the target
        // with stride `stride`.
        var stride = Std.int(n / dimensions);
        for (i in 0...stride) {
            if (!codebookDecodeStep(decodeState, target, offset + i, n - offset - i, stride)) {
                return false;
            }
        }
        return true;
    }

    // Contiguous layout: decode `dimensions` values at a time until n are done.
    var consumed = 0;
    while (consumed < n) {
        if (!codebookDecode(decodeState, target, offset, n - consumed)) {
            return false;
        }
        consumed += dimensions;
        offset += dimensions;
    }
    return true;
}
}

View File

@ -0,0 +1,130 @@
package kha.audio2.ogg.vorbis.data;
/**
 * Parsed Vorbis comment (metadata) block: case-insensitive tag names mapped
 * to one or more string values, with convenience accessors for common tags.
 * @author shohei909
 */
class Comment {
    // Lower-cased tag name -> every value that was added for that tag.
    public var data(default, null):Map<String, Array<String>>;

    public var title(get, never):String;
    function get_title() return getString("title");

    // NOTE(review): behavior when the tag is absent depends on Std.parseInt(null),
    // which is target-specific — confirm on the targets you ship to.
    public var loopStart(get, never):Null<Int>;
    function get_loopStart() return Std.parseInt(getString("loopstart"));

    public var loopLength(get, never):Null<Int>;
    function get_loopLength() return Std.parseInt(getString("looplength"));

    public var version(get, never):String;
    function get_version() return getString("version");

    public var album(get, never):String;
    function get_album() return getString("album");

    public var organization(get, never):String;
    function get_organization() return getString("organization");

    public var tracknumber(get, never):String;
    function get_tracknumber() return getString("tracknumber");

    public var performer(get, never):String;
    function get_performer() return getString("performer");

    public var copyright(get, never):String;
    function get_copyright() return getString("copyright");

    public var license(get, never):String;
    function get_license() return getString("license");

    public var artist(get, never):String;
    function get_artist() return getString("artist");

    public var description(get, never):String;
    function get_description() return getString("description");

    public var genre(get, never):String;
    function get_genre() return getString("genre");

    public var date(get, never):String;
    function get_date() return getString("date");

    public var location(get, never):String;
    function get_location() return getString("location");

    public var contact(get, never):String;
    function get_contact() return getString("contact");

    public var isrc(get, never):String;
    function get_isrc() return getString("isrc");

    public var artists(get, never):Array<String>;
    function get_artists() return getArray("artist");

    public function new() {
        data = new Map();
    }

    // Records one tag value; tag names are treated case-insensitively.
    public function add(key:String, value:String) {
        key = key.toLowerCase();
        var values = data[key];
        if (values == null) {
            data[key] = [value];
        } else {
            values.push(value);
        }
    }

    // First value recorded for the tag, or null if the tag is absent.
    public function getString(key:String) {
        var values = data[key.toLowerCase()];
        return values == null ? null : values[0];
    }

    // All values recorded for the tag, or null if the tag is absent.
    public function getArray(key:String) {
        return data[key.toLowerCase()];
    }
}

View File

@ -0,0 +1,151 @@
package kha.audio2.ogg.vorbis.data;
import haxe.ds.Vector;
import haxe.io.Input;
import kha.audio2.ogg.vorbis.data.ReaderError;
import kha.audio2.ogg.vorbis.VorbisDecodeState;
/**
 * Vorbis floor configuration, parsed from the setup header.
 * Holds either a floor0 (unsupported) or floor1 configuration.
 * @author shohei909
 */
class Floor
{
    public var floor0:Floor0;
    public var floor1:Floor1;
    public var type:Int;

    function new()
    {
    }

    /**
     * Reads one floor configuration from the setup-header bitstream.
     * Floor type 0 is parsed but then rejected with FEATURE_NOT_SUPPORTED;
     * floor type 1 is fully read, and its x-list sort order and neighbor
     * table are precomputed for decode time.
     * Throws ReaderError(INVALID_SETUP) on any out-of-range book reference.
     */
    public static function read(decodeState:VorbisDecodeState, codebooks:Vector<Codebook>):Floor
    {
        var floor = new Floor();
        floor.type = decodeState.readBits(16);
        if (floor.type > 1) {
            throw new ReaderError(INVALID_SETUP);
        }
        if (floor.type == 0) {
            var g = floor.floor0 = new Floor0();
            g.order = decodeState.readBits(8);
            g.rate = decodeState.readBits(16);
            g.barkMapSize = decodeState.readBits(16);
            g.amplitudeBits = decodeState.readBits(6);
            g.amplitudeOffset = decodeState.readBits(8);
            g.numberOfBooks = decodeState.readBits(4) + 1;
            // FIX: bookList was never allocated, so the writes below hit a
            // null Vector and crashed before the intended
            // FEATURE_NOT_SUPPORTED error could be reported.
            g.bookList = new Vector(g.numberOfBooks);
            for (j in 0...g.numberOfBooks) {
                g.bookList[j] = decodeState.readBits(8);
            }
            // floor 0 is not supported by this decoder
            throw new ReaderError(FEATURE_NOT_SUPPORTED);
        } else {
            var p = new Array<IntPoint>();
            var g = floor.floor1 = new Floor1();
            var maxClass = -1;
            g.partitions = decodeState.readBits(5);
            g.partitionClassList = new Vector(g.partitions);
            for (j in 0...g.partitions) {
                g.partitionClassList[j] = decodeState.readBits(4);
                if (g.partitionClassList[j] > maxClass) {
                    maxClass = g.partitionClassList[j];
                }
            }
            g.classDimensions = new Vector(maxClass + 1);
            g.classMasterbooks = new Vector(maxClass + 1);
            g.classSubclasses = new Vector(maxClass + 1);
            g.subclassBooks = new Vector(maxClass + 1);
            for (j in 0...(maxClass + 1)) {
                g.classDimensions[j] = decodeState.readBits(3) + 1;
                g.classSubclasses[j] = decodeState.readBits(2);
                if (g.classSubclasses[j] != 0) {
                    g.classMasterbooks[j] = decodeState.readBits(8);
                    if (g.classMasterbooks[j] >= codebooks.length) {
                        throw new ReaderError(INVALID_SETUP);
                    }
                }
                var kl = (1 << g.classSubclasses[j]);
                g.subclassBooks[j] = new Vector(kl);
                for (k in 0...kl) {
                    // book index is stored +1 so that 0 can mean "unused" (-1 here)
                    g.subclassBooks[j][k] = decodeState.readBits(8)-1;
                    if (g.subclassBooks[j][k] >= codebooks.length) {
                        throw new ReaderError(INVALID_SETUP);
                    }
                }
            }
            g.floor1Multiplier = decodeState.readBits(2) + 1;
            g.rangebits = decodeState.readBits(4);
            g.xlist = new Vector(31*8+2);
            g.xlist[0] = 0;
            g.xlist[1] = 1 << g.rangebits;
            g.values = 2;
            for (j in 0...g.partitions) {
                var c = g.partitionClassList[j];
                for (k in 0...g.classDimensions[c]) {
                    g.xlist[g.values] = decodeState.readBits(g.rangebits);
                    g.values++;
                }
            }
            // precompute the sorting
            for (j in 0...g.values) {
                p.push(new IntPoint());
                p[j].x = g.xlist[j];
                p[j].y = j;
            }
            p.sort(VorbisTools.pointCompare);
            g.sortedOrder = new Vector(g.values);
            for (j in 0...g.values) {
                g.sortedOrder[j] = p[j].y;
            }
            g.neighbors = new Vector(g.values);
            // precompute the neighbors
            for (j in 2...g.values) {
                var ne = VorbisTools.neighbors(g.xlist, j);
                g.neighbors[j] = new Vector(g.values);
                g.neighbors[j][0] = ne.low;
                g.neighbors[j][1] = ne.high;
            }
        }
        return floor;
    }
}
/**
 * Floor type 0 configuration. It is parsed by Floor.read but not supported:
 * the decoder throws FEATURE_NOT_SUPPORTED right after reading it.
 */
class Floor0
{
    public var order:Int; //uint8
    public var rate:Int; //uint16
    public var barkMapSize:Int; //uint16
    public var amplitudeBits:Int; //uint8
    public var amplitudeOffset:Int; //uint8
    public var numberOfBooks:Int; //uint8
    public var bookList:Vector<UInt>; //uint8 [16] varies
    public function new() {
    }
}
/**
 * Floor type 1 configuration, filled in by Floor.read.
 * `sortedOrder` and `neighbors` are precomputed there so the per-frame
 * curve synthesis (Mapping.doFloor) can run without sorting.
 */
class Floor1
{
    public var partitions:Int; // uint8
    public var partitionClassList:Vector<Int>; // uint8 varies
    public var classDimensions:Vector<Int>; // uint8 [16] varies
    public var classSubclasses:Vector<Int>; // uint8 [16] varies
    public var classMasterbooks:Vector<Int>; // uint8 [16] varies
    public var subclassBooks:Vector<Vector<Int>>; //int 16 [16][8] varies
    public var xlist:Vector<Int>; //uint16 [31*8+2] varies
    public var sortedOrder:Vector<Int>; //uint8 [31 * 8 + 2];
    public var neighbors:Vector<Vector<Int>>; //uint8[31 * 8 + 2][2];
    // amplitude multiplier (1..4) applied to decoded floor y-values
    public var floor1Multiplier:Int;
    // bit width of the x coordinates read for each partition
    public var rangebits:Int;
    // number of valid entries in xlist/sortedOrder/neighbors
    public var values:Int;
    public function new() {
    }
}

View File

@ -0,0 +1,213 @@
package kha.audio2.ogg.vorbis.data;
import haxe.ds.Vector;
import haxe.io.BytesInput;
import haxe.io.BytesOutput;
import haxe.io.Input;
import haxe.io.Output;
import kha.audio2.ogg.vorbis.data.Comment;
import kha.audio2.ogg.vorbis.data.Page.PageFlag;
import kha.audio2.ogg.vorbis.data.ReaderError.ReaderErrorType;
import kha.audio2.ogg.vorbis.VorbisDecodeState;
/**
 * Parsed Vorbis stream header: the identification, comment, and setup
 * packets, in that order. `read` consumes all three packets from the
 * decode state and returns the fully-populated header.
 * @author shohei909
 */
class Header {
    // Packet-type bytes of the three Vorbis header packets.
    static public inline var PACKET_ID = 1;
    static public inline var PACKET_COMMENT = 3;
    static public inline var PACKET_SETUP = 5;

    public var maximumBitRate(default, null):UInt;
    public var nominalBitRate(default, null):UInt;
    public var minimumBitRate(default, null):UInt;
    public var sampleRate(default, null):UInt;
    public var channel(default, null):Int;
    // short and long window sizes (powers of two, 64..8192)
    public var blocksize0(default, null):Int;
    public var blocksize1(default, null):Int;
    public var codebooks(default, null):Vector<Codebook>;
    public var floorConfig(default, null):Vector<Floor>;
    public var residueConfig(default, null):Vector<Residue>;
    public var mapping(default, null):Vector<Mapping>;
    public var modes(default, null):Vector<Mode>; // [64] varies
    public var comment(default, null):Comment;
    public var vendor(default, null):String;

    function new() {
    }

    /**
     * Reads the three Vorbis header packets from the stream.
     * Field order below is fixed by the Vorbis I bitstream layout; do not
     * reorder the reads. Throws ReaderError on any structural violation.
     */
    static public function read(decodeState:VorbisDecodeState):Header {
        // -------- packet 1: identification --------
        var page = decodeState.page;
        page.start(decodeState);
        // first page must be flagged beginning-of-stream and nothing else
        if ((page.flag & PageFlag.FIRST_PAGE) == 0) {
            throw new ReaderError(INVALID_FIRST_PAGE, "not firstPage");
        }
        if ((page.flag & PageFlag.LAST_PAGE) != 0) {
            throw new ReaderError(INVALID_FIRST_PAGE, "lastPage");
        }
        if ((page.flag & PageFlag.CONTINUED_PACKET) != 0) {
            throw new ReaderError(INVALID_FIRST_PAGE, "continuedPacket");
        }
        decodeState.firstPageValidate();
        if (decodeState.readByte() != PACKET_ID) {
            throw new ReaderError(INVALID_FIRST_PAGE, "decodeState head");
        }
        // vorbis header
        decodeState.vorbisValidate();
        // vorbisVersion
        var version = decodeState.readInt32();
        if (version != 0) {
            throw new ReaderError(INVALID_FIRST_PAGE, "vorbis version : " + version);
        }
        var header = new Header();
        header.channel = decodeState.readByte();
        if (header.channel == 0) {
            throw new ReaderError(INVALID_FIRST_PAGE, "no channel");
        } else if (header.channel > Setting.MAX_CHANNELS) {
            throw new ReaderError(TOO_MANY_CHANNELS, "too many channels");
        }
        header.sampleRate = decodeState.readInt32();
        if (header.sampleRate == 0) {
            throw new ReaderError(INVALID_FIRST_PAGE, "no sampling rate");
        }
        header.maximumBitRate = decodeState.readInt32();
        header.nominalBitRate = decodeState.readInt32();
        header.minimumBitRate = decodeState.readInt32();
        // one byte packs both block-size exponents: low nibble = blocksize0
        var x = decodeState.readByte();
        var log0 = x & 15;
        var log1 = x >> 4;
        header.blocksize0 = 1 << log0;
        header.blocksize1 = 1 << log1;
        // spec: 64 <= blocksize <= 8192 and blocksize0 <= blocksize1
        if (log0 < 6 || log0 > 13) {
            throw new ReaderError(INVALID_SETUP);
        }
        if (log1 < 6 || log1 > 13) {
            throw new ReaderError(INVALID_SETUP);
        }
        if (log0 > log1) {
            throw new ReaderError(INVALID_SETUP);
        }
        // framingFlag
        var x = decodeState.readByte();
        if (x & 1 == 0) {
            throw new ReaderError(INVALID_FIRST_PAGE);
        }
        // -------- packet 2: comment fields --------
        decodeState.page.start(decodeState);
        decodeState.startPacket();
        var len = 0;
        var output = new BytesOutput();
        // collect the whole comment packet into one buffer before parsing
        while((len = decodeState.next()) != 0) {
            output.write(decodeState.readBytes(len));
            decodeState.bytesInSeg = 0;
        }
        {
            var packetInput = new BytesInput(output.getBytes());
            // skip packet-type byte and the 6-byte "vorbis" signature
            packetInput.readByte();
            packetInput.read(6);
            var vendorLength:UInt = packetInput.readInt32();
            header.vendor = packetInput.readString(vendorLength);
            header.comment = new Comment();
            var commentCount = packetInput.readInt32();
            for (i in 0...commentCount) {
                var n = packetInput.readInt32();
                var str = packetInput.readString(n);
                // each comment is "NAME=value"; entries without '=' are ignored
                var splitter = str.indexOf("=");
                if (splitter != -1) {
                    header.comment.add(str.substring(0, splitter), str.substring(splitter + 1));
                }
            }
            // framing bit closes the comment packet
            var x = packetInput.readByte();
            if (x & 1 == 0) {
                throw new ReaderError(ReaderErrorType.INVALID_SETUP);
            }
        }
        // -------- packet 3: setup --------
        decodeState.startPacket();
        if (decodeState.readPacket() != PACKET_SETUP) {
            throw new ReaderError(ReaderErrorType.INVALID_SETUP, "setup packet");
        }
        decodeState.vorbisValidate();
        // codebooks
        var codebookCount = decodeState.readBits(8) + 1;
        header.codebooks = new Vector(codebookCount);
        for (i in 0...codebookCount) {
            header.codebooks[i] = Codebook.read(decodeState);
        }
        // time domain transfers (notused)
        x = decodeState.readBits(6) + 1;
        for (i in 0...x) {
            if (decodeState.readBits(16) != 0) {
                throw new ReaderError(INVALID_SETUP);
            }
        }
        // Floors
        var floorCount = decodeState.readBits(6) + 1;
        header.floorConfig = new Vector(floorCount);
        for (i in 0...floorCount) {
            header.floorConfig[i] = Floor.read(decodeState, header.codebooks);
        }
        // Residue
        var residueCount = decodeState.readBits(6) + 1;
        header.residueConfig = new Vector(residueCount);
        for (i in 0...residueCount) {
            header.residueConfig[i] = Residue.read(decodeState, header.codebooks);
        }
        //Mapping
        var mappingCount = decodeState.readBits(6) + 1;
        header.mapping = new Vector(mappingCount);
        for (i in 0...mappingCount) {
            var map = Mapping.read(decodeState, header.channel);
            header.mapping[i] = map;
            // validate every submap's floor/residue references
            for (j in 0...map.submaps) {
                if (map.submapFloor[j] >= header.floorConfig.length) {
                    throw new ReaderError(INVALID_SETUP);
                }
                if (map.submapResidue[j] >= header.residueConfig.length) {
                    throw new ReaderError(INVALID_SETUP);
                }
            }
        }
        var modeCount = decodeState.readBits(6) + 1;
        header.modes = new Vector(modeCount);
        for (i in 0...modeCount) {
            var mode = Mode.read(decodeState);
            header.modes[i] = mode;
            if (mode.mapping >= header.mapping.length) {
                throw new ReaderError(INVALID_SETUP);
            }
        }
        decodeState.flushPacket();
        return header;
    }
}

View File

@ -0,0 +1,15 @@
package kha.audio2.ogg.vorbis.data;
/**
 * Mutable 2D integer point, used by Floor.read to sort floor1 x-coordinates
 * while remembering their original indices.
 * @author shohei909
 */
class IntPoint
{
    public var x:Int;
    public var y:Int;

    /**
     * Creates a point. Both coordinates default to 0, so the original
     * zero-argument `new IntPoint()` call sites keep working, and the
     * fields are now explicitly initialized on every target.
     */
    public function new(x:Int = 0, y:Int = 0) {
        this.x = x;
        this.y = y;
    }
}

View File

@ -0,0 +1,127 @@
package kha.audio2.ogg.vorbis.data;
import haxe.ds.Vector;
import haxe.io.Input;
import kha.audio2.ogg.tools.MathTools;
import kha.audio2.ogg.vorbis.VorbisDecodeState;
/**
 * Vorbis channel mapping: couples channel pairs and routes each channel to
 * a submap's floor/residue configuration.
 */
class Mapping
{
    public var couplingSteps:Int; // uint16
    public var chan:Vector<MappingChannel>;
    public var submaps:Int; // uint8
    public var submapFloor:Vector<Int>; // uint8 varies
    public var submapResidue:Vector<Int>; // uint8 varies

    public function new() {
    }

    /**
     * Reads one mapping configuration from the setup header.
     * Only mapping type 0 is defined by Vorbis I; anything else throws
     * INVALID_SETUP, as do out-of-range coupling or mux references.
     */
    public static function read(decodeState:VorbisDecodeState, channels:Int):Mapping
    {
        var m = new Mapping();
        var mappingType = decodeState.readBits(16);
        if (mappingType != 0) {
            throw new ReaderError(INVALID_SETUP, "mapping type " + mappingType);
        }
        m.chan = new Vector(channels);
        for (j in 0...channels) {
            m.chan[j] = new MappingChannel();
        }
        if (decodeState.readBits(1) != 0) {
            m.submaps = decodeState.readBits(4)+1;
        } else {
            m.submaps = 1;
        }
        //if (m.submaps > maxSubmaps) {
        //    maxSubmaps = m.submaps;
        //}
        if (decodeState.readBits(1) != 0) {
            m.couplingSteps = decodeState.readBits(8)+1;
            for (k in 0...m.couplingSteps) {
                // magnitude/angle are channel indices, ilog(channels-1) bits wide
                m.chan[k].magnitude = decodeState.readBits(MathTools.ilog(channels-1));
                m.chan[k].angle = decodeState.readBits(MathTools.ilog(channels-1));
                if (m.chan[k].magnitude >= channels) {
                    throw new ReaderError(INVALID_SETUP);
                }
                if (m.chan[k].angle >= channels) {
                    throw new ReaderError(INVALID_SETUP);
                }
                if (m.chan[k].magnitude == m.chan[k].angle) {
                    throw new ReaderError(INVALID_SETUP);
                }
            }
        } else {
            m.couplingSteps = 0;
        }
        // reserved field
        if (decodeState.readBits(2) != 0) {
            throw new ReaderError(INVALID_SETUP);
        }
        if (m.submaps > 1) {
            for (j in 0...channels) {
                m.chan[j].mux = decodeState.readBits(4);
                if (m.chan[j].mux >= m.submaps) {
                    throw new ReaderError(INVALID_SETUP);
                }
            }
        } else {
            // single submap: every channel routes to submap 0
            for (j in 0...channels) {
                m.chan[j].mux = 0;
            }
        }
        m.submapFloor = new Vector(m.submaps);
        m.submapResidue = new Vector(m.submaps);
        for (j in 0...m.submaps) {
            decodeState.readBits(8); // discard
            m.submapFloor[j] = decodeState.readBits(8);
            m.submapResidue[j] = decodeState.readBits(8);
        }
        return m;
    }

    /**
     * Synthesizes the floor1 curve for channel `i` and multiplies it into
     * `target` (the first n/2 spectral coefficients).
     * Throws INVALID_STREAM for floor type 0 (unsupported).
     */
    public function doFloor(floors:Vector<Floor>, i:Int, n:Int, target:Vector<Float>, finalY:Array<Int>, step2Flag:Vector<Bool>)
    {
        var n2 = n >> 1;
        // FIX: the original declared an extra uninitialized `floor` here
        // ("var s = chan[i].mux, floor;") and then redeclared it on the next
        // line; the stray declaration is removed.
        var s = chan[i].mux;
        var floor = floors[submapFloor[s]];
        if (floor.type == 0) {
            throw new ReaderError(INVALID_STREAM);
        } else {
            var g = floor.floor1;
            var lx = 0, ly = finalY[0] * g.floor1Multiplier;
            // draw line segments between the decoded floor points in x order
            for (q in 1...g.values) {
                var j = g.sortedOrder[q];
                // negative finalY marks points that were not decoded this frame
                if (finalY[j] >= 0)
                {
                    var hy = finalY[j] * g.floor1Multiplier;
                    var hx = g.xlist[j];
                    VorbisTools.drawLine(target, lx, ly, hx, hy, n2);
                    lx = hx;
                    ly = hy;
                }
            }
            if (lx < n2) {
                // optimization of: drawLine(target, lx,ly, n,ly, n2);
                for (j in lx...n2) {
                    target[j] *= VorbisTools.INVERSE_DB_TABLE[ly];
                }
            }
        }
    }
}
/**
 * Per-channel mapping state: the coupled magnitude/angle channel indices
 * and the submap (mux) this channel is routed to.
 */
class MappingChannel
{
    public var magnitude:Int; // uint8
    public var angle:Int; // uint8
    public var mux:Int; // uint8
    public function new() {
    }
}

View File

@ -0,0 +1,29 @@
package kha.audio2.ogg.vorbis.data;
import haxe.io.Input;
import kha.audio2.ogg.vorbis.VorbisDecodeState;
/**
 * One Vorbis mode: selects block size (blockflag) and a channel mapping.
 */
class Mode
{
    public var blockflag:Bool; // uint8
    public var mapping:Int; // uint8
    public var windowtype:Int; // uint16
    public var transformtype:Int; // uint16

    public function new() {
    }

    /**
     * Reads one mode from the setup header. The bit layout is fixed:
     * 1-bit block flag, 16-bit window type, 16-bit transform type,
     * 8-bit mapping index. Vorbis I defines only window/transform type 0;
     * anything else is INVALID_SETUP.
     */
    public static function read(decodeState:VorbisDecodeState) {
        var mode = new Mode();
        mode.blockflag = decodeState.readBits(1) != 0;
        mode.windowtype = decodeState.readBits(16);
        mode.transformtype = decodeState.readBits(16);
        mode.mapping = decodeState.readBits(8);
        if (mode.windowtype != 0 || mode.transformtype != 0) {
            throw new ReaderError(INVALID_SETUP);
        }
        return mode;
    }
}

View File

@ -0,0 +1,60 @@
package kha.audio2.ogg.vorbis.data;
import haxe.io.Bytes;
import haxe.io.Input;
import kha.audio2.ogg.vorbis.data.ReaderError.ReaderErrorType;
import kha.audio2.ogg.vorbis.VorbisDecodeState;
/**
 * One Ogg page header. Parsing advances the decode state past the header;
 * only the header-type flag is retained on the instance.
 * @author shohei909
 */
class Page {
    // Header-type flag byte (see PageFlag bit constants).
    public var flag(default, null):Int;

    public function new () {
    }

    // Shallow copy carrying only the flag.
    public function clone() {
        var copy = new Page();
        copy.flag = flag;
        return copy;
    }

    // startPage: consume the "OggS" capture pattern, then the header fields.
    public function start(decodeState:VorbisDecodeState) {
        decodeState.capturePattern();
        startWithoutCapturePattern(decodeState);
    }

    // startPageNoCapturePattern: parse the page header after the capture pattern.
    public function startWithoutCapturePattern(decodeState:VorbisDecodeState) {
        var streamVersion = decodeState.readByte();
        if (streamVersion != 0) {
            throw new ReaderError(ReaderErrorType.INVALID_STREAM_STRUCTURE_VERSION, "" + streamVersion);
        }
        flag = decodeState.readByte();
        // absolute granule position: low then high 32 bits
        var granulePos0 = decodeState.readInt32();
        var granulePos1 = decodeState.readInt32();
        // input serial number -- vorbis doesn't interleave, so discard
        decodeState.readInt32();
        //if (this.serial != get32(f)) throw new ReaderError(ReaderErrorType.incorrectStreamSerialNumber);
        // page sequence number
        decodeState.readInt32();
        // CRC32
        decodeState.readInt32();
        // pageSegments
        decodeState.setup(granulePos0, granulePos1);
    }
}
// Bit values of the Ogg page header-type flag byte.
class PageFlag {
    static public inline var CONTINUED_PACKET = 1; // page continues a packet from the previous page
    static public inline var FIRST_PAGE = 2; // first page of the logical stream (bos)
    static public inline var LAST_PAGE = 4; // last page of the logical stream (eos)
}

View File

@ -0,0 +1,18 @@
package kha.audio2.ogg.vorbis.data;
/**
 * Result of probing an Ogg page during seeking: byte offsets of the page
 * and, when known, the sample range it decodes to.
 * @author shohei909
 */
class ProbedPage
{
    public var pageStart:Int; // byte offset where this page begins
    public var pageEnd:Int; // byte offset just past this page
    public var afterPreviousPageStart:Int; // byte offset after the start of the preceding page
    public var firstDecodedSample:Null<Int>; // null when not yet determined
    public var lastDecodedSample:Null<Int>; // null when not yet determined
    public function new() {
    }
}

View File

@ -0,0 +1,53 @@
package kha.audio2.ogg.vorbis.data;
import haxe.PosInfos;
/**
 * Error thrown while reading or decoding an Ogg Vorbis stream.
 * Carries the error category, an optional human-readable message, and the
 * call-site position info when provided.
 * @author shohei909
 */
class ReaderError
{
    public var type(default, null):ReaderErrorType;
    public var message(default, null):String;
    public var posInfos(default, null):PosInfos;

    public function new(type:ReaderErrorType, ?message:String = "", ?posInfos:PosInfos) {
        this.posInfos = posInfos;
        this.message = message;
        this.type = type;
    }
}
/**
 * Categories of reader/decoder failure, mirroring the stb_vorbis error enum.
 */
enum ReaderErrorType
{
    NEED_MORE_DATA; // not a real error
    INVALID_API_MIXING; // can't mix API modes
    OUTOFMEM; // not enough memory
    FEATURE_NOT_SUPPORTED; // uses floor 0
    TOO_MANY_CHANNELS; // STB_VORBIS_MAX_CHANNELS is too small
    FILE_OPEN_FAILURE; // fopen() failed
    SEEK_WITHOUT_LENGTH; // can't seek in unknown-length file
    UNEXPECTED_EOF; // file is truncated?
    SEEK_INVALID; // seek past EOF
    // decoding errors (corrupt/invalid input) -- you probably
    // don't care about the exact details of these
    // vorbis errors:
    INVALID_SETUP;
    INVALID_STREAM;
    // ogg errors:
    MISSING_CAPTURE_PATTERN;
    INVALID_STREAM_STRUCTURE_VERSION;
    CONTINUED_PACKET_FLAG_INVALID;
    INCORRECT_STREAM_SERIAL_NUMBER;
    INVALID_FIRST_PAGE;
    BAD_PACKET_TYPE;
    CANT_FIND_LAST_PAGE;
    SEEK_FAILED;
    OTHER;
}

View File

@ -0,0 +1,298 @@
package kha.audio2.ogg.vorbis.data;
import haxe.ds.Vector;
import haxe.io.Input;
import kha.audio2.ogg.vorbis.VorbisDecodeState;
/**
 * Vorbis residue configuration and per-frame residue decoding.
 * Port of the residue handling in stb_vorbis (with
 * STB_VORBIS_DIVIDES_IN_RESIDUE behavior).
 * @author shohei909
 */
class Residue
{
    public var begin(default, null):UInt; // uint32
    public var end(default, null):UInt; // uint32
    public var partSize(default, null):UInt; // uint32
    public var classifications(default, null):Int; // uint8
    public var classbook(default, null):Int; // uint8
    public var classdata(default, null):Vector<Vector<Int>>; //uint8 **
    public var residueBooks(default, null):Vector<Vector<Int>>; //int16 (*)[8]
    public var type(default, null):Int;

    public function new() {
    }

    /**
     * Reads one residue configuration from the setup header and precomputes
     * the classdata table. Throws INVALID_SETUP on unknown residue types or
     * out-of-range codebook references.
     */
    public static function read(decodeState:VorbisDecodeState, codebooks:Vector<Codebook>):Residue
    {
        var r = new Residue();
        r.type = decodeState.readBits(16);
        if (r.type > 2) {
            throw new ReaderError(INVALID_SETUP);
        }
        var residueCascade = new Vector<Int>(64);
        r.begin = decodeState.readBits(24);
        r.end = decodeState.readBits(24);
        r.partSize = decodeState.readBits(24)+1;
        var classifications = r.classifications = decodeState.readBits(6)+1;
        r.classbook = decodeState.readBits(8);
        // cascade: per class, a bitmap of which of the 8 passes use a book
        for (j in 0...r.classifications) {
            var highBits = 0;
            var lowBits = decodeState.readBits(3);
            if (decodeState.readBits(1) != 0){
                highBits = decodeState.readBits(5);
            }
            residueCascade[j] = highBits * 8 + lowBits;
        }
        r.residueBooks = new Vector(r.classifications);
        for (j in 0...r.classifications) {
            r.residueBooks[j] = new Vector(8);
            for (k in 0...8) {
                if (residueCascade[j] & (1 << k) != 0) {
                    r.residueBooks[j][k] = decodeState.readBits(8);
                    if (r.residueBooks[j][k] >= codebooks.length) {
                        throw new ReaderError(INVALID_SETUP);
                    }
                } else {
                    // -1 marks "no book for this pass"
                    r.residueBooks[j][k] = -1;
                }
            }
        }
        // precompute the classifications[] array to avoid inner-loop mod/divide
        // call it 'classdata' since we already have classifications
        var el = codebooks[r.classbook].entries;
        var classwords = codebooks[r.classbook].dimensions;
        r.classdata = new Vector(el);
        for (j in 0...el) {
            var temp = j;
            var k = classwords;
            var cd = r.classdata[j] = new Vector(classwords);
            while (--k >= 0) {
                cd[k] = temp % classifications;
                temp = Std.int(temp / classifications);
            }
        }
        return r;
    }

    /**
     * Decodes this residue into `residueBuffers` for `ch` channels.
     * A `return` mid-way corresponds to end-of-packet, which is a normal
     * early stop for residue decoding. Channels flagged in `doNotDecode`
     * are skipped (their buffers are not even cleared).
     * Residue type 2 with more than one channel takes the interleaved fast
     * paths (specialized for ch == 2 and ch == 1 after coupling); all other
     * cases use the generic per-channel loop at the bottom.
     */
    public function decode(decodeState:VorbisDecodeState, header:Header, residueBuffers:Vector<Vector<Float>>, ch:Int, n:Int, doNotDecode:Vector<Bool>, channelBuffers:Vector<Vector<Float>>)
    {
        // STB_VORBIS_DIVIDES_IN_RESIDUE = true
        var codebooks = header.codebooks;
        var classwords = codebooks[classbook].dimensions;
        var nRead = end - begin;
        var partSize = this.partSize;
        var partRead = Std.int(nRead / partSize);
        var classifications = new Vector<Int>(header.channel * partRead + 1); // + 1 is a hack for a possible crash in line 268 with some ogg files
        VorbisTools.stbProf(2);
        // zero the buffers of every channel we are going to decode into
        for (i in 0...ch) {
            if (!doNotDecode[i]) {
                var buffer = residueBuffers[i];
                for (j in 0...buffer.length) {
                    buffer[j] = 0;
                }
            }
        }
        if (type == 2 && ch != 1) {
            // if every channel is marked do-not-decode, there is nothing to do
            for (j in 0...ch) {
                if (!doNotDecode[j]) {
                    break;
                } else if (j == ch - 1) {
                    return;
                }
            }
            VorbisTools.stbProf(3);
            for (pass in 0...8) {
                var pcount = 0, classSet = 0;
                if (ch == 2) {
                    VorbisTools.stbProf(13);
                    while (pcount < partRead) {
                        // map the partition offset into (channel, sample) for 2 channels
                        var z = begin + pcount * partSize;
                        var cInter = (z & 1);
                        var pInter = z >> 1;
                        if (pass == 0) {
                            // first pass decodes the classification codeword
                            var c:Codebook = codebooks[classbook];
                            var q = decodeState.decode(c);
                            if (q == VorbisTools.EOP) {
                                return;
                            }
                            // unpack classwords base-`classifications` digits, last digit first
                            var i = classwords;
                            while (--i >= 0) {
                                classifications[i + pcount] = q % this.classifications;
                                q = Std.int(q / this.classifications);
                            }
                        }
                        VorbisTools.stbProf(5);
                        for (i in 0...classwords) {
                            if (pcount >= partRead) {
                                break;
                            }
                            var z = begin + pcount*partSize;
                            var c = classifications[pcount];
                            var b = residueBooks[c][pass];
                            if (b >= 0) {
                                var book = codebooks[b];
                                VorbisTools.stbProf(20); // accounts for X time
                                var result = book.decodeDeinterleaveRepeat(decodeState, residueBuffers, ch, cInter, pInter, n, partSize);
                                if (result == null) {
                                    return;
                                } else {
                                    cInter = result.cInter;
                                    pInter = result.pInter;
                                }
                                VorbisTools.stbProf(7);
                            } else {
                                // no book for this class/pass: skip the partition
                                z += partSize;
                                cInter = z & 1;
                                pInter = z >> 1;
                            }
                            ++pcount;
                        }
                        VorbisTools.stbProf(8);
                    }
                } else if (ch == 1) {
                    while (pcount < partRead) {
                        var z = begin + pcount*partSize;
                        var cInter = 0;
                        var pInter = z;
                        if (pass == 0) {
                            var c:Codebook = codebooks[classbook];
                            var q = decodeState.decode(c);
                            if (q == VorbisTools.EOP) return;
                            var i = classwords;
                            while (--i >= 0) {
                                classifications[i + pcount] = q % this.classifications;
                                q = Std.int(q / this.classifications);
                            }
                        }
                        for (i in 0...classwords) {
                            if (pcount >= partRead) {
                                break;
                            }
                            var z = begin + pcount * partSize;
                            var b = residueBooks[classifications[pcount]][pass];
                            if (b >= 0) {
                                var book:Codebook = codebooks[b];
                                VorbisTools.stbProf(22);
                                var result = book.decodeDeinterleaveRepeat(decodeState, residueBuffers, ch, cInter, pInter, n, partSize);
                                if (result == null) {
                                    return;
                                } else {
                                    cInter = result.cInter;
                                    pInter = result.pInter;
                                }
                                VorbisTools.stbProf(3);
                            } else {
                                z += partSize;
                                cInter = 0;
                                pInter = z;
                            }
                            ++pcount;
                        }
                    }
                } else {
                    // general channel count: interleave by modulo/divide
                    while (pcount < partRead) {
                        var z = begin + pcount * partSize;
                        var cInter = z % ch;
                        var pInter = Std.int(z / ch);
                        if (pass == 0) {
                            var c:Codebook = codebooks[classbook];
                            var q = decodeState.decode(c);
                            if (q == VorbisTools.EOP) {
                                return;
                            }
                            var i = classwords;
                            while (--i >= 0) {
                                classifications[i+pcount] = q % this.classifications;
                                q = Std.int(q / this.classifications);
                            }
                        }
                        for (i in 0...classwords) {
                            if (pcount >= partRead) {
                                break;
                            }
                            var z = begin + pcount * partSize;
                            var b = residueBooks[classifications[pcount]][pass];
                            if (b >= 0) {
                                var book = codebooks[b];
                                VorbisTools.stbProf(22);
                                var result = book.decodeDeinterleaveRepeat(decodeState, residueBuffers, ch, cInter, pInter, n, partSize);
                                if (result == null) {
                                    return;
                                } else {
                                    cInter = result.cInter;
                                    pInter = result.pInter;
                                }
                                VorbisTools.stbProf(3);
                            } else {
                                z += partSize;
                                cInter = z % ch;
                                pInter = Std.int(z / ch);
                            }
                            ++pcount;
                        }
                    }
                }
            }
            return;
        }
        VorbisTools.stbProf(9);
        // generic path: residue types 0/1 (and type 2 with one channel):
        // each channel keeps its own classification row and decodes into its
        // own buffer, partition by partition, over 8 passes.
        for (pass in 0...8) {
            var pcount = 0;
            var classSet = 0;
            while (pcount < partRead) {
                if (pass == 0) {
                    for (j in 0...ch) {
                        if (!doNotDecode[j]) {
                            var c:Codebook = codebooks[classbook];
                            var temp = decodeState.decode(c);
                            if (temp == VorbisTools.EOP) {
                                return;
                            }
                            var i = classwords;
                            while (--i >= 0) {
                                classifications[j * partRead + i + pcount] = temp % this.classifications;
                                temp = Std.int(temp / this.classifications);
                            }
                        }
                    }
                }
                for (i in 0...classwords) {
                    if (pcount >= partRead) {
                        break;
                    }
                    for (j in 0...ch) {
                        if (!doNotDecode[j]) {
                            var c = classifications[j * partRead + pcount];
                            var b = residueBooks[c][pass];
                            if (b >= 0) {
                                var target = residueBuffers[j];
                                var offset = begin + pcount * partSize;
                                var n = partSize;
                                var book = codebooks[b];
                                if (!book.residueDecode(decodeState, target, offset, n, type)) {
                                    return;
                                }
                            }
                        }
                    }
                    ++pcount;
                }
            }
        }
    }
}

View File

@ -0,0 +1,15 @@
package kha.audio2.ogg.vorbis.data;
/**
 * Decoder-wide compile-time limits and table sizes, mirroring the
 * STB_VORBIS_* configuration constants.
 * @author shohei909
 */
class Setting
{
    static public inline var MAX_CHANNELS = 16; // reject streams with more channels
    static public inline var PUSHDATA_CRC_COUNT = 4;
    static public inline var FAST_HUFFMAN_LENGTH = 10; // max code length served by the fast table
    static public inline var FAST_HUFFMAN_TABLE_SIZE = (1 << FAST_HUFFMAN_LENGTH);
    static public inline var FAST_HUFFMAN_TABLE_MASK = FAST_HUFFMAN_TABLE_SIZE - 1;
}