Moved the OpenBTS code into a separate directory and updated its license statements so they can be processed automatically
diff --git a/lib/decoding/openbts/AmrCoder.cpp b/lib/decoding/openbts/AmrCoder.cpp
new file mode 100644
index 0000000..718baf0
--- /dev/null
+++ b/lib/decoding/openbts/AmrCoder.cpp
@@ -0,0 +1,1891 @@
+/*
+ * Copyright 2013, 2014 Range Networks, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This use of this software may be subject to additional restrictions.
+ * See the LEGAL file in the main directory for details.
+ */
+
+
+#include "BitVector.h"
+#include "AmrCoder.h"
+#include <iostream>
+#include <stdio.h>
+#include <sstream>
+
+using namespace std;
+
+
+
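+// The generator polynomials below are stored with bit i of the constant giving the
+// coefficient of D^i (bit 0 = tap on the newest register position).  For TCH/AFS12.2
+// this matches the GSM 05.03 / 3GPP TS 45.003 rate-1/2 code:
+//   0x019 = 1 + D^3 + D^4      (G0, also used as the feedback polynomial)
+//   0x01b = 1 + D + D^3 + D^4  (G1)
+// giving the two outputs with transfer functions 1 (systematic) and G1/G0.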
+ViterbiTCH_AFS12_2::ViterbiTCH_AFS12_2()
+{
+	assert(mDeferral < 32);
+	mCoeffs[0] = 0x019;
+	mCoeffsFB[0] = 0x019;
+	mCoeffs[1] = 0x01b;
+	mCoeffsFB[1] = 0x019;
+	for (unsigned i = 0; i < mIRate; i++) {
+		computeStateTables(i);
+	}
+	computeGeneratorTable();
+}
+
+
+//void BitVector::encode(const ViterbiTCH_AFS12_2& coder, BitVector& target) const
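+// Rate-1/2 recursive systematic encode for TCH/AFS12.2: C[2k] is the systematic bit
+// u[k] and C[2k+1] the parity bit of the feedback register r.  The H = 4 extra leading
+// entries of r stand in for the negative indices r[k-1..k-4] at start-up, and the
+// final loop appends four tail bits that flush the encoder back to the all-zero state.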
+void ViterbiTCH_AFS12_2::encode(const BitVector& in, BitVector& target) const
+{
+	assert(in.size() == 250);
+	assert(target.size() == 508);
+	const char *u = in.begin();
+	char *C = target.begin();
+	const unsigned H = 4;
+	BitVector r(254+H);
+	for (int k = -H; k <= -1; k++) r[k+H] = 0;
+	for (unsigned k = 0; k <= 249; k++) {
+		r[k+H] = u[k] ^ r[k-3+H] ^ r[k-4+H];
+		C[2*k] = u[k];
+		C[2*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+	}
+	// termination
+	for (unsigned k = 250; k <= 253; k++) {
+		r[k+H] = 0;
+		C[2*k] = r[k-3+H] ^ r[k-4+H];
+		C[2*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+	}
+}
+
+
+
+//void BitVector::encode(const ViterbiTCH_AFS10_2& coder, BitVector& target)
+void ViterbiTCH_AFS10_2::encode(const BitVector& in, BitVector& target) const
+{
+	assert(in.size() == 210);
+	assert(target.size() == 642);
+	const char *u = in.begin();
+	char *C = target.begin();
+	const unsigned H = 4;
+	BitVector r(214+H);
+	for (int k = -H; k <= -1; k++) r[k+H] = 0;
+	for (unsigned k = 0; k <= 209; k++) {
+		r[k+H] = u[k] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
+		C[3*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+		C[3*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
+		C[3*k+2] = u[k];
+	}
+	// termination
+	for (unsigned k = 210; k <= 213; k++) {
+		r[k+H] = 0;
+		C[3*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+		C[3*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
+		C[3*k+2] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
+	}
+}
+
+
+
+//void BitVector::encode(const ViterbiTCH_AFS7_95& coder, BitVector& target)
+void ViterbiTCH_AFS7_95::encode(const BitVector& in, BitVector& target) const
+{
+	assert(in.size() == 165);
+	assert(target.size() == 513);
+	const char *u = in.begin();
+	char *C = target.begin();
+	const unsigned H = 6;
+	BitVector r(171+H);
+	for (int k = -H; k <= -1; k++) r[k+H] = 0;
+	for (unsigned k = 0; k <= 164; k++) {
+		r[k+H] = u[k] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
+		C[3*k] = u[k];
+		C[3*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-4+H] ^ r[k-6+H];
+		C[3*k+2] = r[k+H] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
+	}
+	// termination
+	for (unsigned k = 165; k <= 170; k++) {
+		r[k+H] = 0;
+		C[3*k] = r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
+		C[3*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-4+H] ^ r[k-6+H];
+		C[3*k+2] = r[k+H] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
+	}
+}
+
+
+
+void ViterbiTCH_AFS7_4::encode(const BitVector& in, BitVector& target) const
+{
+	assert(in.size() == 154);
+	assert(target.size() == 474);
+	const char *u = in.begin();
+	char *C = target.begin();
+	const unsigned H = 4;
+	BitVector r(158+H);
+	for (int k = -H; k <= -1; k++) r[k+H] = 0;
+	for (unsigned k = 0; k <= 153; k++) {
+		r[k+H] = u[k] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
+		C[3*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+		C[3*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
+		C[3*k+2] = u[k];
+	}
+	// termination
+	for (unsigned k = 154; k <= 157; k++) {
+		r[k+H] = 0;
+		C[3*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+		C[3*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
+		C[3*k+2] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
+	}
+}
+
+
+
+void ViterbiTCH_AFS6_7::encode(const BitVector& in, BitVector& target) const
+{
+	assert(in.size() == 140);
+	assert(target.size() == 576);
+	const char *u = in.begin();
+	char *C = target.begin();
+	const unsigned H = 4;
+	BitVector r(144+H);
+	for (int k = -H; k <= -1; k++) r[k+H] = 0;
+	for (unsigned k = 0; k <= 139; k++) {
+		r[k+H] = u[k] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
+		C[4*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+		C[4*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
+		C[4*k+2] = u[k];
+		C[4*k+3] = u[k];
+	}
+	// termination
+	for (unsigned k = 140; k <= 143; k++) {
+		r[k+H] = 0;
+		C[4*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+		C[4*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
+		C[4*k+2] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
+		C[4*k+3] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
+	}
+}
+
+
+
+void ViterbiTCH_AFS5_9::encode(const BitVector& in, BitVector& target) const
+{
+	assert(in.size() == 124);
+	assert(target.size() == 520);
+	const char *u = in.begin();
+	char *C = target.begin();
+	const unsigned H = 6;
+	BitVector r(130+H);
+	for (int k = -H; k <= -1; k++) r[k+H] = 0;
+	for (unsigned k = 0; k <= 123; k++) {
+		r[k+H] = u[k] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
+		C[4*k] = r[k+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
+		C[4*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-4+H] ^ r[k-6+H];
+		C[4*k+2] = u[k];
+		C[4*k+3] = u[k];
+	}
+	// termination
+	for (unsigned k = 124; k <= 129; k++) {
+		r[k+H] = 0;
+		C[4*k] = r[k+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
+		C[4*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-4+H] ^ r[k-6+H];
+		C[4*k+2] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
+		C[4*k+3] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
+	}
+}
+
+
+
+void ViterbiTCH_AFS5_15::encode(const BitVector& in, BitVector& target) const
+{
+	assert(in.size() == 109);
+	assert(target.size() == 565);
+	const char *u = in.begin();
+	char *C = target.begin();
+	const unsigned H = 4;
+	BitVector r(113+H);
+	for (int k = -H; k <= -1; k++) r[k+H] = 0;
+	for (unsigned k = 0; k <= 108; k++) {
+		r[k+H] = u[k] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
+		C[5*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+		C[5*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+		C[5*k+2] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
+		C[5*k+3] = u[k];
+		C[5*k+4] = u[k];
+	}
+	// termination
+	for (unsigned k = 109; k <= 112; k++) {
+		r[k+H] = 0;
+		C[5*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+		C[5*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
+		C[5*k+2] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
+		C[5*k+3] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
+		C[5*k+4] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
+	}
+}
+
+
+
+void ViterbiTCH_AFS4_75::encode(const BitVector& in, BitVector& target) const
+{
+	assert(in.size() == 101);
+	assert(target.size() == 535);
+	const char *u = in.begin();
+	char *C = target.begin();
+	const unsigned H = 6;
+	BitVector r(107+H);
+	for (int k = -H; k <= -1; k++) r[k+H] = 0;
+	for (unsigned k = 0; k <= 100; k++) {
+		r[k+H] = u[k] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
+		C[5*k] = r[k+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
+		C[5*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
+		C[5*k+2] = r[k+H] ^ r[k-1+H] ^ r[k-4+H] ^ r[k-6+H];
+		C[5*k+3] = u[k];
+		C[5*k+4] = u[k];
+	}
+	// termination
+	for (unsigned k = 101; k <= 106; k++) {
+		r[k+H] = 0;
+		C[5*k] = r[k+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
+		C[5*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
+		C[5*k+2] = r[k+H] ^ r[k-1+H] ^ r[k-4+H] ^ r[k-6+H];
+		C[5*k+3] = r[k+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
+		C[5*k+4] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
+	}
+}
+
+
+void ViterbiTCH_AFS12_2::initializeStates()
+{
+	for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
+	for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
+}
+
+
+
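+// Table precomputation: mStateTable[g][(state<<1)|in] tabulates generator g's output
+// for every register-state/input combination, and computeGeneratorTable() packs those
+// per-generator bits into a single word per index (generator g at bit mIRate-1-g),
+// which is the bit branchCandidates() masks out when it assembles each candidate's oState.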
+void ViterbiTCH_AFS12_2::computeStateTables(unsigned g)
+{
+	assert(g<mIRate);
+	for (unsigned state=0; state<mIStates; state++) {
+		for (unsigned in = 0; in <= 1; in++) {
+			uint32_t inputVal = (state<<1) | in;
+			mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
+		}
+	}
+}
+
+void ViterbiTCH_AFS12_2::computeGeneratorTable()
+{
+	for (unsigned index=0; index<mIStates*2; index++) {
+		uint32_t t = 0;
+		for (unsigned i = 0; i < mIRate; i++) {
+			t = (t << 1) | mStateTable[i][index];
+		}
+		mGeneratorTable[index] = t;
+	}
+}
+
+
+
+
+
+
+void ViterbiTCH_AFS12_2::branchCandidates()
+{
+	// Branch to generate new input states.
+	const vCand *sp = mSurvivors;
+	for (unsigned cand=0; cand<mNumCands; cand+=2) {
+		uint32_t oStateShifted = (sp->oState) << mIRate;
+		for (unsigned in = 0; in <= 1; in++) {
+			mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
+			mCandidates[cand+in].cost = sp->cost;
+			uint32_t outputs = oStateShifted;
+			for (unsigned out = 0; out < mIRate; out++) {
+				char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
+				char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
+				mCandidates[cand+in].rState[out] = rState;
+				outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
+			}
+			mCandidates[cand+in].oState = outputs;
+		}
+		sp++;
+	}
+}
+
+
+void ViterbiTCH_AFS12_2::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
+{
+	const float *cTab[2] = {matchCost,mismatchCost};
+	for (unsigned i=0; i<mNumCands; i++) {
+		vCand& thisCand = mCandidates[i];
+		const unsigned mismatched = inSample ^ (thisCand.oState);
+		for (unsigned i = 0; i < mIRate; i++) {
+			thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
+		}
+	}
+}
+
+
+void ViterbiTCH_AFS12_2::pruneCandidates()
+{
+	const vCand* c1 = mCandidates;					// 0-prefix
+	const vCand* c2 = mCandidates + mIStates;		// 1-prefix
+	for (unsigned i=0; i<mIStates; i++) {
+		if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
+		else mSurvivors[i] = c2[i];
+	}
+}
+
+
+const ViterbiTCH_AFS12_2::vCand& ViterbiTCH_AFS12_2::minCost() const
+{
+	int minIndex = 0;
+	float minCost = mSurvivors[0].cost;
+	for (unsigned i=1; i<mIStates; i++) {
+		const float thisCost = mSurvivors[i].cost;
+		if (thisCost>=minCost) continue;
+		minCost = thisCost;
+		minIndex=i;
+	}
+	return mSurvivors[minIndex];
+}
+
+
+const ViterbiTCH_AFS12_2::vCand& ViterbiTCH_AFS12_2::step(uint32_t inSample, const float *probs, const float *iprobs)
+{
+	branchCandidates();
+	getSoftCostMetrics(inSample,probs,iprobs);
+	pruneCandidates();
+	return minCost();
+}
+
+
+
+void ViterbiTCH_AFS12_2::decode(const SoftVector &in, BitVector& target)
+{
+	ViterbiTCH_AFS12_2 &decoder = *this;
+	const size_t sz = in.size() - 8;
+	const unsigned deferral = decoder.deferral();
+	const size_t ctsz = sz + deferral*decoder.iRate();
+	assert(sz == decoder.iRate()*target.size());
+
+	// Build a "history" array where each element contains the full history.
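+	// Each entry holds the hard (sliced) decisions seen so far, newest bit in bit 0,
+	// so every iRate-th entry carries the iRate received code bits of one trellis step
+	// in its low bits; step() XORs exactly those bits against each candidate's oState.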
+	uint32_t history[ctsz];
+	{
+		BitVector bits = in.sliced();
+		uint32_t accum = 0;
+		for (size_t i=0; i<sz; i++) {
+			accum = (accum<<1) | bits.bit(i);
+			history[i] = accum;
+		}
+		// Repeat last bit at the end.
+		for (size_t i=sz; i<ctsz; i++) {
+			accum = (accum<<1) | (accum & 0x01);
+			history[i] = accum;
+		}
+	}
+
+	// Precompute metric tables.
+	float matchCostTable[ctsz];
+	float mismatchCostTable[ctsz];
+	{
+		const float *dp = in.begin();
+		for (size_t i=0; i<sz; i++) {
+			// pVal is the probability that a bit is correct.
+			// ipVal is the probability that a bit is incorrect.
+			float pVal = dp[i];
+			if (pVal>0.5F) pVal = 1.0F-pVal;
+			float ipVal = 1.0F-pVal;
+			// This is a cheap approximation to an ideal cost function.
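+			// After the folding above, pVal is the probability that the sliced bit is
+			// wrong: a confident bit gives matchCost near 0.25 and a large mismatchCost,
+			// while an ambiguous bit (pVal near 0.5) gives roughly 0.5 for both, the
+			// same neutral value used to pad the tail below.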
+			if (pVal<0.01F) pVal = 0.01;
+			if (ipVal<0.01F) ipVal = 0.01;
+			matchCostTable[i] = 0.25F/ipVal;
+			mismatchCostTable[i] = 0.25F/pVal;
+		}
+	
+		// pad end of table with unknowns
+		for (size_t i=sz; i<ctsz; i++) {
+			matchCostTable[i] = 0.5F;
+			mismatchCostTable[i] = 0.5F;
+		}
+	}
+
+	{
+		decoder.initializeStates();
+		// Each sample of history[] carries its history.
+		// So we only have to process every iRate-th sample.
+		const unsigned step = decoder.iRate();
+		// input pointer
+		const uint32_t *ip = history + step - 1;
+		// output pointers
+		char *op = target.begin();
+		const char *const opt = target.end();
+		// table pointers
+		const float* match = matchCostTable;
+		const float* mismatch = mismatchCostTable;
+		size_t oCount = 0;
+		while (op<opt) {
+			// Viterbi algorithm
+			assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
+			assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
+			const ViterbiTCH_AFS12_2::vCand &minCost = decoder.step(*ip, match, mismatch);
+			ip += step;
+			match += step;
+			mismatch += step;
+			// output
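+			// Fixed-lag decision instead of a full end-of-frame traceback: each
+			// survivor's iState records its recent input bits (newest in bit 0), so
+			// once `deferral` steps have accumulated, the bit decided `deferral`
+			// steps ago is read from the best survivor at bit position `deferral`.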
+			if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
+			oCount++;
+		}
+	}
+}
+
+
+
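+// TCH/AFS10.2: rate-1/3 RSC code with generators 0x01b = 1+D+D^3+D^4 (G1),
+// 0x015 = 1+D^2+D^4 (G2) and 0x01f = 1+D+D^2+D^3+D^4 (G3), all fed back through G3
+// (same bit-ordering convention as noted above ViterbiTCH_AFS12_2()).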
+ViterbiTCH_AFS10_2::ViterbiTCH_AFS10_2()
+{
+	assert(mDeferral < 32);
+	mCoeffs[0] = 0x01b;
+	mCoeffsFB[0] = 0x01f;
+	mCoeffs[1] = 0x015;
+	mCoeffsFB[1] = 0x01f;
+	mCoeffs[2] = 0x01f;
+	mCoeffsFB[2] = 0x01f;
+	for (unsigned i = 0; i < mIRate; i++) {
+		computeStateTables(i);
+	}
+	computeGeneratorTable();
+}
+
+
+
+
+void ViterbiTCH_AFS10_2::initializeStates()
+{
+	for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
+	for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
+}
+
+
+
+void ViterbiTCH_AFS10_2::computeStateTables(unsigned g)
+{
+	assert(g<mIRate);
+	for (unsigned state=0; state<mIStates; state++) {
+		for (unsigned in = 0; in <= 1; in++) {
+			uint32_t inputVal = (state<<1) | in;
+			mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
+		}
+	}
+}
+
+void ViterbiTCH_AFS10_2::computeGeneratorTable()
+{
+	for (unsigned index=0; index<mIStates*2; index++) {
+		uint32_t t = 0;
+		for (unsigned i = 0; i < mIRate; i++) {
+			t = (t << 1) | mStateTable[i][index];
+		}
+		mGeneratorTable[index] = t;
+	}
+}
+
+
+
+
+
+
+void ViterbiTCH_AFS10_2::branchCandidates()
+{
+	// Branch to generate new input states.
+	const vCand *sp = mSurvivors;
+	for (unsigned cand=0; cand<mNumCands; cand+=2) {
+		uint32_t oStateShifted = (sp->oState) << mIRate;
+		for (unsigned in = 0; in <= 1; in++) {
+			mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
+			mCandidates[cand+in].cost = sp->cost;
+			uint32_t outputs = oStateShifted;
+			for (unsigned out = 0; out < mIRate; out++) {
+				char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
+				char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
+				mCandidates[cand+in].rState[out] = rState;
+				outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
+			}
+			mCandidates[cand+in].oState = outputs;
+		}
+		sp++;
+	}
+}
+
+
+void ViterbiTCH_AFS10_2::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
+{
+	const float *cTab[2] = {matchCost,mismatchCost};
+	for (unsigned i=0; i<mNumCands; i++) {
+		vCand& thisCand = mCandidates[i];
+		const unsigned mismatched = inSample ^ (thisCand.oState);
+		for (unsigned i = 0; i < mIRate; i++) {
+			thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
+		}
+	}
+}
+
+
+void ViterbiTCH_AFS10_2::pruneCandidates()
+{
+	const vCand* c1 = mCandidates;					// 0-prefix
+	const vCand* c2 = mCandidates + mIStates;		// 1-prefix
+	for (unsigned i=0; i<mIStates; i++) {
+		if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
+		else mSurvivors[i] = c2[i];
+	}
+}
+
+
+const ViterbiTCH_AFS10_2::vCand& ViterbiTCH_AFS10_2::minCost() const
+{
+	int minIndex = 0;
+	float minCost = mSurvivors[0].cost;
+	for (unsigned i=1; i<mIStates; i++) {
+		const float thisCost = mSurvivors[i].cost;
+		if (thisCost>=minCost) continue;
+		minCost = thisCost;
+		minIndex=i;
+	}
+	return mSurvivors[minIndex];
+}
+
+
+const ViterbiTCH_AFS10_2::vCand& ViterbiTCH_AFS10_2::step(uint32_t inSample, const float *probs, const float *iprobs)
+{
+	branchCandidates();
+	getSoftCostMetrics(inSample,probs,iprobs);
+	pruneCandidates();
+	return minCost();
+}
+
+
+
+void ViterbiTCH_AFS10_2::decode(const SoftVector &in, BitVector& target)
+{
+	ViterbiTCH_AFS10_2 &decoder = *this;
+	const size_t sz = in.size() - 12;
+	const unsigned deferral = decoder.deferral();
+	const size_t ctsz = sz + deferral*decoder.iRate();
+	assert(sz == decoder.iRate()*target.size());
+
+	// Build a "history" array where each element contains the full history.
+	uint32_t history[ctsz];
+	{
+		BitVector bits = in.sliced();
+		uint32_t accum = 0;
+		for (size_t i=0; i<sz; i++) {
+			accum = (accum<<1) | bits.bit(i);
+			history[i] = accum;
+		}
+		// Repeat last bit at the end.
+		for (size_t i=sz; i<ctsz; i++) {
+			accum = (accum<<1) | (accum & 0x01);
+			history[i] = accum;
+		}
+	}
+
+	// Precompute metric tables.
+	float matchCostTable[ctsz];
+	float mismatchCostTable[ctsz];
+	{
+		const float *dp = in.begin();
+		for (size_t i=0; i<sz; i++) {
+			// pVal is the probability that a bit is correct.
+			// ipVal is the probability that a bit is incorrect.
+			float pVal = dp[i];
+			if (pVal>0.5F) pVal = 1.0F-pVal;
+			float ipVal = 1.0F-pVal;
+			// This is a cheap approximation to an ideal cost function.
+			if (pVal<0.01F) pVal = 0.01;
+			if (ipVal<0.01F) ipVal = 0.01;
+			matchCostTable[i] = 0.25F/ipVal;
+			mismatchCostTable[i] = 0.25F/pVal;
+		}
+	
+		// pad end of table with unknowns
+		for (size_t i=sz; i<ctsz; i++) {
+			matchCostTable[i] = 0.5F;
+			mismatchCostTable[i] = 0.5F;
+		}
+	}
+
+	{
+		decoder.initializeStates();
+		// Each sample of history[] carries its history.
+		// So we only have to process every iRate-th sample.
+		const unsigned step = decoder.iRate();
+		// input pointer
+		const uint32_t *ip = history + step - 1;
+		// output pointers
+		char *op = target.begin();
+		const char *const opt = target.end();
+		// table pointers
+		const float* match = matchCostTable;
+		const float* mismatch = mismatchCostTable;
+		size_t oCount = 0;
+		while (op<opt) {
+			// Viterbi algorithm
+			assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
+			assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
+			const ViterbiTCH_AFS10_2::vCand &minCost = decoder.step(*ip, match, mismatch);
+			ip += step;
+			match += step;
+			mismatch += step;
+			// output
+			if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
+			oCount++;
+		}
+	}
+}
+
+
+
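+// TCH/AFS7.95: rate-1/3, 6th-order RSC code with generators 0x06d = 1+D^2+D^3+D^5+D^6
+// (G4), 0x053 = 1+D+D^4+D^6 (G5) and 0x05f = 1+D+D^2+D^3+D^4+D^6 (G6), fed back
+// through G4.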
+ViterbiTCH_AFS7_95::ViterbiTCH_AFS7_95()
+{
+	assert(mDeferral < 32);
+	mCoeffs[0] = 0x06d;
+	mCoeffsFB[0] = 0x06d;
+	mCoeffs[1] = 0x053;
+	mCoeffsFB[1] = 0x06d;
+	mCoeffs[2] = 0x05f;
+	mCoeffsFB[2] = 0x06d;
+	for (unsigned i = 0; i < mIRate; i++) {
+		computeStateTables(i);
+	}
+	computeGeneratorTable();
+}
+
+
+
+
+void ViterbiTCH_AFS7_95::initializeStates()
+{
+	for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
+	for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
+}
+
+
+
+void ViterbiTCH_AFS7_95::computeStateTables(unsigned g)
+{
+	assert(g<mIRate);
+	for (unsigned state=0; state<mIStates; state++) {
+		for (unsigned in = 0; in <= 1; in++) {
+			uint32_t inputVal = (state<<1) | in;
+			mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
+		}
+	}
+}
+
+void ViterbiTCH_AFS7_95::computeGeneratorTable()
+{
+	for (unsigned index=0; index<mIStates*2; index++) {
+		uint32_t t = 0;
+		for (unsigned i = 0; i < mIRate; i++) {
+			t = (t << 1) | mStateTable[i][index];
+		}
+		mGeneratorTable[index] = t;
+	}
+}
+
+
+
+
+
+
+void ViterbiTCH_AFS7_95::branchCandidates()
+{
+	// Branch to generate new input states.
+	const vCand *sp = mSurvivors;
+	for (unsigned cand=0; cand<mNumCands; cand+=2) {
+		uint32_t oStateShifted = (sp->oState) << mIRate;
+		for (unsigned in = 0; in <= 1; in++) {
+			mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
+			mCandidates[cand+in].cost = sp->cost;
+			uint32_t outputs = oStateShifted;
+			for (unsigned out = 0; out < mIRate; out++) {
+				char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
+				char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
+				mCandidates[cand+in].rState[out] = rState;
+				outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
+			}
+			mCandidates[cand+in].oState = outputs;
+		}
+		sp++;
+	}
+}
+
+
+void ViterbiTCH_AFS7_95::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
+{
+	const float *cTab[2] = {matchCost,mismatchCost};
+	for (unsigned i=0; i<mNumCands; i++) {
+		vCand& thisCand = mCandidates[i];
+		const unsigned mismatched = inSample ^ (thisCand.oState);
+		for (unsigned i = 0; i < mIRate; i++) {
+			thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
+		}
+	}
+}
+
+
+void ViterbiTCH_AFS7_95::pruneCandidates()
+{
+	const vCand* c1 = mCandidates;					// 0-prefix
+	const vCand* c2 = mCandidates + mIStates;		// 1-prefix
+	for (unsigned i=0; i<mIStates; i++) {
+		if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
+		else mSurvivors[i] = c2[i];
+	}
+}
+
+
+const ViterbiTCH_AFS7_95::vCand& ViterbiTCH_AFS7_95::minCost() const
+{
+	int minIndex = 0;
+	float minCost = mSurvivors[0].cost;
+	for (unsigned i=1; i<mIStates; i++) {
+		const float thisCost = mSurvivors[i].cost;
+		if (thisCost>=minCost) continue;
+		minCost = thisCost;
+		minIndex=i;
+	}
+	return mSurvivors[minIndex];
+}
+
+
+const ViterbiTCH_AFS7_95::vCand& ViterbiTCH_AFS7_95::step(uint32_t inSample, const float *probs, const float *iprobs)
+{
+	branchCandidates();
+	getSoftCostMetrics(inSample,probs,iprobs);
+	pruneCandidates();
+	return minCost();
+}
+
+
+
+void ViterbiTCH_AFS7_95::decode(const SoftVector &in, BitVector& target)
+{
+	ViterbiTCH_AFS7_95 &decoder = *this;
+	const size_t sz = in.size() - 18;
+	const unsigned deferral = decoder.deferral();
+	const size_t ctsz = sz + deferral*decoder.iRate();
+	assert(sz == decoder.iRate()*target.size());
+
+	// Build a "history" array where each element contains the full history.
+	uint32_t history[ctsz];
+	{
+		BitVector bits = in.sliced();
+		uint32_t accum = 0;
+		for (size_t i=0; i<sz; i++) {
+			accum = (accum<<1) | bits.bit(i);
+			history[i] = accum;
+		}
+		// Repeat last bit at the end.
+		for (size_t i=sz; i<ctsz; i++) {
+			accum = (accum<<1) | (accum & 0x01);
+			history[i] = accum;
+		}
+	}
+
+	// Precompute metric tables.
+	float matchCostTable[ctsz];
+	float mismatchCostTable[ctsz];
+	{
+		const float *dp = in.begin();
+		for (size_t i=0; i<sz; i++) {
+			// pVal is the probability that a bit is correct.
+			// ipVal is the probability that a bit is incorrect.
+			float pVal = dp[i];
+			if (pVal>0.5F) pVal = 1.0F-pVal;
+			float ipVal = 1.0F-pVal;
+			// This is a cheap approximation to an ideal cost function.
+			if (pVal<0.01F) pVal = 0.01;
+			if (ipVal<0.01F) ipVal = 0.01;
+			matchCostTable[i] = 0.25F/ipVal;
+			mismatchCostTable[i] = 0.25F/pVal;
+		}
+	
+		// pad end of table with unknowns
+		for (size_t i=sz; i<ctsz; i++) {
+			matchCostTable[i] = 0.5F;
+			mismatchCostTable[i] = 0.5F;
+		}
+	}
+
+	{
+		decoder.initializeStates();
+		// Each sample of history[] carries its history.
+		// So we only have to process every iRate-th sample.
+		const unsigned step = decoder.iRate();
+		// input pointer
+		const uint32_t *ip = history + step - 1;
+		// output pointers
+		char *op = target.begin();
+		const char *const opt = target.end();
+		// table pointers
+		const float* match = matchCostTable;
+		const float* mismatch = mismatchCostTable;
+		size_t oCount = 0;
+		while (op<opt) {
+			// Viterbi algorithm
+			assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
+			assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
+			const ViterbiTCH_AFS7_95::vCand &minCost = decoder.step(*ip, match, mismatch);
+			ip += step;
+			match += step;
+			mismatch += step;
+			// output
+			if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
+			oCount++;
+		}
+	}
+}
+
+
+
+ViterbiTCH_AFS7_4::ViterbiTCH_AFS7_4()
+{
+	assert(mDeferral < 32);
+	mCoeffs[0] = 0x01b;
+	mCoeffsFB[0] = 0x01f;
+	mCoeffs[1] = 0x015;
+	mCoeffsFB[1] = 0x01f;
+	mCoeffs[2] = 0x01f;
+	mCoeffsFB[2] = 0x01f;
+	for (unsigned i = 0; i < mIRate; i++) {
+		computeStateTables(i);
+	}
+	computeGeneratorTable();
+}
+
+
+
+
+void ViterbiTCH_AFS7_4::initializeStates()
+{
+	for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
+	for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
+}
+
+
+
+void ViterbiTCH_AFS7_4::computeStateTables(unsigned g)
+{
+	assert(g<mIRate);
+	for (unsigned state=0; state<mIStates; state++) {
+		for (unsigned in = 0; in <= 1; in++) {
+			uint32_t inputVal = (state<<1) | in;
+			mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
+		}
+	}
+}
+
+void ViterbiTCH_AFS7_4::computeGeneratorTable()
+{
+	for (unsigned index=0; index<mIStates*2; index++) {
+		uint32_t t = 0;
+		for (unsigned i = 0; i < mIRate; i++) {
+			t = (t << 1) | mStateTable[i][index];
+		}
+		mGeneratorTable[index] = t;
+	}
+}
+
+
+
+
+
+
+void ViterbiTCH_AFS7_4::branchCandidates()
+{
+	// Branch to generate new input states.
+	const vCand *sp = mSurvivors;
+	for (unsigned cand=0; cand<mNumCands; cand+=2) {
+		uint32_t oStateShifted = (sp->oState) << mIRate;
+		for (unsigned in = 0; in <= 1; in++) {
+			mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
+			mCandidates[cand+in].cost = sp->cost;
+			uint32_t outputs = oStateShifted;
+			for (unsigned out = 0; out < mIRate; out++) {
+				char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
+				char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
+				mCandidates[cand+in].rState[out] = rState;
+				outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
+			}
+			mCandidates[cand+in].oState = outputs;
+		}
+		sp++;
+	}
+}
+
+
+void ViterbiTCH_AFS7_4::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
+{
+	const float *cTab[2] = {matchCost,mismatchCost};
+	for (unsigned i=0; i<mNumCands; i++) {
+		vCand& thisCand = mCandidates[i];
+		const unsigned mismatched = inSample ^ (thisCand.oState);
+		for (unsigned i = 0; i < mIRate; i++) {
+			thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
+		}
+	}
+}
+
+
+void ViterbiTCH_AFS7_4::pruneCandidates()
+{
+	const vCand* c1 = mCandidates;					// 0-prefix
+	const vCand* c2 = mCandidates + mIStates;		// 1-prefix
+	for (unsigned i=0; i<mIStates; i++) {
+		if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
+		else mSurvivors[i] = c2[i];
+	}
+}
+
+
+const ViterbiTCH_AFS7_4::vCand& ViterbiTCH_AFS7_4::minCost() const
+{
+	int minIndex = 0;
+	float minCost = mSurvivors[0].cost;
+	for (unsigned i=1; i<mIStates; i++) {
+		const float thisCost = mSurvivors[i].cost;
+		if (thisCost>=minCost) continue;
+		minCost = thisCost;
+		minIndex=i;
+	}
+	return mSurvivors[minIndex];
+}
+
+
+const ViterbiTCH_AFS7_4::vCand& ViterbiTCH_AFS7_4::step(uint32_t inSample, const float *probs, const float *iprobs)
+{
+	branchCandidates();
+	getSoftCostMetrics(inSample,probs,iprobs);
+	pruneCandidates();
+	return minCost();
+}
+
+
+
+void ViterbiTCH_AFS7_4::decode(const SoftVector &in, BitVector& target)
+{
+	ViterbiTCH_AFS7_4 &decoder = *this;
+	const size_t sz = in.size() - 12;
+	const unsigned deferral = decoder.deferral();
+	const size_t ctsz = sz + deferral*decoder.iRate();
+	assert(sz == decoder.iRate()*target.size());
+
+	// Build a "history" array where each element contains the full history.
+	uint32_t history[ctsz];
+	{
+		BitVector bits = in.sliced();
+		uint32_t accum = 0;
+		for (size_t i=0; i<sz; i++) {
+			accum = (accum<<1) | bits.bit(i);
+			history[i] = accum;
+		}
+		// Repeat last bit at the end.
+		for (size_t i=sz; i<ctsz; i++) {
+			accum = (accum<<1) | (accum & 0x01);
+			history[i] = accum;
+		}
+	}
+
+	// Precompute metric tables.
+	float matchCostTable[ctsz];
+	float mismatchCostTable[ctsz];
+	{
+		const float *dp = in.begin();
+		for (size_t i=0; i<sz; i++) {
+			// pVal is the probability that a bit is correct.
+			// ipVal is the probability that a bit is incorrect.
+			float pVal = dp[i];
+			if (pVal>0.5F) pVal = 1.0F-pVal;
+			float ipVal = 1.0F-pVal;
+			// This is a cheap approximation to an ideal cost function.
+			if (pVal<0.01F) pVal = 0.01;
+			if (ipVal<0.01F) ipVal = 0.01;
+			matchCostTable[i] = 0.25F/ipVal;
+			mismatchCostTable[i] = 0.25F/pVal;
+		}
+	
+		// pad end of table with unknowns
+		for (size_t i=sz; i<ctsz; i++) {
+			matchCostTable[i] = 0.5F;
+			mismatchCostTable[i] = 0.5F;
+		}
+	}
+
+	{
+		decoder.initializeStates();
+		// Each sample of history[] carries its history.
+		// So we only have to process every iRate-th sample.
+		const unsigned step = decoder.iRate();
+		// input pointer
+		const uint32_t *ip = history + step - 1;
+		// output pointers
+		char *op = target.begin();
+		const char *const opt = target.end();
+		// table pointers
+		const float* match = matchCostTable;
+		const float* mismatch = mismatchCostTable;
+		size_t oCount = 0;
+		while (op<opt) {
+			// Viterbi algorithm
+			assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
+			assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
+			const ViterbiTCH_AFS7_4::vCand &minCost = decoder.step(*ip, match, mismatch);
+			ip += step;
+			match += step;
+			mismatch += step;
+			// output
+			if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
+			oCount++;
+		}
+	}
+}
+
+
+
+ViterbiTCH_AFS6_7::ViterbiTCH_AFS6_7()
+{
+	assert(mDeferral < 32);
+	mCoeffs[0] = 0x01b;
+	mCoeffsFB[0] = 0x01f;
+	mCoeffs[1] = 0x015;
+	mCoeffsFB[1] = 0x01f;
+	mCoeffs[2] = 0x01f;
+	mCoeffsFB[2] = 0x01f;
+	mCoeffs[3] = 0x01f;
+	mCoeffsFB[3] = 0x01f;
+	for (unsigned i = 0; i < mIRate; i++) {
+		computeStateTables(i);
+	}
+	computeGeneratorTable();
+}
+
+
+
+
+void ViterbiTCH_AFS6_7::initializeStates()
+{
+	for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
+	for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
+}
+
+
+
+void ViterbiTCH_AFS6_7::computeStateTables(unsigned g)
+{
+	assert(g<mIRate);
+	for (unsigned state=0; state<mIStates; state++) {
+		for (unsigned in = 0; in <= 1; in++) {
+			uint32_t inputVal = (state<<1) | in;
+			mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
+		}
+	}
+}
+
+void ViterbiTCH_AFS6_7::computeGeneratorTable()
+{
+	for (unsigned index=0; index<mIStates*2; index++) {
+		uint32_t t = 0;
+		for (unsigned i = 0; i < mIRate; i++) {
+			t = (t << 1) | mStateTable[i][index];
+		}
+		mGeneratorTable[index] = t;
+	}
+}
+
+
+
+
+
+
+void ViterbiTCH_AFS6_7::branchCandidates()
+{
+	// Branch to generate new input states.
+	const vCand *sp = mSurvivors;
+	for (unsigned cand=0; cand<mNumCands; cand+=2) {
+		uint32_t oStateShifted = (sp->oState) << mIRate;
+		for (unsigned in = 0; in <= 1; in++) {
+			mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
+			mCandidates[cand+in].cost = sp->cost;
+			uint32_t outputs = oStateShifted;
+			for (unsigned out = 0; out < mIRate; out++) {
+				char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
+				char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
+				mCandidates[cand+in].rState[out] = rState;
+				outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
+			}
+			mCandidates[cand+in].oState = outputs;
+		}
+		sp++;
+	}
+}
+
+
+void ViterbiTCH_AFS6_7::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
+{
+	const float *cTab[2] = {matchCost,mismatchCost};
+	for (unsigned i=0; i<mNumCands; i++) {
+		vCand& thisCand = mCandidates[i];
+		const unsigned mismatched = inSample ^ (thisCand.oState);
+		for (unsigned i = 0; i < mIRate; i++) {
+			thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
+		}
+	}
+}
+
+
+void ViterbiTCH_AFS6_7::pruneCandidates()
+{
+	const vCand* c1 = mCandidates;					// 0-prefix
+	const vCand* c2 = mCandidates + mIStates;		// 1-prefix
+	for (unsigned i=0; i<mIStates; i++) {
+		if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
+		else mSurvivors[i] = c2[i];
+	}
+}
+
+
+const ViterbiTCH_AFS6_7::vCand& ViterbiTCH_AFS6_7::minCost() const
+{
+	int minIndex = 0;
+	float minCost = mSurvivors[0].cost;
+	for (unsigned i=1; i<mIStates; i++) {
+		const float thisCost = mSurvivors[i].cost;
+		if (thisCost>=minCost) continue;
+		minCost = thisCost;
+		minIndex=i;
+	}
+	return mSurvivors[minIndex];
+}
+
+
+const ViterbiTCH_AFS6_7::vCand& ViterbiTCH_AFS6_7::step(uint32_t inSample, const float *probs, const float *iprobs)
+{
+	branchCandidates();
+	getSoftCostMetrics(inSample,probs,iprobs);
+	pruneCandidates();
+	return minCost();
+}
+
+
+
+void ViterbiTCH_AFS6_7::decode(const SoftVector &in, BitVector& target)
+{
+	ViterbiTCH_AFS6_7 &decoder = *this;
+	const size_t sz = in.size() - 16;
+	const unsigned deferral = decoder.deferral();
+	const size_t ctsz = sz + deferral*decoder.iRate();
+	assert(sz == decoder.iRate()*target.size());
+
+	// Build a "history" array where each element contains the full history.
+	uint32_t history[ctsz];
+	{
+		BitVector bits = in.sliced();
+		uint32_t accum = 0;
+		for (size_t i=0; i<sz; i++) {
+			accum = (accum<<1) | bits.bit(i);
+			history[i] = accum;
+		}
+		// Repeat last bit at the end.
+		for (size_t i=sz; i<ctsz; i++) {
+			accum = (accum<<1) | (accum & 0x01);
+			history[i] = accum;
+		}
+	}
+
+	// Precompute metric tables.
+	float matchCostTable[ctsz];
+	float mismatchCostTable[ctsz];
+	{
+		const float *dp = in.begin();
+		for (size_t i=0; i<sz; i++) {
+			// pVal is the probability that a bit is correct.
+			// ipVal is the probability that a bit is incorrect.
+			float pVal = dp[i];
+			if (pVal>0.5F) pVal = 1.0F-pVal;
+			float ipVal = 1.0F-pVal;
+			// This is a cheap approximation to an ideal cost function.
+			if (pVal<0.01F) pVal = 0.01;
+			if (ipVal<0.01F) ipVal = 0.01;
+			matchCostTable[i] = 0.25F/ipVal;
+			mismatchCostTable[i] = 0.25F/pVal;
+		}
+	
+		// pad end of table with unknowns
+		for (size_t i=sz; i<ctsz; i++) {
+			matchCostTable[i] = 0.5F;
+			mismatchCostTable[i] = 0.5F;
+		}
+	}
+
+	{
+		decoder.initializeStates();
+		// Each sample of history[] carries its history.
+		// So we only have to process every iRate-th sample.
+		const unsigned step = decoder.iRate();
+		// input pointer
+		const uint32_t *ip = history + step - 1;
+		// output pointers
+		char *op = target.begin();
+		const char *const opt = target.end();
+		// table pointers
+		const float* match = matchCostTable;
+		const float* mismatch = mismatchCostTable;
+		size_t oCount = 0;
+		while (op<opt) {
+			// Viterbi algorithm
+			assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
+			assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
+			const ViterbiTCH_AFS6_7::vCand &minCost = decoder.step(*ip, match, mismatch);
+			ip += step;
+			match += step;
+			mismatch += step;
+			// output
+			if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
+			oCount++;
+		}
+	}
+}
+
+
+
+ViterbiTCH_AFS5_9::ViterbiTCH_AFS5_9()
+{
+	assert(mDeferral < 32);
+	mCoeffs[0] = 0x06d;
+	mCoeffsFB[0] = 0x05f;
+	mCoeffs[1] = 0x053;
+	mCoeffsFB[1] = 0x05f;
+	mCoeffs[2] = 0x05f;
+	mCoeffsFB[2] = 0x05f;
+	mCoeffs[3] = 0x05f;
+	mCoeffsFB[3] = 0x05f;
+	for (unsigned i = 0; i < mIRate; i++) {
+		computeStateTables(i);
+	}
+	computeGeneratorTable();
+}
+
+
+
+
+void ViterbiTCH_AFS5_9::initializeStates()
+{
+	for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
+	for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
+}
+
+
+
+void ViterbiTCH_AFS5_9::computeStateTables(unsigned g)
+{
+	assert(g<mIRate);
+	for (unsigned state=0; state<mIStates; state++) {
+		for (unsigned in = 0; in <= 1; in++) {
+			uint32_t inputVal = (state<<1) | in;
+			mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
+		}
+	}
+}
+
+void ViterbiTCH_AFS5_9::computeGeneratorTable()
+{
+	for (unsigned index=0; index<mIStates*2; index++) {
+		uint32_t t = 0;
+		for (unsigned i = 0; i < mIRate; i++) {
+			t = (t << 1) | mStateTable[i][index];
+		}
+		mGeneratorTable[index] = t;
+	}
+}
+
+
+
+
+
+
+void ViterbiTCH_AFS5_9::branchCandidates()
+{
+	// Branch to generate new input states.
+	const vCand *sp = mSurvivors;
+	for (unsigned cand=0; cand<mNumCands; cand+=2) {
+		uint32_t oStateShifted = (sp->oState) << mIRate;
+		for (unsigned in = 0; in <= 1; in++) {
+			mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
+			mCandidates[cand+in].cost = sp->cost;
+			uint32_t outputs = oStateShifted;
+			for (unsigned out = 0; out < mIRate; out++) {
+				char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
+				char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
+				mCandidates[cand+in].rState[out] = rState;
+				outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
+			}
+			mCandidates[cand+in].oState = outputs;
+		}
+		sp++;
+	}
+}
+
+
+void ViterbiTCH_AFS5_9::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
+{
+	const float *cTab[2] = {matchCost,mismatchCost};
+	for (unsigned i=0; i<mNumCands; i++) {
+		vCand& thisCand = mCandidates[i];
+		const unsigned mismatched = inSample ^ (thisCand.oState);
+		for (unsigned i = 0; i < mIRate; i++) {
+			thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
+		}
+	}
+}
+
+
+void ViterbiTCH_AFS5_9::pruneCandidates()
+{
+	const vCand* c1 = mCandidates;					// 0-prefix
+	const vCand* c2 = mCandidates + mIStates;		// 1-prefix
+	for (unsigned i=0; i<mIStates; i++) {
+		if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
+		else mSurvivors[i] = c2[i];
+	}
+}
+
+
+const ViterbiTCH_AFS5_9::vCand& ViterbiTCH_AFS5_9::minCost() const
+{
+	int minIndex = 0;
+	float minCost = mSurvivors[0].cost;
+	for (unsigned i=1; i<mIStates; i++) {
+		const float thisCost = mSurvivors[i].cost;
+		if (thisCost>=minCost) continue;
+		minCost = thisCost;
+		minIndex=i;
+	}
+	return mSurvivors[minIndex];
+}
+
+
+const ViterbiTCH_AFS5_9::vCand& ViterbiTCH_AFS5_9::step(uint32_t inSample, const float *probs, const float *iprobs)
+{
+	branchCandidates();
+	getSoftCostMetrics(inSample,probs,iprobs);
+	pruneCandidates();
+	return minCost();
+}
+
+
+
+void ViterbiTCH_AFS5_9::decode(const SoftVector &in, BitVector& target)
+{
+	ViterbiTCH_AFS5_9 &decoder = *this;
+	const size_t sz = in.size() - 24;
+	const unsigned deferral = decoder.deferral();
+	const size_t ctsz = sz + deferral*decoder.iRate();
+	assert(sz == decoder.iRate()*target.size());
+
+	// Build a "history" array where each element contains the full history.
+	uint32_t history[ctsz];
+	{
+		BitVector bits = in.sliced();
+		uint32_t accum = 0;
+		for (size_t i=0; i<sz; i++) {
+			accum = (accum<<1) | bits.bit(i);
+			history[i] = accum;
+		}
+		// Repeat last bit at the end.
+		for (size_t i=sz; i<ctsz; i++) {
+			accum = (accum<<1) | (accum & 0x01);
+			history[i] = accum;
+		}
+	}
+
+	// Precompute metric tables.
+	float matchCostTable[ctsz];
+	float mismatchCostTable[ctsz];
+	{
+		const float *dp = in.begin();
+		for (size_t i=0; i<sz; i++) {
+			// pVal is the probability that a bit is correct.
+			// ipVal is the probability that a bit is incorrect.
+			float pVal = dp[i];
+			if (pVal>0.5F) pVal = 1.0F-pVal;
+			float ipVal = 1.0F-pVal;
+			// This is a cheap approximation to an ideal cost function.
+			if (pVal<0.01F) pVal = 0.01;
+			if (ipVal<0.01F) ipVal = 0.01;
+			matchCostTable[i] = 0.25F/ipVal;
+			mismatchCostTable[i] = 0.25F/pVal;
+		}
+	
+		// pad end of table with unknowns
+		for (size_t i=sz; i<ctsz; i++) {
+			matchCostTable[i] = 0.5F;
+			mismatchCostTable[i] = 0.5F;
+		}
+	}
+
+	{
+		decoder.initializeStates();
+		// Each sample of history[] carries its history.
+		// So we only have to process every iRate-th sample.
+		const unsigned step = decoder.iRate();
+		// input pointer
+		const uint32_t *ip = history + step - 1;
+		// output pointers
+		char *op = target.begin();
+		const char *const opt = target.end();
+		// table pointers
+		const float* match = matchCostTable;
+		const float* mismatch = mismatchCostTable;
+		size_t oCount = 0;
+		while (op<opt) {
+			// Viterbi algorithm
+			assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
+			assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
+			const ViterbiTCH_AFS5_9::vCand &minCost = decoder.step(*ip, match, mismatch);
+			ip += step;
+			match += step;
+			mismatch += step;
+			// output
+			if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
+			oCount++;
+		}
+	}
+}
+
+
+
+ViterbiTCH_AFS5_15::ViterbiTCH_AFS5_15()
+{
+	assert(mDeferral < 32);
+	mCoeffs[0] = 0x01b;
+	mCoeffsFB[0] = 0x01f;
+	mCoeffs[1] = 0x01b;
+	mCoeffsFB[1] = 0x01f;
+	mCoeffs[2] = 0x015;
+	mCoeffsFB[2] = 0x01f;
+	mCoeffs[3] = 0x01f;
+	mCoeffsFB[3] = 0x01f;
+	mCoeffs[4] = 0x01f;
+	mCoeffsFB[4] = 0x01f;
+	for (unsigned i = 0; i < mIRate; i++) {
+		computeStateTables(i);
+	}
+	computeGeneratorTable();
+}
+
+
+
+
+void ViterbiTCH_AFS5_15::initializeStates()
+{
+	for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
+	for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
+}
+
+
+
+void ViterbiTCH_AFS5_15::computeStateTables(unsigned g)
+{
+	assert(g<mIRate);
+	for (unsigned state=0; state<mIStates; state++) {
+		for (unsigned in = 0; in <= 1; in++) {
+			uint32_t inputVal = (state<<1) | in;
+			mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
+		}
+	}
+}
+
+void ViterbiTCH_AFS5_15::computeGeneratorTable()
+{
+	for (unsigned index=0; index<mIStates*2; index++) {
+		uint32_t t = 0;
+		for (unsigned i = 0; i < mIRate; i++) {
+			t = (t << 1) | mStateTable[i][index];
+		}
+		mGeneratorTable[index] = t;
+	}
+}
+
+
+
+
+
+
+void ViterbiTCH_AFS5_15::branchCandidates()
+{
+	// Branch to generate new input states.
+	const vCand *sp = mSurvivors;
+	for (unsigned cand=0; cand<mNumCands; cand+=2) {
+		uint32_t oStateShifted = (sp->oState) << mIRate;
+		for (unsigned in = 0; in <= 1; in++) {
+			mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
+			mCandidates[cand+in].cost = sp->cost;
+			uint32_t outputs = oStateShifted;
+			for (unsigned out = 0; out < mIRate; out++) {
+				char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
+				char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
+				mCandidates[cand+in].rState[out] = rState;
+				outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
+			}
+			mCandidates[cand+in].oState = outputs;
+		}
+		sp++;
+	}
+}
+
+
+void ViterbiTCH_AFS5_15::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
+{
+	const float *cTab[2] = {matchCost,mismatchCost};
+	for (unsigned i=0; i<mNumCands; i++) {
+		vCand& thisCand = mCandidates[i];
+		const unsigned mismatched = inSample ^ (thisCand.oState);
+		for (unsigned i = 0; i < mIRate; i++) {
+			thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
+		}
+	}
+}
+
+
+void ViterbiTCH_AFS5_15::pruneCandidates()
+{
+	const vCand* c1 = mCandidates;					// 0-prefix
+	const vCand* c2 = mCandidates + mIStates;		// 1-prefix
+	for (unsigned i=0; i<mIStates; i++) {
+		if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
+		else mSurvivors[i] = c2[i];
+	}
+}
+
+
+const ViterbiTCH_AFS5_15::vCand& ViterbiTCH_AFS5_15::minCost() const
+{
+	int minIndex = 0;
+	float minCost = mSurvivors[0].cost;
+	for (unsigned i=1; i<mIStates; i++) {
+		const float thisCost = mSurvivors[i].cost;
+		if (thisCost>=minCost) continue;
+		minCost = thisCost;
+		minIndex=i;
+	}
+	return mSurvivors[minIndex];
+}
+
+
+const ViterbiTCH_AFS5_15::vCand& ViterbiTCH_AFS5_15::step(uint32_t inSample, const float *probs, const float *iprobs)
+{
+	branchCandidates();
+	getSoftCostMetrics(inSample,probs,iprobs);
+	pruneCandidates();
+	return minCost();
+}
+
+
+
+void ViterbiTCH_AFS5_15::decode(const SoftVector &in, BitVector& target)
+{
+	ViterbiTCH_AFS5_15 &decoder = *this;
+	const size_t sz = in.size() - 20;
+	const unsigned deferral = decoder.deferral();
+	const size_t ctsz = sz + deferral*decoder.iRate();
+	assert(sz == decoder.iRate()*target.size());
+
+	// Build a "history" array where each element contains the full history.
+	uint32_t history[ctsz];
+	{
+		BitVector bits = in.sliced();
+		uint32_t accum = 0;
+		for (size_t i=0; i<sz; i++) {
+			accum = (accum<<1) | bits.bit(i);
+			history[i] = accum;
+		}
+		// Repeat last bit at the end.
+		for (size_t i=sz; i<ctsz; i++) {
+			accum = (accum<<1) | (accum & 0x01);
+			history[i] = accum;
+		}
+	}
+
+	// Precompute metric tables.
+	float matchCostTable[ctsz];
+	float mismatchCostTable[ctsz];
+	{
+		const float *dp = in.begin();
+		for (size_t i=0; i<sz; i++) {
+			// pVal is the probability that a bit is correct.
+			// ipVal is the probability that a bit is incorrect.
+			float pVal = dp[i];
+			if (pVal>0.5F) pVal = 1.0F-pVal;
+			float ipVal = 1.0F-pVal;
+			// This is a cheap approximation to an ideal cost function.
+			if (pVal<0.01F) pVal = 0.01;
+			if (ipVal<0.01F) ipVal = 0.01;
+			matchCostTable[i] = 0.25F/ipVal;
+			mismatchCostTable[i] = 0.25F/pVal;
+		}
+	
+		// pad end of table with unknowns
+		for (size_t i=sz; i<ctsz; i++) {
+			matchCostTable[i] = 0.5F;
+			mismatchCostTable[i] = 0.5F;
+		}
+	}
+
+	{
+		decoder.initializeStates();
+		// Each sample of history[] carries its history.
+		// So we only have to process every iRate-th sample.
+		const unsigned step = decoder.iRate();
+		// input pointer
+		const uint32_t *ip = history + step - 1;
+		// output pointers
+		char *op = target.begin();
+		const char *const opt = target.end();
+		// table pointers
+		const float* match = matchCostTable;
+		const float* mismatch = mismatchCostTable;
+		size_t oCount = 0;
+		while (op<opt) {
+			// Viterbi algorithm
+			assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
+			assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
+			const ViterbiTCH_AFS5_15::vCand &minCost = decoder.step(*ip, match, mismatch);
+			ip += step;
+			match += step;
+			mismatch += step;
+			// output
+			if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
+			oCount++;
+		}
+	}
+}
+
+
+
+ViterbiTCH_AFS4_75::ViterbiTCH_AFS4_75()
+{
+	assert(mDeferral < 32);
+	mCoeffs[0] = 0x06d;
+	mCoeffsFB[0] = 0x05f;
+	mCoeffs[1] = 0x06d;
+	mCoeffsFB[1] = 0x05f;
+	mCoeffs[2] = 0x053;
+	mCoeffsFB[2] = 0x05f;
+	mCoeffs[3] = 0x05f;
+	mCoeffsFB[3] = 0x05f;
+	mCoeffs[4] = 0x05f;
+	mCoeffsFB[4] = 0x05f;
+	for (unsigned i = 0; i < mIRate; i++) {
+		computeStateTables(i);
+	}
+	computeGeneratorTable();
+}
+
+
+
+
+void ViterbiTCH_AFS4_75::initializeStates()
+{
+	for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
+	for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
+}
+
+
+
+void ViterbiTCH_AFS4_75::computeStateTables(unsigned g)
+{
+	assert(g<mIRate);
+	for (unsigned state=0; state<mIStates; state++) {
+		for (unsigned in = 0; in <= 1; in++) {
+			uint32_t inputVal = (state<<1) | in;
+			mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
+		}
+	}
+}
+
+void ViterbiTCH_AFS4_75::computeGeneratorTable()
+{
+	for (unsigned index=0; index<mIStates*2; index++) {
+		uint32_t t = 0;
+		for (unsigned i = 0; i < mIRate; i++) {
+			t = (t << 1) | mStateTable[i][index];
+		}
+		mGeneratorTable[index] = t;
+	}
+}
+
+
+
+
+
+
+void ViterbiTCH_AFS4_75::branchCandidates()
+{
+	// Branch to generate new input states.
+	const vCand *sp = mSurvivors;
+	for (unsigned cand=0; cand<mNumCands; cand+=2) {
+		uint32_t oStateShifted = (sp->oState) << mIRate;
+		for (unsigned in = 0; in <= 1; in++) {
+			mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
+			mCandidates[cand+in].cost = sp->cost;
+			uint32_t outputs = oStateShifted;
+			for (unsigned out = 0; out < mIRate; out++) {
+				char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
+				char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
+				mCandidates[cand+in].rState[out] = rState;
+				outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
+			}
+			mCandidates[cand+in].oState = outputs;
+		}
+		sp++;
+	}
+}
+
+
+void ViterbiTCH_AFS4_75::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
+{
+	const float *cTab[2] = {matchCost,mismatchCost};
+	for (unsigned i=0; i<mNumCands; i++) {
+		vCand& thisCand = mCandidates[i];
+		const unsigned mismatched = inSample ^ (thisCand.oState);
+		for (unsigned i = 0; i < mIRate; i++) {
+			thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
+		}
+	}
+}
+
+
+void ViterbiTCH_AFS4_75::pruneCandidates()
+{
+	const vCand* c1 = mCandidates;					// 0-prefix
+	const vCand* c2 = mCandidates + mIStates;		// 1-prefix
+	for (unsigned i=0; i<mIStates; i++) {
+		if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
+		else mSurvivors[i] = c2[i];
+	}
+}
+
+
+const ViterbiTCH_AFS4_75::vCand& ViterbiTCH_AFS4_75::minCost() const
+{
+	int minIndex = 0;
+	float minCost = mSurvivors[0].cost;
+	for (unsigned i=1; i<mIStates; i++) {
+		const float thisCost = mSurvivors[i].cost;
+		if (thisCost>=minCost) continue;
+		minCost = thisCost;
+		minIndex=i;
+	}
+	return mSurvivors[minIndex];
+}
+
+
+const ViterbiTCH_AFS4_75::vCand& ViterbiTCH_AFS4_75::step(uint32_t inSample, const float *probs, const float *iprobs)
+{
+	branchCandidates();
+	getSoftCostMetrics(inSample,probs,iprobs);
+	pruneCandidates();
+	return minCost();
+}
+
+
+
+void ViterbiTCH_AFS4_75::decode(const SoftVector &in, BitVector& target)
+{
+	ViterbiTCH_AFS4_75 &decoder = *this;
+	const size_t sz = in.size() - 30;
+	const unsigned deferral = decoder.deferral();
+	const size_t ctsz = sz + deferral*decoder.iRate();
+	assert(sz == decoder.iRate()*target.size());
+
+	// Build a "history" array where each element contains the full history.
+	uint32_t history[ctsz];
+	{
+		BitVector bits = in.sliced();
+		uint32_t accum = 0;
+		for (size_t i=0; i<sz; i++) {
+			accum = (accum<<1) | bits.bit(i);
+			history[i] = accum;
+		}
+		// Repeat last bit at the end.
+		for (size_t i=sz; i<ctsz; i++) {
+			accum = (accum<<1) | (accum & 0x01);
+			history[i] = accum;
+		}
+	}
+
+	// Precompute metric tables.
+	float matchCostTable[ctsz];
+	float mismatchCostTable[ctsz];
+	{
+		const float *dp = in.begin();
+		for (size_t i=0; i<sz; i++) {
+			// pVal is the probability that a bit is correct.
+			// ipVal is the probability that a bit is incorrect.
+			float pVal = dp[i];
+			if (pVal>0.5F) pVal = 1.0F-pVal;
+			float ipVal = 1.0F-pVal;
+			// This is a cheap approximation to an ideal cost function.
+			if (pVal<0.01F) pVal = 0.01;
+			if (ipVal<0.01F) ipVal = 0.01;
+			matchCostTable[i] = 0.25F/ipVal;
+			mismatchCostTable[i] = 0.25F/pVal;
+		}
+	
+		// pad end of table with unknowns
+		for (size_t i=sz; i<ctsz; i++) {
+			matchCostTable[i] = 0.5F;
+			mismatchCostTable[i] = 0.5F;
+		}
+	}
+
+	{
+		decoder.initializeStates();
+		// Each sample of history[] carries its history.
+		// So we only have to process every iRate-th sample.
+		const unsigned step = decoder.iRate();
+		// input pointer
+		const uint32_t *ip = history + step - 1;
+		// output pointers
+		char *op = target.begin();
+		const char *const opt = target.end();
+		// table pointers
+		const float* match = matchCostTable;
+		const float* mismatch = mismatchCostTable;
+		size_t oCount = 0;
+		while (op<opt) {
+			// Viterbi algorithm
+			assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
+			assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
+			const ViterbiTCH_AFS4_75::vCand &minCost = decoder.step(*ip, match, mismatch);
+			ip += step;
+			match += step;
+			mismatch += step;
+			// output
+			if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
+			oCount++;
+		}
+	}
+}
+
+
+
diff --git a/lib/decoding/openbts/AmrCoder.h b/lib/decoding/openbts/AmrCoder.h
new file mode 100644
index 0000000..ae49bd0
--- /dev/null
+++ b/lib/decoding/openbts/AmrCoder.h
@@ -0,0 +1,941 @@
+/*
+ * Copyright 2013, 2014 Range Networks, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This use of this software may be subject to additional restrictions.
+ * See the LEGAL file in the main directory for details.
+ */
+
+#ifndef _AMRCODER_H_
+#define _AMRCODER_H_
+#include <stdint.h>
+#include "BitVector.h"
+#include "Viterbi.h"
+
+
+
+/**
+	Class to represent recursive systematic convolutional coders/decoders of rate 1/2, memory length 4.
+*/
+class ViterbiTCH_AFS12_2 : public ViterbiBase {
+
+	private:
+		/**@name Lots of precomputed elements so the compiler can optimize like hell. */
+		//@{
+		/**@name Core values. */
+		//@{
+		static const unsigned mIRate = 2;	///< reciprocal of rate
+		static const unsigned mOrder = 4;	///< memory length of generators
+		//@}
+		/**@name Derived values. */
+		//@{
+		static const unsigned mIStates = 0x01 << mOrder;	///< number of states, number of survivors
+		static const uint32_t mSMask = mIStates-1;			///< survivor mask
+		static const uint32_t mCMask = (mSMask<<1) | 0x01;	///< candidate mask
+		static const uint32_t mOMask = (0x01<<mIRate)-1;	///< output mask, all iRate low bits set
+		static const unsigned mNumCands = mIStates*2;		///< number of candidates to generate during branching
+		static const unsigned mDeferral = 6*mOrder;			///< deferral to be used
+		//@}
+		//@}
+
+		/** Precomputed tables. */
+		//@{
+		uint32_t mCoeffs[mIRate];					///< output polynomial for each generator
+		uint32_t mCoeffsFB[mIRate];					///< feedback polynomial for each generator
+		uint32_t mStateTable[mIRate][2*mIStates];	///< precomputed generator output tables
+		uint32_t mGeneratorTable[2*mIStates];		///< precomputed coder output table
+		//@}
+	
+	public:
+
+		/**
+		  A candidate sequence in a Viterbi decoder.
+		  The 32-bit state register can support a deferral of 6 with a 4th-order coder.
+		 */
+		typedef struct candStruct {
+			uint32_t iState;	///< encoder input associated with this candidate
+			uint32_t oState;	///< encoder output associated with this candidate
+			char rState[mIRate];///< real states of encoders associated with this candidate
+			float cost;			///< cost (metric value), float to support soft inputs
+		} vCand;
+
+		/** Clear a structure. */
+		void vitClear(vCand& v)
+		{
+			v.iState=0;
+			v.oState=0;
+			v.cost=0;
+			for (unsigned i = 0; i < mIRate; i++) v.rState[i] = 0;
+		}
+		
+
+	private:
+
+		/**@name Survivors and candidates. */
+		//@{
+		vCand mSurvivors[mIStates];			///< current survivor pool
+		vCand mCandidates[2*mIStates];		///< current candidate pool
+		//@}
+
+	public:
+
+		unsigned iRate() const { return mIRate; }
+		uint32_t cMask() const { return mCMask; }
+		uint32_t stateTable(unsigned g, unsigned i) const { return mStateTable[g][i]; }
+		unsigned deferral() const { return mDeferral; }
+		
+
+		ViterbiTCH_AFS12_2();
+
+		/** Set all cost metrics to zero. */
+		void initializeStates();
+
+		/**
+			Full cycle of the Viterbi algorithm: branch, metrics, prune, select.
+			@return reference to minimum-cost candidate.
+		*/
+		const vCand& step(uint32_t inSample, const float *probs, const float *iprobs);
+
+	private:
+
+		/** Branch survivors into new candidates. */
+		void branchCandidates();
+
+		/** Compute cost metrics for soft-inputs. */
+		void getSoftCostMetrics(uint32_t inSample, const float *probs, const float *iprobs);
+
+		/** Select survivors from the candidate set. */
+		void pruneCandidates();
+
+		/** Find the minimum cost survivor. */
+		const vCand& minCost() const;
+
+		/**
+			Precompute the state tables.
+			@param g Generator index 0..((1/rate)-1)
+		*/
+		void computeStateTables(unsigned g);
+
+		/**
+			Precompute the generator outputs.
+			mCoeffs must be defined first.
+		*/
+		void computeGeneratorTable();
+		void encode(const BitVector &in, BitVector& target) const;
+		void decode(const SoftVector &in, BitVector& target);
+};
+
+
+
+/**
+	Class to represent recursive systematic convolutional coders/decoders of rate 1/3, memory length 4.
+*/
+class ViterbiTCH_AFS10_2 : public ViterbiBase {
+
+	private:
+		/**@name Lots of precomputed elements so the compiler can optimize like hell. */
+		//@{
+		/**@name Core values. */
+		//@{
+		static const unsigned mIRate = 3;	///< reciprocal of rate
+		static const unsigned mOrder = 4;	///< memory length of generators
+		//@}
+		/**@name Derived values. */
+		//@{
+		static const unsigned mIStates = 0x01 << mOrder;	///< number of states, number of survivors
+		static const uint32_t mSMask = mIStates-1;			///< survivor mask
+		static const uint32_t mCMask = (mSMask<<1) | 0x01;	///< candidate mask
+		static const uint32_t mOMask = (0x01<<mIRate)-1;	///< output mask, all iRate low bits set
+		static const unsigned mNumCands = mIStates*2;		///< number of candidates to generate during branching
+		static const unsigned mDeferral = 6*mOrder;			///< deferral to be used
+		//@}
+		//@}
+
+		/** Precomputed tables. */
+		//@{
+		uint32_t mCoeffs[mIRate];					///< output polynomial for each generator
+		uint32_t mCoeffsFB[mIRate];					///< feedback polynomial for each generator
+		uint32_t mStateTable[mIRate][2*mIStates];	///< precomputed generator output tables
+		uint32_t mGeneratorTable[2*mIStates];		///< precomputed coder output table
+		//@}
+	
+	public:
+
+		/**
+		  A candidate sequence in a Viterbi decoder.
+		  The 32-bit state register can support a deferral of 6 with a 4th-order coder.
+		 */
+		typedef struct candStruct {
+			uint32_t iState;	///< encoder input associated with this candidate
+			uint32_t oState;	///< encoder output associated with this candidate
+			char rState[mIRate];///< real states of encoders associated with this candidate
+			float cost;			///< cost (metric value), float to support soft inputs
+		} vCand;
+
+		/** Clear a structure. */
+		void vitClear(vCand& v)
+		{
+			v.iState=0;
+			v.oState=0;
+			v.cost=0;
+			for (unsigned i = 0; i < mIRate; i++) v.rState[i] = 0;
+		}
+		
+
+	private:
+
+		/**@name Survivors and candidates. */
+		//@{
+		vCand mSurvivors[mIStates];			///< current survivor pool
+		vCand mCandidates[2*mIStates];		///< current candidate pool
+		//@}
+
+	public:
+
+		unsigned iRate() const { return mIRate; }
+		uint32_t cMask() const { return mCMask; }
+		uint32_t stateTable(unsigned g, unsigned i) const { return mStateTable[g][i]; }
+		unsigned deferral() const { return mDeferral; }
+		
+
+		ViterbiTCH_AFS10_2();
+
+		/** Set all cost metrics to zero. */
+		void initializeStates();
+
+		/**
+			Full cycle of the Viterbi algorithm: branch, metrics, prune, select.
+			@return reference to minimum-cost candidate.
+		*/
+		const vCand& step(uint32_t inSample, const float *probs, const float *iprobs);
+
+	private:
+
+		/** Branch survivors into new candidates. */
+		void branchCandidates();
+
+		/** Compute cost metrics for soft-inputs. */
+		void getSoftCostMetrics(uint32_t inSample, const float *probs, const float *iprobs);
+
+		/** Select survivors from the candidate set. */
+		void pruneCandidates();
+
+		/** Find the minimum cost survivor. */
+		const vCand& minCost() const;
+
+		/**
+			Precompute the state tables.
+			@param g Generator index 0..((1/rate)-1)
+		*/
+		void computeStateTables(unsigned g);
+
+		/**
+			Precompute the generator outputs.
+			mCoeffs must be defined first.
+		*/
+		void computeGeneratorTable();
+		void encode(const BitVector &in, BitVector& target) const;
+		void decode(const SoftVector &in, BitVector& target);
+
+};
+
+
+
+/**
+	Class to represent recursive systematic convolutional coders/decoders of rate 1/3, memory length 6.
+*/
+class ViterbiTCH_AFS7_95 : public ViterbiBase {
+
+	private:
+		/**@name Lots of precomputed elements so the compiler can optimize like hell. */
+		//@{
+		/**@name Core values. */
+		//@{
+		static const unsigned mIRate = 3;	///< reciprocal of rate
+		static const unsigned mOrder = 6;	///< memory length of generators
+		//@}
+		/**@name Derived values. */
+		//@{
+		static const unsigned mIStates = 0x01 << mOrder;	///< number of states, number of survivors
+		static const uint32_t mSMask = mIStates-1;			///< survivor mask
+		static const uint32_t mCMask = (mSMask<<1) | 0x01;	///< candidate mask
+		static const uint32_t mOMask = (0x01<<mIRate)-1;	///< output mask, all iRate low bits set
+		static const unsigned mNumCands = mIStates*2;		///< number of candidates to generate during branching
+		static const unsigned mDeferral = 5*mOrder;			///< deferral to be used
+		//@}
+		//@}
+
+		/** Precomputed tables. */
+		//@{
+		uint32_t mCoeffs[mIRate];					///< output polynomial for each generator
+		uint32_t mCoeffsFB[mIRate];					///< feedback polynomial for each generator
+		uint32_t mStateTable[mIRate][2*mIStates];	///< precomputed generator output tables
+		uint32_t mGeneratorTable[2*mIStates];		///< precomputed coder output table
+		//@}
+	
+	public:
+
+		/**
+		  A candidate sequence in a Viterbi decoder.
+		  The 32-bit state register can support a deferral of 5*order with a 6th-order coder.
+		 */
+		typedef struct candStruct {
+			uint32_t iState;	///< encoder input associated with this candidate
+			uint32_t oState;	///< encoder output associated with this candidate
+			char rState[mIRate];///< real states of encoders associated with this candidate
+			float cost;			///< cost (metric value), float to support soft inputs
+		} vCand;
+
+		/** Clear a structure. */
+		void vitClear(vCand& v)
+		{
+			v.iState=0;
+			v.oState=0;
+			v.cost=0;
+			for (unsigned i = 0; i < mIRate; i++) v.rState[i] = 0;
+		}
+		
+
+	private:
+
+		/**@name Survivors and candidates. */
+		//@{
+		vCand mSurvivors[mIStates];			///< current survivor pool
+		vCand mCandidates[2*mIStates];		///< current candidate pool
+		//@}
+
+	public:
+
+		unsigned iRate() const { return mIRate; }
+		uint32_t cMask() const { return mCMask; }
+		uint32_t stateTable(unsigned g, unsigned i) const { return mStateTable[g][i]; }
+		unsigned deferral() const { return mDeferral; }
+		
+
+		ViterbiTCH_AFS7_95();
+
+		/** Set all cost metrics to zero. */
+		void initializeStates();
+
+		/**
+			Full cycle of the Viterbi algorithm: branch, metrics, prune, select.
+			@return reference to minimum-cost candidate.
+		*/
+		const vCand& step(uint32_t inSample, const float *probs, const float *iprobs);
+
+	private:
+
+		/** Branch survivors into new candidates. */
+		void branchCandidates();
+
+		/** Compute cost metrics for soft-inputs. */
+		void getSoftCostMetrics(uint32_t inSample, const float *probs, const float *iprobs);
+
+		/** Select survivors from the candidate set. */
+		void pruneCandidates();
+
+		/** Find the minimum cost survivor. */
+		const vCand& minCost() const;
+
+		/**
+			Precompute the state tables.
+			@param g Generator index 0..((1/rate)-1)
+		*/
+		void computeStateTables(unsigned g);
+
+		/**
+			Precompute the generator outputs.
+			mCoeffs must be defined first.
+		*/
+		void computeGeneratorTable();
+		void encode(const BitVector &in, BitVector& target) const;
+		void decode(const SoftVector &in, BitVector& target);
+
+};
+
+
+
+/**
+	Class to represent recursive systematic convolutional coders/decoders of rate 1/3, memory length 4.
+*/
+class ViterbiTCH_AFS7_4 : public ViterbiBase {
+
+	private:
+		/**@name Lots of precomputed elements so the compiler can optimize like hell. */
+		//@{
+		/**@name Core values. */
+		//@{
+		static const unsigned mIRate = 3;	///< reciprocal of rate
+		static const unsigned mOrder = 4;	///< memory length of generators
+		//@}
+		/**@name Derived values. */
+		//@{
+		static const unsigned mIStates = 0x01 << mOrder;	///< number of states, number of survivors
+		static const uint32_t mSMask = mIStates-1;			///< survivor mask
+		static const uint32_t mCMask = (mSMask<<1) | 0x01;	///< candidate mask
+		static const uint32_t mOMask = (0x01<<mIRate)-1;	///< output mask, all iRate low bits set
+		static const unsigned mNumCands = mIStates*2;		///< number of candidates to generate during branching
+		static const unsigned mDeferral = 6*mOrder;			///< deferral to be used
+		//@}
+		//@}
+
+		/** Precomputed tables. */
+		//@{
+		uint32_t mCoeffs[mIRate];					///< output polynomial for each generator
+		uint32_t mCoeffsFB[mIRate];					///< feedback polynomial for each generator
+		uint32_t mStateTable[mIRate][2*mIStates];	///< precomputed generator output tables
+		uint32_t mGeneratorTable[2*mIStates];		///< precomputed coder output table
+		//@}
+	
+	public:
+
+		/**
+		  A candidate sequence in a Viterbi decoder.
+		  The 32-bit state register can support a deferral of 6 with a 4th-order coder.
+		 */
+		typedef struct candStruct {
+			uint32_t iState;	///< encoder input associated with this candidate
+			uint32_t oState;	///< encoder output associated with this candidate
+			char rState[mIRate];///< real states of encoders associated with this candidate
+			float cost;			///< cost (metric value), float to support soft inputs
+		} vCand;
+
+		/** Clear a structure. */
+		void vitClear(vCand& v)
+		{
+			v.iState=0;
+			v.oState=0;
+			v.cost=0;
+			for (unsigned i = 0; i < mIRate; i++) v.rState[i] = 0;
+		}
+		
+
+	private:
+
+		/**@name Survivors and candidates. */
+		//@{
+		vCand mSurvivors[mIStates];			///< current survivor pool
+		vCand mCandidates[2*mIStates];		///< current candidate pool
+		//@}
+
+	public:
+
+		unsigned iRate() const { return mIRate; }
+		uint32_t cMask() const { return mCMask; }
+		uint32_t stateTable(unsigned g, unsigned i) const { return mStateTable[g][i]; }
+		unsigned deferral() const { return mDeferral; }
+		
+
+		ViterbiTCH_AFS7_4();
+
+		/** Set all cost metrics to zero. */
+		void initializeStates();
+
+		/**
+			Full cycle of the Viterbi algorithm: branch, metrics, prune, select.
+			@return reference to minimum-cost candidate.
+		*/
+		const vCand& step(uint32_t inSample, const float *probs, const float *iprobs);
+
+	private:
+
+		/** Branch survivors into new candidates. */
+		void branchCandidates();
+
+		/** Compute cost metrics for soft-inputs. */
+		void getSoftCostMetrics(uint32_t inSample, const float *probs, const float *iprobs);
+
+		/** Select survivors from the candidate set. */
+		void pruneCandidates();
+
+		/** Find the minimum cost survivor. */
+		const vCand& minCost() const;
+
+		/**
+			Precompute the state tables.
+			@param g Generator index 0..((1/rate)-1)
+		*/
+		void computeStateTables(unsigned g);
+
+		/**
+			Precompute the generator outputs.
+			mCoeffs must be defined first.
+		*/
+		void computeGeneratorTable();
+		void encode(const BitVector &in, BitVector& target) const;
+		void decode(const SoftVector &in, BitVector& target);
+
+};
+
+
+
+/**
+	Class to represent recursive systematic convolutional coders/decoders of rate 1/4, memory length 4.
+*/
+class ViterbiTCH_AFS6_7 : public ViterbiBase {
+
+	private:
+		/**@name Lots of precomputed elements so the compiler can optimize like hell. */
+		//@{
+		/**@name Core values. */
+		//@{
+		static const unsigned mIRate = 4;	///< reciprocal of rate
+		static const unsigned mOrder = 4;	///< memory length of generators
+		//@}
+		/**@name Derived values. */
+		//@{
+		static const unsigned mIStates = 0x01 << mOrder;	///< number of states, number of survivors
+		static const uint32_t mSMask = mIStates-1;			///< survivor mask
+		static const uint32_t mCMask = (mSMask<<1) | 0x01;	///< candidate mask
+		static const uint32_t mOMask = (0x01<<mIRate)-1;	///< output mask, all iRate low bits set
+		static const unsigned mNumCands = mIStates*2;		///< number of candidates to generate during branching
+		static const unsigned mDeferral = 6*mOrder;			///< deferral to be used
+		//@}
+		//@}
+
+		/** Precomputed tables. */
+		//@{
+		uint32_t mCoeffs[mIRate];					///< output polynomial for each generator
+		uint32_t mCoeffsFB[mIRate];					///< feedback polynomial for each generator
+		uint32_t mStateTable[mIRate][2*mIStates];	///< precomputed generator output tables
+		uint32_t mGeneratorTable[2*mIStates];		///< precomputed coder output table
+		//@}
+	
+	public:
+
+		/**
+		  A candidate sequence in a Viterbi decoder.
+		  The 32-bit state register can support a deferral of 6 with a 4th-order coder.
+		 */
+		typedef struct candStruct {
+			uint32_t iState;	///< encoder input associated with this candidate
+			uint32_t oState;	///< encoder output associated with this candidate
+			char rState[mIRate];///< real states of encoders associated with this candidate
+			float cost;			///< cost (metric value), float to support soft inputs
+		} vCand;
+
+		/** Clear a structure. */
+		void vitClear(vCand& v)
+		{
+			v.iState=0;
+			v.oState=0;
+			v.cost=0;
+			for (unsigned i = 0; i < mIRate; i++) v.rState[i] = 0;
+		}
+		
+
+	private:
+
+		/**@name Survivors and candidates. */
+		//@{
+		vCand mSurvivors[mIStates];			///< current survivor pool
+		vCand mCandidates[2*mIStates];		///< current candidate pool
+		//@}
+
+	public:
+
+		unsigned iRate() const { return mIRate; }
+		uint32_t cMask() const { return mCMask; }
+		uint32_t stateTable(unsigned g, unsigned i) const { return mStateTable[g][i]; }
+		unsigned deferral() const { return mDeferral; }
+		
+
+		ViterbiTCH_AFS6_7();
+
+		/** Set all cost metrics to zero. */
+		void initializeStates();
+
+		/**
+			Full cycle of the Viterbi algorithm: branch, metrics, prune, select.
+			@return reference to minimum-cost candidate.
+		*/
+		const vCand& step(uint32_t inSample, const float *probs, const float *iprobs);
+
+	private:
+
+		/** Branch survivors into new candidates. */
+		void branchCandidates();
+
+		/** Compute cost metrics for soft-inputs. */
+		void getSoftCostMetrics(uint32_t inSample, const float *probs, const float *iprobs);
+
+		/** Select survivors from the candidate set. */
+		void pruneCandidates();
+
+		/** Find the minimum cost survivor. */
+		const vCand& minCost() const;
+
+		/**
+			Precompute the state tables.
+			@param g Generator index 0..((1/rate)-1)
+		*/
+		void computeStateTables(unsigned g);
+
+		/**
+			Precompute the generator outputs.
+			mCoeffs must be defined first.
+		*/
+		void computeGeneratorTable();
+		void encode(const BitVector &in, BitVector& target) const;
+		void decode(const SoftVector &in, BitVector& target);
+
+};
+
+
+
+/**
+	Class to represent recursive systematic convolutional coders/decoders of rate 1/4, memory length 6.
+*/
+class ViterbiTCH_AFS5_9 : public ViterbiBase {
+
+	private:
+		/**@name Lots of precomputed elements so the compiler can optimize like hell. */
+		//@{
+		/**@name Core values. */
+		//@{
+		static const unsigned mIRate = 4;	///< reciprocal of rate
+		static const unsigned mOrder = 6;	///< memory length of generators
+		//@}
+		/**@name Derived values. */
+		//@{
+		static const unsigned mIStates = 0x01 << mOrder;	///< number of states, number of survivors
+		static const uint32_t mSMask = mIStates-1;			///< survivor mask
+		static const uint32_t mCMask = (mSMask<<1) | 0x01;	///< candidate mask
+		static const uint32_t mOMask = (0x01<<mIRate)-1;	///< output mask, all iRate low bits set
+		static const unsigned mNumCands = mIStates*2;		///< number of candidates to generate during branching
+		static const unsigned mDeferral = 5*mOrder;			///< deferral to be used
+		//@}
+		//@}
+
+		/** Precomputed tables. */
+		//@{
+		uint32_t mCoeffs[mIRate];					///< output polynomial for each generator
+		uint32_t mCoeffsFB[mIRate];					///< feedback polynomial for each generator
+		uint32_t mStateTable[mIRate][2*mIStates];	///< precomputed generator output tables
+		uint32_t mGeneratorTable[2*mIStates];		///< precomputed coder output table
+		//@}
+	
+	public:
+
+		/**
+		  A candidate sequence in a Viterbi decoder.
+		  The 32-bit state register can support a deferral of 5*order with a 6th-order coder.
+		 */
+		typedef struct candStruct {
+			uint32_t iState;	///< encoder input associated with this candidate
+			uint32_t oState;	///< encoder output associated with this candidate
+			char rState[mIRate];///< real states of encoders associated with this candidate
+			float cost;			///< cost (metric value), float to support soft inputs
+		} vCand;
+
+		/** Clear a structure. */
+		void vitClear(vCand& v)
+		{
+			v.iState=0;
+			v.oState=0;
+			v.cost=0;
+			for (unsigned i = 0; i < mIRate; i++) v.rState[i] = 0;
+		}
+		
+
+	private:
+
+		/**@name Survivors and candidates. */
+		//@{
+		vCand mSurvivors[mIStates];			///< current survivor pool
+		vCand mCandidates[2*mIStates];		///< current candidate pool
+		//@}
+
+	public:
+
+		unsigned iRate() const { return mIRate; }
+		uint32_t cMask() const { return mCMask; }
+		uint32_t stateTable(unsigned g, unsigned i) const { return mStateTable[g][i]; }
+		unsigned deferral() const { return mDeferral; }
+		
+
+		ViterbiTCH_AFS5_9();
+
+		/** Set all cost metrics to zero. */
+		void initializeStates();
+
+		/**
+			Full cycle of the Viterbi algorithm: branch, metrics, prune, select.
+			@return reference to minimum-cost candidate.
+		*/
+		const vCand& step(uint32_t inSample, const float *probs, const float *iprobs);
+
+	private:
+
+		/** Branch survivors into new candidates. */
+		void branchCandidates();
+
+		/** Compute cost metrics for soft-inputs. */
+		void getSoftCostMetrics(uint32_t inSample, const float *probs, const float *iprobs);
+
+		/** Select survivors from the candidate set. */
+		void pruneCandidates();
+
+		/** Find the minimum cost survivor. */
+		const vCand& minCost() const;
+
+		/**
+			Precompute the state tables.
+			@param g Generator index 0..((1/rate)-1)
+		*/
+		void computeStateTables(unsigned g);
+
+		/**
+			Precompute the generator outputs.
+			mCoeffs must be defined first.
+		*/
+		void computeGeneratorTable();
+		void encode(const BitVector &in, BitVector& target) const;
+		void decode(const SoftVector &in, BitVector& target);
+
+};
+
+
+
+/**
+	Class to represent recursive systematic convolutional coders/decoders of rate 1/5, memory length 4.
+*/
+class ViterbiTCH_AFS5_15 : public ViterbiBase {
+
+	private:
+		/**@name Lots of precomputed elements so the compiler can optimize like hell. */
+		//@{
+		/**@name Core values. */
+		//@{
+		static const unsigned mIRate = 5;	///< reciprocal of rate
+		static const unsigned mOrder = 4;	///< memory length of generators
+		//@}
+		/**@name Derived values. */
+		//@{
+		static const unsigned mIStates = 0x01 << mOrder;	///< number of states, number of survivors
+		static const uint32_t mSMask = mIStates-1;			///< survivor mask
+		static const uint32_t mCMask = (mSMask<<1) | 0x01;	///< candidate mask
+		static const uint32_t mOMask = (0x01<<mIRate)-1;	///< output mask, all iRate low bits set
+		static const unsigned mNumCands = mIStates*2;		///< number of candidates to generate during branching
+		static const unsigned mDeferral = 6*mOrder;			///< deferral to be used
+		//@}
+		//@}
+
+		/** Precomputed tables. */
+		//@{
+		uint32_t mCoeffs[mIRate];					///< output polynomial for each generator
+		uint32_t mCoeffsFB[mIRate];					///< feedback polynomial for each generator
+		uint32_t mStateTable[mIRate][2*mIStates];	///< precomputed generator output tables
+		uint32_t mGeneratorTable[2*mIStates];		///< precomputed coder output table
+		//@}
+	
+	public:
+
+		/**
+		  A candidate sequence in a Viterbi decoder.
+		  The 32-bit state register can support a deferral of 6 with a 4th-order coder.
+		 */
+		typedef struct candStruct {
+			uint32_t iState;	///< encoder input associated with this candidate
+			uint32_t oState;	///< encoder output associated with this candidate
+			char rState[mIRate];///< real states of encoders associated with this candidate
+			float cost;			///< cost (metric value), float to support soft inputs
+		} vCand;
+
+		/** Clear a structure. */
+		void vitClear(vCand& v)
+		{
+			v.iState=0;
+			v.oState=0;
+			v.cost=0;
+			for (unsigned i = 0; i < mIRate; i++) v.rState[i] = 0;
+		}
+		
+
+	private:
+
+		/**@name Survivors and candidates. */
+		//@{
+		vCand mSurvivors[mIStates];			///< current survivor pool
+		vCand mCandidates[2*mIStates];		///< current candidate pool
+		//@}
+
+	public:
+
+		unsigned iRate() const { return mIRate; }
+		uint32_t cMask() const { return mCMask; }
+		uint32_t stateTable(unsigned g, unsigned i) const { return mStateTable[g][i]; }
+		unsigned deferral() const { return mDeferral; }
+		
+
+		ViterbiTCH_AFS5_15();
+
+		/** Set all cost metrics to zero. */
+		void initializeStates();
+
+		/**
+			Full cycle of the Viterbi algorithm: branch, metrics, prune, select.
+			@return reference to minimum-cost candidate.
+		*/
+		const vCand& step(uint32_t inSample, const float *probs, const float *iprobs);
+
+	private:
+
+		/** Branch survivors into new candidates. */
+		void branchCandidates();
+
+		/** Compute cost metrics for soft-inputs. */
+		void getSoftCostMetrics(uint32_t inSample, const float *probs, const float *iprobs);
+
+		/** Select survivors from the candidate set. */
+		void pruneCandidates();
+
+		/** Find the minimum cost survivor. */
+		const vCand& minCost() const;
+
+		/**
+			Precompute the state tables.
+			@param g Generator index 0..((1/rate)-1)
+		*/
+		void computeStateTables(unsigned g);
+
+		/**
+			Precompute the generator outputs.
+			mCoeffs must be defined first.
+		*/
+		void computeGeneratorTable();
+		void encode(const BitVector &in, BitVector& target) const;
+		void decode(const SoftVector &in, BitVector& target);
+
+};
+
+
+
+/**
+	Class to represent recursive systematic convolutional coders/decoders of rate 1/5, memory length 6.
+*/
+class ViterbiTCH_AFS4_75 : public ViterbiBase {
+
+	private:
+		/**@name Lots of precomputed elements so the compiler can optimize like hell. */
+		//@{
+		/**@name Core values. */
+		//@{
+		static const unsigned mIRate = 5;	///< reciprocal of rate
+		static const unsigned mOrder = 6;	///< memory length of generators
+		//@}
+		/**@name Derived values. */
+		//@{
+		static const unsigned mIStates = 0x01 << mOrder;	///< number of states, number of survivors
+		static const uint32_t mSMask = mIStates-1;			///< survivor mask
+		static const uint32_t mCMask = (mSMask<<1) | 0x01;	///< candidate mask
+		static const uint32_t mOMask = (0x01<<mIRate)-1;	///< output mask, all iRate low bits set
+		static const unsigned mNumCands = mIStates*2;		///< number of candidates to generate during branching
+		static const unsigned mDeferral = 5*mOrder;			///< deferral to be used
+		//@}
+		//@}
+
+		/** Precomputed tables. */
+		//@{
+		uint32_t mCoeffs[mIRate];					///< output polynomial for each generator
+		uint32_t mCoeffsFB[mIRate];					///< feedback polynomial for each generator
+		uint32_t mStateTable[mIRate][2*mIStates];	///< precomputed generator output tables
+		uint32_t mGeneratorTable[2*mIStates];		///< precomputed coder output table
+		//@}
+	
+	public:
+
+		/**
+		  A candidate sequence in a Viterbi decoder.
+		  The 32-bit state register can support a deferral of 5*order with a 6th-order coder.
+		 */
+		typedef struct candStruct {
+			uint32_t iState;	///< encoder input associated with this candidate
+			uint32_t oState;	///< encoder output associated with this candidate
+			char rState[mIRate];///< real states of encoders associated with this candidate
+			float cost;			///< cost (metric value), float to support soft inputs
+		} vCand;
+
+		/** Clear a structure. */
+		void vitClear(vCand& v)
+		{
+			v.iState=0;
+			v.oState=0;
+			v.cost=0;
+			for (unsigned i = 0; i < mIRate; i++) v.rState[i] = 0;
+		}
+		
+
+	private:
+
+		/**@name Survivors and candidates. */
+		//@{
+		vCand mSurvivors[mIStates];			///< current survivor pool
+		vCand mCandidates[2*mIStates];		///< current candidate pool
+		//@}
+
+	public:
+
+		unsigned iRate() const { return mIRate; }
+		uint32_t cMask() const { return mCMask; }
+		uint32_t stateTable(unsigned g, unsigned i) const { return mStateTable[g][i]; }
+		unsigned deferral() const { return mDeferral; }
+		
+
+		ViterbiTCH_AFS4_75();
+
+		/** Set all cost metrics to zero. */
+		void initializeStates();
+
+		/**
+			Full cycle of the Viterbi algorithm: branch, metrics, prune, select.
+			@return reference to minimum-cost candidate.
+		*/
+		const vCand& step(uint32_t inSample, const float *probs, const float *iprobs);
+
+	private:
+
+		/** Branch survivors into new candidates. */
+		void branchCandidates();
+
+		/** Compute cost metrics for soft-inputs. */
+		void getSoftCostMetrics(uint32_t inSample, const float *probs, const float *iprobs);
+
+		/** Select survivors from the candidate set. */
+		void pruneCandidates();
+
+		/** Find the minimum cost survivor. */
+		const vCand& minCost() const;
+
+		/**
+			Precompute the state tables.
+			@param g Generator index 0..((1/rate)-1)
+		*/
+		void computeStateTables(unsigned g);
+
+		/**
+			Precompute the generator outputs.
+			mCoeffs must be defined first.
+		*/
+		void computeGeneratorTable();
+		void encode(const BitVector &in, BitVector& target) const;
+		void decode(const SoftVector &in, BitVector& target);
+
+};
+
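+// Usage sketch (illustrative; not from the original OpenBTS sources, and it assumes ViterbiBase
+// in Viterbi.h exposes encode()/decode() as its public interface, since they are private in the
+// derived classes above): encoding one AMR 12.2 class-1 block.  The sizes follow the asserts in
+// AmrCoder.cpp -- 250 bits in (244 speech bits plus a 6-bit class-1a CRC per GSM 05.03) and
+// 508 coded bits out before puncturing.  decode() goes the other way, from a SoftVector of the
+// coded length back to a BitVector.
+//
+//	ViterbiTCH_AFS12_2 coder;
+//	ViterbiBase &vb = coder;	// use the base interface; encode()/decode() are private here
+//	BitVector frame(250);		// caller fills in the 250 payload bits
+//	frame.zero();
+//	BitVector coded(508);
+//	vb.encode(frame, coded);	// rate-1/2 recursive systematic code plus 4 termination steps
+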
+
+
+
+#endif
diff --git a/lib/decoding/openbts/BitVector.cpp b/lib/decoding/openbts/BitVector.cpp
new file mode 100644
index 0000000..00730f6
--- /dev/null
+++ b/lib/decoding/openbts/BitVector.cpp
@@ -0,0 +1,525 @@
+/*
+ * Copyright 2008, 2009, 2014 Free Software Foundation, Inc.
+ * Copyright 2014 Range Networks, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This use of this software may be subject to additional restrictions.
+ * See the LEGAL file in the main directory for details.
+ */
+
+#include "BitVector.h"
+#include <iostream>
+#include <stdio.h>
+#include <sstream>
+#include <string.h>
+//#include <Logger.h>
+
+using namespace std;
+
+
+
+BitVector::BitVector(const char *valString)
+{
+	// 1-30-2013 pat: I don't know what this was intended to do, but it did not create a normalized BitVector,
+	// and it could even fail if the accum overflows 8 bits.
+	//uint32_t accum = 0;
+	//for (size_t i=0; i<size(); i++) {
+	//	accum <<= 1;
+	//	if (valString[i]=='1') accum |= 0x01;
+	//	mStart[i] = accum;
+	//}
+	vInit(strlen(valString));
+	char *rp = begin();
+	for (const char *cp = valString; *cp; cp++, rp++) {
+		*rp = (*cp == '1');
+	}
+}
+
+
+uint64_t BitVector::peekField(size_t readIndex, unsigned length) const
+{
+	uint64_t accum = 0;
+	char *dp = mStart + readIndex;
+
+	for (unsigned i=0; i<length; i++) {
+		accum = (accum<<1) | ((*dp++) & 0x01);
+	}
+	return accum;
+}
+
+
+
+
+uint64_t BitVector::peekFieldReversed(size_t readIndex, unsigned length) const
+{
+	uint64_t accum = 0;
+	char *dp = mStart + readIndex + length - 1;
+	assert(dp<mEnd);
+	for (int i=(length-1); i>=0; i--) {
+		accum = (accum<<1) | ((*dp--) & 0x01);
+	}
+	return accum;
+}
+
+
+
+
+uint64_t BitVector::readField(size_t& readIndex, unsigned length) const
+{
+	const uint64_t retVal = peekField(readIndex,length);
+	readIndex += length;
+	return retVal;
+}
+
+
+uint64_t BitVector::readFieldReversed(size_t& readIndex, unsigned length) const
+{
+
+	const uint64_t retVal = peekFieldReversed(readIndex,length);
+	readIndex += length;
+	return retVal;
+
+}
+
+
+
+
+void BitVector::fillField(size_t writeIndex, uint64_t value, unsigned length)
+{
+	if (length != 0) {
+		char *dpBase = mStart + writeIndex;
+		char *dp = dpBase + length - 1;
+		assert(dp < mEnd);
+		while (dp>=dpBase) {
+			*dp-- = value & 0x01;
+			value >>= 1;
+		}
+	}
+}
+
+
+void BitVector::fillFieldReversed(size_t writeIndex, uint64_t value, unsigned length)
+{
+	if (length != 0) {
+		char *dp = mStart + writeIndex;
+		char *dpEnd = dp + length - 1;
+		assert(dpEnd < mEnd);
+		while (dp<=dpEnd) {
+			*dp++ = value & 0x01;
+			value >>= 1;
+		}
+	}
+}
+
+
+
+
+void BitVector::writeField(size_t& writeIndex, uint64_t value, unsigned length)
+{
+	if (length != 0) {
+		fillField(writeIndex,value,length);
+		writeIndex += length;
+	}
+}
+
+
+void BitVector::writeFieldReversed(size_t& writeIndex, uint64_t value, unsigned length)
+{
+	if (length != 0) {
+		fillFieldReversed(writeIndex,value,length);
+		writeIndex += length;
+	}
+}
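+
+// Illustrative round trip (not in the original source): fillField()/peekField() treat a field
+// as MSB-first, so writing 0xA5 over 8 bits starting at index 4 puts its most significant bit
+// at position 4, and peekField(4,8) reads the same value back.
+//
+//	BitVector v(16);
+//	v.zero();
+//	v.fillField(4, 0xA5, 8);
+//	uint64_t x = v.peekField(4, 8);	// x == 0xA5
+//	size_t wp = 0;
+//	v.writeField(wp, 0x3, 2);	// like fillField(0, 0x3, 2), but also advances wp to 2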
+
+
+void BitVector::invert()
+{
+	for (size_t i=0; i<size(); i++) {
+		mStart[i] = ~mStart[i];
+	}
+}
+
+
+
+
+void BitVector::reverse8()
+{
+	assert(size()>=8);
+
+	char tmp0 = mStart[0];
+	mStart[0] = mStart[7];
+	mStart[7] = tmp0;
+
+	char tmp1 = mStart[1];
+	mStart[1] = mStart[6];
+	mStart[6] = tmp1;
+
+	char tmp2 = mStart[2];
+	mStart[2] = mStart[5];
+	mStart[5] = tmp2;
+
+	char tmp3 = mStart[3];
+	mStart[3] = mStart[4];
+	mStart[4] = tmp3;
+}
+
+
+
+void BitVector::LSB8MSB()
+{
+	if (size()<8) return;
+	size_t size8 = 8*(size()/8);
+	size_t iTop = size8 - 8;
+	for (size_t i=0; i<=iTop; i+=8) segment(i,8).reverse8();
+}
+
+
+
+uint64_t BitVector::syndrome(Generator& gen) const
+{
+	gen.clear();
+	const char *dp = mStart;
+	while (dp<mEnd) gen.syndromeShift(*dp++);
+	return gen.state();
+}
+
+
+uint64_t BitVector::parity(Generator& gen) const
+{
+	gen.clear();
+	const char *dp = mStart;
+	while (dp<mEnd) gen.encoderShift(*dp++);
+	return gen.state();
+}
+
+
+unsigned BitVector::sum() const
+{
+	unsigned sum = 0;
+	for (size_t i=0; i<size(); i++) sum += mStart[i] & 0x01;
+	return sum;
+}
+
+
+
+
+void BitVector::map(const unsigned *map, size_t mapSize, BitVector& dest) const
+{
+	for (unsigned i=0; i<mapSize; i++) {
+		dest.mStart[i] = mStart[map[i]];
+	}
+}
+
+
+
+
+void BitVector::unmap(const unsigned *map, size_t mapSize, BitVector& dest) const
+{
+	for (unsigned i=0; i<mapSize; i++) {
+		dest.mStart[map[i]] = mStart[i];
+	}
+}
+
+
+
+
+
+
+
+ostream& operator<<(ostream& os, const BitVector& hv)
+{
+	for (size_t i=0; i<hv.size(); i++) {
+		if (hv.bit(i)) os << '1';
+		else os << '0';
+	}
+	return os;
+}
+
+
+
+
+uint64_t Parity::syndrome(const BitVector& receivedCodeword)
+{
+	return receivedCodeword.syndrome(*this);
+}
+
+
+void Parity::writeParityWord(const BitVector& data, BitVector& parityTarget, bool invert)
+{
+	uint64_t pWord = data.parity(*this);
+	if (invert) pWord = ~pWord;
+	parityTarget.fillField(0,pWord,size());
+}
+
+
+
+
+
+
+
+
+
+SoftVector::SoftVector(const BitVector& source)
+{
+	resize(source.size());
+	for (size_t i=0; i<size(); i++) {
+		if (source.bit(i)) mStart[i]=1.0F;
+		else mStart[i]=0.0F;
+	}
+}
+
+
+BitVector SoftVector::sliced() const
+{
+	size_t sz = size();
+	BitVector newSig(sz);
+	for (size_t i=0; i<sz; i++) {
+		if (mStart[i]>0.5F) newSig[i]=1;
+		else newSig[i] = 0;
+	}
+	return newSig;
+}
+
+
+
+// (pat) Added 6-22-2012
+float SoftVector::getEnergy(float *plow) const
+{
+	const SoftVector &vec = *this;
+	int len = vec.size();
+	float avg = 0; float low = 1;
+	for (int i = 0; i < len; i++) {
+		float bit = vec[i];
+		float energy = 2*((bit < 0.5) ? (0.5-bit) : (bit-0.5));
+		if (energy < low) low = energy;
+		avg += energy/len;
+	}
+	if (plow) { *plow = low; }
+	return avg;
+}
+
+// (pat) Added 1-2014.  Compute the SNR of a soft vector.  Very similar to above.
+// Since we don't really know what the expected signal values are, we assume that the signal is 0 or 1
+// and return the SNR on that basis.
+// SNR is power(signal) / power(noise), where power is proportional to RMS**2, so SNR = (RMS(signal) / RMS(noise))**2.
+// Since RMS = sqrt(1/n * (x1**2 + x2**2 ...)) and the 1/n factors cancel in the ratio, we just add up the squares.
+// To compute the signal term we remove any constant offset, so the signal values are either 0.5 or -0.5
+// and the signal's sum of squares is just 0.5**2 * len; all we need to compute is the noise component.
+float SoftVector::getSNR() const
+{
+	float sumSquaresNoise = 0;
+	const SoftVector &vec = *this;
+	int len = vec.size();
+	if (len == 0) { return 0.0; }
+	for (int i = 0; i < len; i++) {
+		float bit = vec[i];
+		if (bit < 0.5) {
+			// Assume signal is 0.
+			sumSquaresNoise += (bit - 0.0) * (bit - 0.0);
+		} else {
+			// Assume signal is 1.
+			sumSquaresNoise += (bit - 1.0) * (bit - 1.0);
+		}
+	}
+	float sumSquaresSignal = 0.5 * 0.5 * len;
+	// I really want log10 of this to convert to dB, but log is expensive, and Harvind seems to like absolute SNR.
+	// Clamp the max to 999; it shouldn't get up there, but just to be sure.  This also avoids divide by zero.
+	if (sumSquaresNoise * 1000 < sumSquaresSignal) return 999;
+	return sumSquaresSignal / sumSquaresNoise;
+}
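+
+// Worked example (added for illustration): if every soft bit is 0.9 or 0.1, each contributes
+// (0.1)^2 = 0.01 to sumSquaresNoise, so getSNR() returns (0.25*len) / (0.01*len) = 25,
+// while getEnergy() for the same vector returns 2*|0.9 - 0.5| = 0.8.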
+
+
+
+ostream& operator<<(ostream& os, const SoftVector& sv)
+{
+	for (size_t i=0; i<sv.size(); i++) {
+		if (sv[i]<0.25) os << "0";
+		else if (sv[i]>0.75) os << "1";
+		else os << "-";
+	}
+	return os;
+}
+
+
+
+void BitVector::pack(unsigned char* targ) const
+{
+	// Assumes MSB-first packing.
+	unsigned bytes = size()/8;
+	for (unsigned i=0; i<bytes; i++) {
+		targ[i] = peekField(i*8,8);
+	}
+	unsigned whole = bytes*8;
+	unsigned rem = size() - whole;
+	if (rem==0) return;
+	targ[bytes] = peekField(whole,rem) << (8-rem);
+}
+
+void BitVector::pack2(unsigned char* targ) const
+{
+    unsigned int i;
+    unsigned char curbyte = 0;
+
+    for (i = 0; i < size(); i++)
+    {
+        uint8_t bitnum = 7 - (i % 8);
+        curbyte |= ((char)bit(i) << bitnum);
+        if(i % 8 == 7){
+            *targ++ = curbyte;
+            curbyte = 0;
+        }
+    }
+
+	// Assumes MSB-first packing.
+//	unsigned bytes = size()/8;
+//	for (unsigned i=0; i<bytes; i++) {
+//		targ[i] = peekField(i*8,8);
+//	}
+//	unsigned whole = bytes*8;
+//	unsigned rem = size() - whole;
+//	if (rem==0) return;
+//	targ[bytes] = peekField(whole,rem) << (8-rem);
+}
+
+
+
+string BitVector::packToString() const
+{
+	string result;
+	result.reserve((size()+7)/8);
+	// Tempting to call this->pack(result.c_str()) but technically c_str() is read-only.
+	unsigned bytes = size()/8;
+	for (unsigned i=0; i<bytes; i++) {
+		result.push_back(peekField(i*8,8));
+	}
+	unsigned whole = bytes*8;
+	unsigned rem = size() - whole;
+	if (rem==0) return result;
+	result.push_back(peekField(whole,rem) << (8-rem));
+	return result;
+}
+
+
+void BitVector::unpack(const unsigned char* src)
+{
+	// Assumes MSB-first packing.
+	unsigned bytes = size()/8;
+	for (unsigned i=0; i<bytes; i++) {
+		fillField(i*8,src[i],8);
+	}
+	unsigned whole = bytes*8;
+	unsigned rem = size() - whole;
+	if (rem==0) return;
+	fillField(whole,src[bytes] >> (8-rem),rem);
+}
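+
+// Illustrative example (not in the original source): pack() is MSB-first and pads the last
+// partial byte with low-order zeros; unpack() reverses it.
+//
+//	BitVector v("101100111000");	// 12 bits
+//	unsigned char buf[2];
+//	v.pack(buf);			// buf[0] == 0xB3, buf[1] == 0x80
+//	BitVector w(12);
+//	w.unpack(buf);			// now w == v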
+
+void BitVector::hex(ostream& os) const
+{
+	os << std::hex;
+	unsigned digits = size()/4;
+	size_t wp=0;
+	for (unsigned i=0; i<digits; i++) {
+		os << readField(wp,4);
+	}
+	os << std::dec;
+}
+
+std::string BitVector::hexstr() const
+{
+	std::ostringstream ss;
+	hex(ss);
+	return ss.str();
+}
+
+
+bool BitVector::unhex(const char* src)
+{
+	// Assumes MSB-first packing.
+	unsigned int val;
+	unsigned digits = size()/4;
+	for (unsigned i=0; i<digits; i++) {
+		if (sscanf(src+i, "%1x", &val) < 1) {
+			return false;
+		}
+		fillField(i*4,val,4);
+	}
+	unsigned whole = digits*4;
+	unsigned rem = size() - whole;
+	if (rem>0) {
+		if (sscanf(src+digits, "%1x", &val) < 1) {
+			return false;
+		}
+		fillField(whole,val,rem);
+	}
+	return true;
+}
+
+bool BitVector::operator==(const BitVector &other) const
+{
+	unsigned l = size();
+	return l == other.size() && 0==memcmp(begin(),other.begin(),l);
+}
+
+void BitVector::copyPunctured(BitVector &dst, const unsigned *puncture, const size_t plth)
+{
+	assert(size() - plth == dst.size());
+	char *srcp = mStart;
+	char *dstp = dst.mStart;
+	const unsigned *pend = puncture + plth;
+	while (srcp < mEnd) {
+		if (puncture < pend) {
+			int n = (*puncture++) - (srcp - mStart);
+			assert(n >= 0);
+			for (int i = 0; i < n; i++) {
+				assert(srcp < mEnd && dstp < dst.mEnd);
+				*dstp++ = *srcp++;
+			}
+			srcp++;
+		} else {
+			while (srcp < mEnd) {
+				assert(dstp < dst.mEnd);
+				*dstp++ = *srcp++;
+			}
+		}
+	}
+	assert(dstp == dst.mEnd && puncture == pend);
+}
+
+void SoftVector::copyUnPunctured(SoftVector &dst, const unsigned *puncture, const size_t plth)
+{
+	assert(size() + plth == dst.size());
+	float *srcp = mStart;
+	float *dstp = dst.mStart;
+	const unsigned *pend = puncture + plth;
+	while (dstp < dst.mEnd) {
+		if (puncture < pend) {
+			int n = (*puncture++) - (dstp - dst.mStart);
+			assert(n >= 0);
+			for (int i = 0; i < n; i++) {
+				assert(srcp < mEnd && dstp < dst.mEnd);
+				*dstp++ = *srcp++;
+			}
+			*dstp++ = 0.5;
+		} else {
+			while (srcp < mEnd) {
+				assert(dstp < dst.mEnd);
+				*dstp++ = *srcp++;
+			}
+		}
+	}
+	assert(dstp == dst.mEnd && puncture == pend);
+}
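+
+// Illustrative example (not in the original source): with puncture = {2, 5}, copyPunctured()
+// drops the bits at indices 2 and 5, so an 8-bit vector copies into a 6-bit one;
+// SoftVector::copyUnPunctured() reverses the process, re-inserting 0.5 ("unknown") at those indices.
+//
+//	const unsigned punc[] = {2, 5};
+//	BitVector src("10110100");
+//	BitVector dst(6);
+//	src.copyPunctured(dst, punc, 2);	// dst holds 1,0,1,0,0,0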
+
+// vim: ts=4 sw=4
diff --git a/lib/decoding/openbts/BitVector.h b/lib/decoding/openbts/BitVector.h
new file mode 100644
index 0000000..c8782eb
--- /dev/null
+++ b/lib/decoding/openbts/BitVector.h
@@ -0,0 +1,432 @@
+/*
+ * Copyright 2008, 2009, 2014 Free Software Foundation, Inc.
+ * Copyright 2014 Range Networks, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This use of this software may be subject to additional restrictions.
+ * See the LEGAL file in the main directory for details.
+ */
+
+#ifndef BITVECTORS_H
+#define BITVECTORS_H
+
+#include "Vector.h"
+#include <stdint.h>
+#include <stdio.h>
+
+
+class BitVector;
+class SoftVector;
+
+
+
+
+/** Shift-register (LFSR) generator. */
+class Generator {
+
+	private:
+
+	uint64_t mCoeff;	///< polynomial coefficients. LSB is zero exponent.
+	uint64_t mState;	///< shift register state. LSB is most recent.
+	uint64_t mMask;		///< mask for reading state
+	unsigned mLen;		///< number of bits used in shift register
+	unsigned mLen_1;	///< mLen - 1
+
+	public:
+
+	Generator(uint64_t wCoeff, unsigned wLen)
+		:mCoeff(wCoeff),mState(0),
+		mMask((1ULL<<wLen)-1),
+		mLen(wLen),mLen_1(wLen-1)
+	{ assert(wLen<64); }
+
+	void clear() { mState=0; }
+
+	/**@name Accessors */
+	//@{
+	uint64_t state() const { return mState & mMask; }
+	unsigned size() const { return mLen; }
+	//@}
+
+	/**
+		Calculate one bit of a syndrome.
+		This is in the .h for inlining.
+	*/
+	void syndromeShift(unsigned inBit)
+	{
+		const unsigned fb = (mState>>(mLen_1)) & 0x01;
+		mState = (mState<<1) ^ (inBit & 0x01);
+		if (fb) mState ^= mCoeff;
+	}
+
+	/**
+		Update the generator state by one cycle.
+		This is in the .h for inlining.
+	*/
+	void encoderShift(unsigned inBit)
+	{
+		const unsigned fb = ((mState>>(mLen_1)) ^ inBit) & 0x01;
+		mState <<= 1;
+		if (fb) mState ^= mCoeff;
+	}
+
+
+};
+
+
+
+
+/** Parity (CRC-type) generator and checker based on a Generator. */
+class Parity : public Generator {
+
+	protected:
+
+	unsigned mCodewordSize;
+
+	public:
+
+	Parity(uint64_t wCoefficients, unsigned wParitySize, unsigned wCodewordSize)
+		:Generator(wCoefficients, wParitySize),
+		mCodewordSize(wCodewordSize)
+	{ }
+
+	/** Compute the parity word and write it into the target segment.  */
+	void writeParityWord(const BitVector& data, BitVector& parityWordTarget, bool invert=true);
+
+	/** Compute the syndrome of a received sequence. */
+	uint64_t syndrome(const BitVector& receivedCodeword);
+};
+
+
+// (pat) Nov 2013.  I rationalized the behavior of BitVector and added assertions so that code
+// relying on the bad aspects of the original behavior will core dump.  See comments at VectorBase.
+class BitVector : public VectorBase<char>
+{
+	public:
+	/**@name Constructors. */
+	//@{
+
+	/**@name Casts of Vector constructors. */
+	BitVector(VectorDataType wData, char* wStart, char* wEnd) : VectorBase<char>(wData, wStart, wEnd) {}
+
+	// The one and only copy-constructor.
+	BitVector(const BitVector&other) : VectorBase<char>() {
+		VECTORDEBUG("BitVector(%p)",(void*)&other);
+		if (other.getData()) {
+			this->clone(other);
+		} else {
+			this->makeAlias(other);
+		}
+	}
+
+	// (pat) Removed default value for len and added 'explicit'.  Please do not remove 'explicit';
+	// it prevents auto-conversion of int to BitVector in constructors.
+	// Previous code was often ambiguous, especially for L3Frame and descendent constructors, leading to latent bugs.
+	explicit BitVector(size_t len) { this->vInit(len); }
+	BitVector() { this->vInit(0); }
+
+	/** Build a BitVector by concatenation. */
+	BitVector(const BitVector& other1, const BitVector& other2) : VectorBase<char>()
+	{
+		assert(this->getData() == 0);
+		this->vConcat(other1,other2);
+	}
+
+	/** Construct from a string of "0" and "1". */
+	// (pat) Characters that are not '0' or '1' map to '0'.
+	BitVector(const char* valString);
+	//@}
+
+	/**@name Casts and overrides of Vector operators. */
+	//@{
+	// (pat) Please DO NOT add a const anywhere in this method.  Use cloneSegment instead.
+	BitVector segment(size_t start, size_t span)
+	{
+		char* wStart = this->begin() + start;
+		char* wEnd = wStart + span;
+		assert(wEnd<=this->end());
+#if BITVECTOR_REFCNTS
+		return BitVector(mData,wStart,wEnd);
+#else
+		return BitVector(NULL,wStart,wEnd);
+#endif
+	}
+
+	// (pat) Historically the BitVector segment method had const and non-const versions with different behavior.
+	// I changed the name of the const version to cloneSegment and replaced all uses throughout OpenBTS.
+	const BitVector cloneSegment(size_t start, size_t span) const
+	{
+		BitVector seg = const_cast<BitVector*>(this)->segment(start,span);
+		// (pat) We are depending on the Return Value Optimization not to invoke the copy-constructor on the result,
+		// which would result in its immediate destruction while we are still using it.
+		BitVector result;
+		result.clone(seg);
+		return result;
+	}
+
+	BitVector alias() const {
+		return const_cast<BitVector*>(this)->segment(0,size());
+	}
+
+	BitVector head(size_t span) { return segment(0,span); }
+	BitVector tail(size_t start) { return segment(start,size()-start); }
+
+	// (pat) Please do NOT put the const version of head and tail back in, because historically they were messed up.
+	// Use cloneSegment instead.
+	//const BitVector head(size_t span) const { return segment(0,span); }
+	//const BitVector tail(size_t start) const { return segment(start,size()-start); }
+	//@}
+
+
+	void zero() { fill(0); }
+
+	/**@name FEC operations. */
+	//@{
+	/** Calculate the syndrome of the vector with the given Generator. */
+	uint64_t syndrome(Generator& gen) const;
+	/** Calculate the parity word for the vector with the given Generator. */
+	uint64_t parity(Generator& gen) const;
+	//@}
+
+
+	/** Invert 0<->1. */
+	void invert();
+
+	/**@name Byte-wise operations. */
+	//@{
+	/** Reverse an 8-bit vector. */
+	void reverse8();
+	/** Reverse groups of 8 within the vector (byte reversal). */
+	void LSB8MSB();
+	//@}
+
+	/**@name Serialization and deserialization. */
+	//@{
+	uint64_t peekField(size_t readIndex, unsigned length) const;
+	uint64_t peekFieldReversed(size_t readIndex, unsigned length) const;
+	uint64_t readField(size_t& readIndex, unsigned length) const;
+	uint64_t readFieldReversed(size_t& readIndex, unsigned length) const;
+	void fillField(size_t writeIndex, uint64_t value, unsigned length);
+	void fillFieldReversed(size_t writeIndex, uint64_t value, unsigned length);
+	void writeField(size_t& writeIndex, uint64_t value, unsigned length);
+	void writeFieldReversed(size_t& writeIndex, uint64_t value, unsigned length);
+	void write0(size_t& writeIndex) { writeField(writeIndex,0,1); }
+	void write1(size_t& writeIndex) { writeField(writeIndex,1,1); }
+
+	//@}
+
+	/** Sum of bits. */
+	unsigned sum() const;
+
+	/** Reorder bits, dest[i] = this[map[i]]. */
+	void map(const unsigned *map, size_t mapSize, BitVector& dest) const;
+
+	/** Reorder bits, dest[map[i]] = this[i]. */
+	void unmap(const unsigned *map, size_t mapSize, BitVector& dest) const;
+
+	/** Pack into a char array. */
+	void pack(unsigned char*) const;
+
+	/*  Roman: This is here for debugging */
+	void pack2(unsigned char*) const;
+
+	// Same as pack but return a string.
+	std::string packToString() const;
+
+	/** Unpack from a char array. */
+	void unpack(const unsigned char*);
+
+	/** Make a hexdump string. */
+	void hex(std::ostream&) const;
+	std::string hexstr() const;
+
+	/** Unpack from a hexdump string.
+	*  @returns true on success, false on error. */
+	bool unhex(const char*);
+
+	// For this method, 'other' should have been run through the copy-constructor already
+	// (unless it was newly created, i.e. foo.dup(L2Frame(...)), in which case we are screwed anyway)
+	// so the call to makeAlias is redundant.
+	// This only works if other is already an alias.
+	void dup(BitVector other) { assert(!this->getData()); makeAlias(other); assert(this->mStart == other.mStart); }
+	void dup(BitVector &other) { makeAlias(other); assert(this->mStart == other.mStart); }
+
+#if 0
+	void operator=(const BitVector& other) {
+		printf("BitVector::operator=\n");
+		assert(0);
+		//this->dup(other);
+	}
+#endif
+
+    bool operator==(const BitVector &other) const;
+
+	/** Copy to dst, not including those indexed in puncture. */
+	void copyPunctured(BitVector &dst, const unsigned *puncture, const size_t plth);
+
+	/** Index a single bit. */
+	// (pat) Can't have too many ways to do this, I guess.
+	bool bit(size_t index) const
+	{
+		// We put this code in .h for fast inlining.
+		const char *dp = this->begin()+index;
+		assert(dp<this->end());
+		return (*dp) & 0x01;
+	}
+
+	char& operator[](size_t index)
+	{
+		assert(this->mStart+index<this->mEnd);
+		return this->mStart[index];
+	}
+
+	const char& operator[](size_t index) const
+	{
+		assert(this->mStart+index<this->mEnd);
+		return this->mStart[index];
+	}
+
+	/** Set a bit */
+	void settfb(size_t index, int value)
+	{
+		char *dp = this->mStart+index;
+		assert(dp<this->mEnd);
+		*dp = value;
+	}
+
+	typedef char* iterator;
+	typedef const char* const_iterator;
+};
+
+// (pat) BitVector2 was an intermediate step in fixing BitVector but is no longer needed.
+#define BitVector2 BitVector
+
+
+std::ostream& operator<<(std::ostream&, const BitVector&);
+
+
+
+
+
+
+/**
+  The SoftVector class is used to represent a soft-decision signal.
+  Values 0..1 represent probabilities that a bit is "true".
+ */
+class SoftVector: public Vector<float> {
+
+	public:
+
+	/** Build a SoftVector of a given length. */
+	SoftVector(size_t wSize=0):Vector<float>(wSize) {}
+
+	/** Construct a SoftVector from a C string of "0", "1", and "X". */
+	SoftVector(const char* valString);
+
+	/** Construct a SoftVector from a BitVector. */
+	SoftVector(const BitVector& source);
+
+	/**
+		Wrap a SoftVector around a block of floats.
+		The block will be delete[]ed upon destruction.
+	*/
+	SoftVector(float *wData, unsigned length)
+		:Vector<float>(wData,length)
+	{}
+
+	SoftVector(float* wData, float* wStart, float* wEnd)
+		:Vector<float>(wData,wStart,wEnd)
+	{ }
+
+	/**
+		Casting from a Vector<float>.
+		Note that this is NOT pass-by-reference.
+	*/
+	SoftVector(Vector<float> source)
+		:Vector<float>(source)
+	{}
+
+
+	/**@name Casts and overrides of Vector operators. */
+	//@{
+	SoftVector segment(size_t start, size_t span)
+	{
+		float* wStart = mStart + start;
+		float* wEnd = wStart + span;
+		assert(wEnd<=mEnd);
+		return SoftVector(NULL,wStart,wEnd);
+	}
+
+	SoftVector alias()
+		{ return segment(0,size()); }
+
+	const SoftVector segment(size_t start, size_t span) const
+		{ return (SoftVector)(Vector<float>::segment(start,span)); }
+
+	SoftVector head(size_t span) { return segment(0,span); }
+	const SoftVector head(size_t span) const { return segment(0,span); }
+	SoftVector tail(size_t start) { return segment(start,size()-start); }
+	const SoftVector tail(size_t start) const { return segment(start,size()-start); }
+	//@}
+
+	// (pat) How good is the SoftVector in the sense of the bits being solid?
+	// A result of 1 is perfect and 0 means all the bits were 0.5.
+	// If the 'low' pointer is non-NULL, also return the lowest per-bit energy through it.
+	float getEnergy(float *low=0) const;
+	float getSNR() const;
+
+	/** Fill with "unknown" values. */
+	void unknown() { fill(0.5F); }
+
+	/** Return a hard bit value from a given index by slicing. */
+	bool bit(size_t index) const
+	{
+		const float *dp = mStart+index;
+		assert(dp<mEnd);
+		return (*dp)>0.5F;
+	}
+
+	/** Slice the whole signal into bits. */
+	BitVector sliced() const;
+
+	/** Copy to dst, adding in 0.5 for those indexed in puncture. */
+	void copyUnPunctured(SoftVector &dst, const unsigned *puncture, const size_t plth);
+
+	/** Return a soft bit. */
+	float softbit(size_t index) const
+	{
+		const float *dp = mStart+index;
+		assert(dp<mEnd);
+		return *dp;
+	}
+
+	/** Set a soft bit */
+	void settfb(size_t index, float value)
+	{
+		float *dp = mStart+index;
+		assert(dp<mEnd);
+		*dp = value;
+	}
+};
+
+
+
+std::ostream& operator<<(std::ostream&, const SoftVector&);
+
+
+
+
+#endif
+// vim: ts=4 sw=4
diff --git a/lib/decoding/openbts/GSM503Tables.cpp b/lib/decoding/openbts/GSM503Tables.cpp
new file mode 100644
index 0000000..fb0f235
--- /dev/null
+++ b/lib/decoding/openbts/GSM503Tables.cpp
@@ -0,0 +1,326 @@
+/*
+ * Copyright 2012, 2014 Range Networks, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This use of this software may be subject to additional restrictions.
+ * See the LEGAL file in the main directory for details.
+ */
+
+#include "GSM503Tables.h"
+
+
+/*
+	This array encodes GSM 05.03 Table 7.
+*/
+const unsigned int GSM::gAMRBitOrderTCH_AFS12_2[244] = {
+	   0,   1,   2,   3,   4,   5,   6,   7,   8,   9,
+	  10,  11,  12,  13,  14,  23,  15,  16,  17,  18,
+	  19,  20,  21,  22,  24,  25,  26,  27,  28,  38,
+	 141,  39, 142,  40, 143,  41, 144,  42, 145,  43,
+	 146,  44, 147,  45, 148,  46, 149,  47,  97, 150,
+	 200,  48,  98, 151, 201,  49,  99, 152, 202,  86,
+	 136, 189, 239,  87, 137, 190, 240,  88, 138, 191,
+	 241,  91, 194,  92, 195,  93, 196,  94, 197,  95,
+	 198,  29,  30,  31,  32,  33,  34,  35,  50, 100,
+	 153, 203,  89, 139, 192, 242,  51, 101, 154, 204,
+	  55, 105, 158, 208,  90, 140, 193, 243,  59, 109,
+	 162, 212,  63, 113, 166, 216,  67, 117, 170, 220,
+	  36,  37,  54,  53,  52,  58,  57,  56,  62,  61,
+	  60,  66,  65,  64,  70,  69,  68, 104, 103, 102,
+	 108, 107, 106, 112, 111, 110, 116, 115, 114, 120,
+	 119, 118, 157, 156, 155, 161, 160, 159, 165, 164,
+	 163, 169, 168, 167, 173, 172, 171, 207, 206, 205,
+	 211, 210, 209, 215, 214, 213, 219, 218, 217, 223,
+	 222, 221,  73,  72,  71,  76,  75,  74,  79,  78,
+	  77,  82,  81,  80,  85,  84,  83, 123, 122, 121,
+	 126, 125, 124, 129, 128, 127, 132, 131, 130, 135,
+	 134, 133, 176, 175, 174, 179, 178, 177, 182, 181,
+	 180, 185, 184, 183, 188, 187, 186, 226, 225, 224,
+	 229, 228, 227, 232, 231, 230, 235, 234, 233, 238,
+	 237, 236,  96, 199
+};
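+
+/*
+	Illustrative use of a bit-ordering table (not from the original source): BitVector::map()
+	computes dest[i] = src[table[i]], so reordering the 244 bits of a 12.2 kbit/s speech frame
+	into the coding order given by Table 7 looks like
+
+		BitVector speech(244);		// d(0)..d(243) from the vocoder
+		BitVector ordered(244);
+		speech.map(GSM::gAMRBitOrderTCH_AFS12_2, 244, ordered);
+
+	and ordered.unmap() with the same table reverses the permutation.
+*/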
+
+
+/*
+	This array encodes GSM 05.03 Table 8.
+*/
+const unsigned int GSM::gAMRBitOrderTCH_AFS10_2[204] = {
+	   7,   6,   5,   4,   3,   2,   1,   0,  16,  15,
+	  14,  13,  12,  11,  10,   9,   8,  26,  27,  28,
+	  29,  30,  31, 115, 116, 117, 118, 119, 120,  72,
+	  73, 161, 162,  65,  68,  69, 108, 111, 112, 154,
+	 157, 158, 197, 200, 201,  32,  33, 121, 122,  74,
+	  75, 163, 164,  66, 109, 155, 198,  19,  23,  21,
+	  22,  18,  17,  20,  24,  25,  37,  36,  35,  34,
+	  80,  79,  78,  77, 126, 125, 124, 123, 169, 168,
+	 167, 166,  70,  67,  71, 113, 110, 114, 159, 156,
+	 160, 202, 199, 203,  76, 165,  81,  82,  92,  91,
+	  93,  83,  95,  85,  84,  94, 101, 102,  96, 104,
+	  86, 103,  87,  97, 127, 128, 138, 137, 139, 129,
+	 141, 131, 130, 140, 147, 148, 142, 150, 132, 149,
+	 133, 143, 170, 171, 181, 180, 182, 172, 184, 174,
+	 173, 183, 190, 191, 185, 193, 175, 192, 176, 186,
+	  38,  39,  49,  48,  50,  40,  52,  42,  41,  51,
+	  58,  59,  53,  61,  43,  60,  44,  54, 194, 179,
+	 189, 196, 177, 195, 178, 187, 188, 151, 136, 146,
+	 153, 134, 152, 135, 144, 145, 105,  90, 100, 107,
+	  88, 106,  89,  98,  99,  62,  47,  57,  64,  45,
+	  63,  46,  55,  56
+};
+
+
+/*
+	This array encodes GSM 05.03 Table 9.
+*/
+const unsigned int GSM::gAMRBitOrderTCH_AFS7_95[159] = {
+	   8,   7,   6,   5,   4,   3,   2,  14,  16,   9,
+	  10,  12,  13,  15,  11,  17,  20,  22,  24,  23,
+	  19,  18,  21,  56,  88, 122, 154,  57,  89, 123,
+	 155,  58,  90, 124, 156,  52,  84, 118, 150,  53,
+	  85, 119, 151,  27,  93,  28,  94,  29,  95,  30,
+	  96,  31,  97,  61, 127,  62, 128,  63, 129,  59,
+	  91, 125, 157,  32,  98,  64, 130,   1,   0,  25,
+	  26,  33,  99,  34, 100,  65, 131,  66, 132,  54,
+	  86, 120, 152,  60,  92, 126, 158,  55,  87, 121,
+	 153, 117, 116, 115,  46,  78, 112, 144,  43,  75,
+	 109, 141,  40,  72, 106, 138,  36,  68, 102, 134,
+	 114, 149, 148, 147, 146,  83,  82,  81,  80,  51,
+	  50,  49,  48,  47,  45,  44,  42,  39,  35,  79,
+	  77,  76,  74,  71,  67, 113, 111, 110, 108, 105,
+	 101, 145, 143, 142, 140, 137, 133,  41,  73, 107,
+	 139,  37,  69, 103, 135,  38,  70, 104, 136
+};
+
+/*
+	This array encodes GSM 05.03 Table 10.
+*/
+const unsigned int GSM::gAMRBitOrderTCH_AFS7_4[148] = {
+	   0,   1,   2,   3,   4,   5,   6,   7,   8,   9,
+	  10,  11,  12,  13,  14,  15,  16,  26,  87,  27,
+	  88,  28,  89,  29,  90,  30,  91,  51,  80, 112,
+	 141,  52,  81, 113, 142,  54,  83, 115, 144,  55,
+	  84, 116, 145,  58, 119,  59, 120,  21,  22,  23,
+	  17,  18,  19,  31,  60,  92, 121,  56,  85, 117,
+	 146,  20,  24,  25,  50,  79, 111, 140,  57,  86,
+	 118, 147,  49,  78, 110, 139,  48,  77,  53,  82,
+	 114, 143, 109, 138,  47,  76, 108, 137,  32,  33,
+	  61,  62,  93,  94, 122, 123,  41,  42,  43,  44,
+	  45,  46,  70,  71,  72,  73,  74,  75, 102, 103,
+	 104, 105, 106, 107, 131, 132, 133, 134, 135, 136,
+	  34,  63,  95, 124,  35,  64,  96, 125,  36,  65,
+	  97, 126,  37,  66,  98, 127,  38,  67,  99, 128,
+	  39,  68, 100, 129,  40,  69, 101, 130
+};
+
+/*
+	This array encodes GSM 05.03 Table 11.
+*/
+const unsigned int GSM::gAMRBitOrderTCH_AFS6_7[134] = {
+	   0,   1,   4,   3,   5,   6,  13,   7,   2,   8,
+	   9,  11,  15,  12,  14,  10,  28,  82,  29,  83,
+	  27,  81,  26,  80,  30,  84,  16,  55, 109,  56,
+	 110,  31,  85,  57, 111,  48,  73, 102, 127,  32,
+	  86,  51,  76, 105, 130,  52,  77, 106, 131,  58,
+	 112,  33,  87,  19,  23,  53,  78, 107, 132,  21,
+	  22,  18,  17,  20,  24,  25,  50,  75, 104, 129,
+	  47,  72, 101, 126,  54,  79, 108, 133,  46,  71,
+	 100, 125, 128, 103,  74,  49,  45,  70,  99, 124,
+	  42,  67,  96, 121,  39,  64,  93, 118,  38,  63,
+	  92, 117,  35,  60,  89, 114,  34,  59,  88, 113,
+	  44,  69,  98, 123,  43,  68,  97, 122,  41,  66,
+	  95, 120,  40,  65,  94, 119,  37,  62,  91, 116,
+	  36,  61,  90, 115
+};
+
+/*
+	This array encodes GSM 05.03 Table 12.
+*/
+const unsigned int GSM::gAMRBitOrderTCH_AFS5_9[118] = {
+	   0,   1,   4,   5,   3,   6,   7,   2,  13,  15,
+	   8,   9,  11,  12,  14,  10,  16,  28,  74,  29,
+	  75,  27,  73,  26,  72,  30,  76,  51,  97,  50,
+	  71,  96, 117,  31,  77,  52,  98,  49,  70,  95,
+	 116,  53,  99,  32,  78,  33,  79,  48,  69,  94,
+	 115,  47,  68,  93, 114,  46,  67,  92, 113,  19,
+	  21,  23,  22,  18,  17,  20,  24, 111,  43,  89,
+	 110,  64,  65,  44,  90,  25,  45,  66,  91, 112,
+	  54, 100,  40,  61,  86, 107,  39,  60,  85, 106,
+	  36,  57,  82, 103,  35,  56,  81, 102,  34,  55,
+	  80, 101,  42,  63,  88, 109,  41,  62,  87, 108,
+	  38,  59,  84, 105,  37,  58,  83, 104
+};
+
+/*
+	This array encodes GSM 05.03 Table 13.
+*/
+const unsigned int GSM::gAMRBitOrderTCH_AFS5_15[103] = {
+	   7,   6,   5,   4,   3,   2,   1,   0,  15,  14,
+	  13,  12,  11,  10,   9,   8,  23,  24,  25,  26,
+	  27,  46,  65,  84,  45,  44,  43,  64,  63,  62,
+	  83,  82,  81, 102, 101, 100,  42,  61,  80,  99,
+	  28,  47,  66,  85,  18,  41,  60,  79,  98,  29,
+	  48,  67,  17,  20,  22,  40,  59,  78,  97,  21,
+	  30,  49,  68,  86,  19,  16,  87,  39,  38,  58,
+	  57,  77,  35,  54,  73,  92,  76,  96,  95,  36,
+	  55,  74,  93,  32,  51,  33,  52,  70,  71,  89,
+	  90,  31,  50,  69,  88,  37,  56,  75,  94,  34,
+	  53,  72,  91
+};
+
+/*
+	This array encodes GSM 05.03 Table 14.
+*/
+const unsigned int GSM::gAMRBitOrderTCH_AFS4_75[95] = {
+	   0,   1,   2,   3,   4,   5,   6,   7,   8,   9,
+	  10,  11,  12,  13,  14,  15,  23,  24,  25,  26,
+	  27,  28,  48,  49,  61,  62,  82,  83,  47,  46,
+	  45,  44,  81,  80,  79,  78,  17,  18,  20,  22,
+	  77,  76,  75,  74,  29,  30,  43,  42,  41,  40,
+	  38,  39,  16,  19,  21,  50,  51,  59,  60,  63,
+	  64,  72,  73,  84,  85,  93,  94,  32,  33,  35,
+	  36,  53,  54,  56,  57,  66,  67,  69,  70,  87,
+	  88,  90,  91,  34,  55,  68,  89,  37,  58,  71,
+	  92,  31,  52,  65,  86
+};
+
+/* GSM 05.03 3.9.4.4 */
+const unsigned int GSM::gAMRPuncturedTCH_AFS12_2[60] = {
+	321, 325, 329, 333, 337, 341, 345, 349, 353, 357, 361, 363, 365,
+	369, 373, 377, 379, 381, 385, 389, 393, 395, 397, 401, 405, 409,
+	411, 413, 417, 421, 425, 427, 429, 433, 437, 441, 443, 445, 449,
+	453, 457, 459, 461, 465, 469, 473, 475, 477, 481, 485, 489, 491,
+	493, 495, 497, 499, 501, 503, 505, 507
+};
+
+/* GSM 05.03 3.9.4.4 */
+const unsigned int GSM::gAMRPuncturedTCH_AFS10_2[194] = {
+	1, 4, 7, 10, 16, 19, 22, 28, 31, 34, 40, 43, 46, 52, 55, 58,
+	64, 67, 70, 76, 79, 82, 88, 91, 94, 100, 103, 106, 112, 115,
+	118, 124, 127, 130, 136, 139, 142, 148, 151, 154, 160, 163, 166,
+	172, 175, 178, 184, 187, 190, 196, 199, 202, 208, 211, 214, 220,
+	223, 226, 232, 235, 238, 244, 247, 250, 256, 259, 262, 268, 271,
+	274, 280, 283, 286, 292, 295, 298, 304, 307, 310, 316, 319, 322,
+	325, 328, 331, 334, 337, 340, 343, 346, 349, 352, 355, 358, 361,
+	364, 367, 370, 373, 376, 379, 382, 385, 388, 391, 394, 397, 400,
+	403, 406, 409, 412, 415, 418, 421, 424, 427, 430, 433, 436, 439,
+	442, 445, 448, 451, 454, 457, 460, 463, 466, 469, 472, 475, 478,
+	481, 484, 487, 490, 493, 496, 499, 502, 505, 508, 511, 514, 517,
+	520, 523, 526, 529, 532, 535, 538, 541, 544, 547, 550, 553, 556,
+	559, 562, 565, 568, 571, 574, 577, 580, 583, 586, 589, 592, 595,
+	598, 601, 604, 607, 609, 610, 613, 616, 619, 621, 622, 625, 627,
+	628, 631, 633, 634, 636, 637, 639, 640
+};
+
+/* GSM 05.03 3.9.4.4 */
+const unsigned int GSM::gAMRPuncturedTCH_AFS7_95[65] = {
+	1, 2, 4, 5, 8, 22, 70, 118, 166, 214, 262, 310, 317, 319, 325,
+	332, 334, 341, 343, 349, 356, 358, 365, 367, 373, 380, 382, 385,
+	389, 391, 397, 404, 406, 409, 413, 415, 421, 428, 430, 433, 437,
+	439, 445, 452, 454, 457, 461, 463, 469, 476, 478, 481, 485, 487,
+	490, 493, 500, 502, 503, 505, 506, 508, 509, 511, 512
+};
+
+/* GSM 05.03 3.9.4.4 */
+const unsigned int GSM::gAMRPuncturedTCH_AFS7_4[26] = {
+	0, 355, 361, 367, 373, 379, 385, 391, 397, 403, 409, 415, 421,
+	427, 433, 439, 445, 451, 457, 460, 463, 466, 468, 469, 471,
+	472
+};
+
+/* GSM 05.03 3.9.4.4 */
+const unsigned int GSM::gAMRPuncturedTCH_AFS6_7[128] = {
+	1, 3, 7, 11, 15, 27, 39, 55, 67, 79, 95, 107, 119, 135, 147,
+	159, 175, 187, 199, 215, 227, 239, 255, 267, 279, 287, 291, 295,
+	299, 303, 307, 311, 315, 319, 323, 327, 331, 335, 339, 343, 347,
+	351, 355, 359, 363, 367, 369, 371, 375, 377, 379, 383, 385, 387,
+	391, 393, 395, 399, 401, 403, 407, 409, 411, 415, 417, 419, 423,
+	425, 427, 431, 433, 435, 439, 441, 443, 447, 449, 451, 455, 457,
+	459, 463, 465, 467, 471, 473, 475, 479, 481, 483, 487, 489, 491,
+	495, 497, 499, 503, 505, 507, 511, 513, 515, 519, 521, 523, 527,
+	529, 531, 535, 537, 539, 543, 545, 547, 549, 551, 553, 555, 557,
+	559, 561, 563, 565, 567, 569, 571, 573, 575
+};
+
+/* GSM 05.03 3.9.4.4 */
+const unsigned int GSM::gAMRPuncturedTCH_AFS5_9[72] = {
+	0, 1, 3, 5, 7, 11, 15, 31, 47, 63, 79, 95, 111, 127, 143,
+	159, 175, 191, 207, 223, 239, 255, 271, 287, 303, 319, 327, 331,
+	335, 343, 347, 351, 359, 363, 367, 375, 379, 383, 391, 395, 399,
+	407, 411, 415, 423, 427, 431, 439, 443, 447, 455, 459, 463, 467,
+	471, 475, 479, 483, 487, 491, 495, 499, 503, 507, 509, 511, 512,
+	513, 515, 516, 517, 519
+};
+
+/* GSM 05.03 3.9.4.4 */
+const unsigned int GSM::gAMRPuncturedTCH_AFS5_15[117] = {
+	0, 4, 5, 9, 10, 14, 15, 20, 25, 30, 35, 40, 50, 60, 70,
+	80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200,
+	210, 220, 230, 240, 250, 260, 270, 280, 290, 300, 310, 315, 320,
+	325, 330, 334, 335, 340, 344, 345, 350, 354, 355, 360, 364, 365,
+	370, 374, 375, 380, 384, 385, 390, 394, 395, 400, 404, 405, 410,
+	414, 415, 420, 424, 425, 430, 434, 435, 440, 444, 445, 450, 454,
+	455, 460, 464, 465, 470, 474, 475, 480, 484, 485, 490, 494, 495,
+	500, 504, 505, 510, 514, 515, 520, 524, 525, 529, 530, 534, 535,
+	539, 540, 544, 545, 549, 550, 554, 555, 559, 560, 564
+};
+
+/* GSM 05.03 3.9.4.4 */
+const unsigned int GSM::gAMRPuncturedTCH_AFS4_75[87] = {
+	0, 1, 2, 4, 5, 7, 9, 15, 25, 35, 45, 55, 65, 75, 85, 95,
+	105, 115, 125, 135, 145, 155, 165, 175, 185, 195, 205, 215, 225,
+	235, 245, 255, 265, 275, 285, 295, 305, 315, 325, 335, 345, 355,
+	365, 375, 385, 395, 400, 405, 410, 415, 420, 425, 430, 435, 440,
+	445, 450, 455, 459, 460, 465, 470, 475, 479, 480, 485, 490, 495,
+	499, 500, 505, 509, 510, 515, 517, 519, 520, 522, 524, 525, 526,
+	527, 529, 530, 531, 532, 534
+};
+
+/* GSM 05.03 Tables 7-14 */
+const unsigned int *GSM::gAMRBitOrder[8] = {
+	GSM::gAMRBitOrderTCH_AFS12_2,
+	GSM::gAMRBitOrderTCH_AFS10_2,
+	GSM::gAMRBitOrderTCH_AFS7_95,
+	GSM::gAMRBitOrderTCH_AFS7_4,
+	GSM::gAMRBitOrderTCH_AFS6_7,
+	GSM::gAMRBitOrderTCH_AFS5_9,
+	GSM::gAMRBitOrderTCH_AFS5_15,
+	GSM::gAMRBitOrderTCH_AFS4_75
+};
+
+/* GSM 05.03 3.9.4.2 */
+const unsigned int GSM::gAMRKd[9] = {244, 204, 159, 148, 134, 118, 103, 95, 260};	// The last entry is for TCH_FS (GSM mode)
+
+/* GSM 05.03 3.9.4.2 */
+const unsigned int GSM::gAMRClass1ALth[8] = {81, 65, 75, 61, 55, 55, 49, 39};
+
+/* GSM 05.03 3.9.4.4 */
+const unsigned int GSM::gAMRTCHUCLth[8] = {508, 642, 513, 474, 576, 520, 565, 535};
+
+/* GSM 05.03 3.9.4.2 */
+const unsigned int GSM::gAMRPunctureLth[8] = {60, 194, 65, 26, 128, 72, 117, 87};
+
+/* GSM 05.03 3.9.4.4 */
+const unsigned int *GSM::gAMRPuncture[8] = {
+	GSM::gAMRPuncturedTCH_AFS12_2,
+	GSM::gAMRPuncturedTCH_AFS10_2,
+	GSM::gAMRPuncturedTCH_AFS7_95,
+	GSM::gAMRPuncturedTCH_AFS7_4,
+	GSM::gAMRPuncturedTCH_AFS6_7,
+	GSM::gAMRPuncturedTCH_AFS5_9,
+	GSM::gAMRPuncturedTCH_AFS5_15,
+	GSM::gAMRPuncturedTCH_AFS4_75
+};
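+
+/*
+	Usage sketch (illustrative only; nothing in this file calls it): for an AMRMode m,
+	the gAMRKd[m] speech bits are reordered with gAMRBitOrder[m], CRC-protected and
+	convolutionally encoded to gAMRTCHUCLth[m] bits, and then the gAMRPunctureLth[m]
+	coded positions listed in gAMRPuncture[m] are deleted, leaving 448 bits in every mode.
+	A hypothetical helper for the puncturing step:
+
+	static void puncture(const unsigned char *coded, unsigned codedLen,
+	                     unsigned char *out, GSM::AMRMode m)
+	{
+		const unsigned int *punct = GSM::gAMRPuncture[m];
+		const unsigned nPunct = GSM::gAMRPunctureLth[m];
+		unsigned pi = 0, oi = 0;
+		for (unsigned k = 0; k < codedLen; k++) {
+			if (pi < nPunct && punct[pi] == k) { pi++; continue; }	// drop punctured position
+			out[oi++] = coded[k];
+		}
+	}
+*/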
+
+
diff --git a/lib/decoding/openbts/GSM503Tables.h b/lib/decoding/openbts/GSM503Tables.h
new file mode 100644
index 0000000..1fe405e
--- /dev/null
+++ b/lib/decoding/openbts/GSM503Tables.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2012, 2014 Range Networks, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This use of this software may be subject to additional restrictions.
+ * See the LEGAL file in the main directory for details.
+ */
+
+#ifndef GSM503TABLES_H
+#define GSM503TABLES_H
+
+
+
+namespace GSM {
+
+// don't change the positions in this enum
+// (pat) The first 8 values are used as indices into numerous tables.
+// (pat) The encoder/decoder modes are the 8 AMR modes plus TCH_FS, making 9.
+// TODO: Add AFS_SID type.  And why is it not type 8?
+enum AMRMode {TCH_AFS12_2, TCH_AFS10_2, TCH_AFS7_95, TCH_AFS7_4, TCH_AFS6_7, TCH_AFS5_9, TCH_AFS5_15, TCH_AFS4_75, TCH_FS};
+
+/** Tables #7-14 from GSM 05.03 */
+extern const unsigned int gAMRBitOrderTCH_AFS12_2[244];
+extern const unsigned int gAMRBitOrderTCH_AFS10_2[204];
+extern const unsigned int gAMRBitOrderTCH_AFS7_95[159];
+extern const unsigned int gAMRBitOrderTCH_AFS7_4[148];
+extern const unsigned int gAMRBitOrderTCH_AFS6_7[134];
+extern const unsigned int gAMRBitOrderTCH_AFS5_9[118];
+extern const unsigned int gAMRBitOrderTCH_AFS5_15[103];
+extern const unsigned int gAMRBitOrderTCH_AFS4_75[95];
+
+/** GSM 05.03 3.9.4.4 */
+extern const unsigned int gAMRPuncturedTCH_AFS12_2[60];
+extern const unsigned int gAMRPuncturedTCH_AFS10_2[194];
+extern const unsigned int gAMRPuncturedTCH_AFS7_95[65];
+extern const unsigned int gAMRPuncturedTCH_AFS7_4[26];
+extern const unsigned int gAMRPuncturedTCH_AFS6_7[128];
+extern const unsigned int gAMRPuncturedTCH_AFS5_9[72];
+extern const unsigned int gAMRPuncturedTCH_AFS5_15[117];
+extern const unsigned int gAMRPuncturedTCH_AFS4_75[87];
+
+/* GSM 05.03 Tables 7-14 */
+extern const unsigned *gAMRBitOrder[8];
+
+/* GSM 05.03 3.9.4.2 */
+extern const unsigned gAMRKd[9];
+
+/* GSM 05.03 3.9.4.2 */
+extern const unsigned gAMRClass1ALth[8];
+
+/* GSM 05.03 3.9.4.4 */
+extern const unsigned gAMRTCHUCLth[8];
+
+/* GSM 05.03 3.9.4.2 */
+extern const unsigned gAMRPunctureLth[8];
+
+/* GSM 05.03 3.9.4.4 */
+extern const unsigned *gAMRPuncture[8];
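+
+/*
+	(illustrative) An AMRMode value indexes every table above directly, e.g.:
+		gAMRKd[TCH_AFS7_95]          == 159	// payload bits per frame
+		gAMRTCHUCLth[TCH_AFS7_95]    == 513	// coded bits before puncturing
+		gAMRPunctureLth[TCH_AFS7_95] == 65	// punctured bits, leaving 448 on air
+*/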
+
+}
+
+
+#endif
diff --git a/lib/decoding/openbts/GSM610Tables.cpp b/lib/decoding/openbts/GSM610Tables.cpp
new file mode 100644
index 0000000..1b08496
--- /dev/null
+++ b/lib/decoding/openbts/GSM610Tables.cpp
@@ -0,0 +1,489 @@
+/*
+ * Copyright 2008 Free Software Foundation, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This use of this software may be subject to additional restrictions.
+ * See the LEGAL file in the main directory for details.
+ */
+
+#include "GSM610Tables.h"
+
+
+/*
+RFC 3551                    RTP A/V Profile                    July 2003
+
+
+   Octet  Bit 0   Bit 1   Bit 2   Bit 3   Bit 4   Bit 5   Bit 6   Bit 7
+   _____________________________________________________________________
+       0    1       1       0       1    LARc0.0 LARc0.1 LARc0.2 LARc0.3
+       1 LARc0.4 LARc0.5 LARc1.0 LARc1.1 LARc1.2 LARc1.3 LARc1.4 LARc1.5
+       2 LARc2.0 LARc2.1 LARc2.2 LARc2.3 LARc2.4 LARc3.0 LARc3.1 LARc3.2
+       3 LARc3.3 LARc3.4 LARc4.0 LARc4.1 LARc4.2 LARc4.3 LARc5.0 LARc5.1
+       4 LARc5.2 LARc5.3 LARc6.0 LARc6.1 LARc6.2 LARc7.0 LARc7.1 LARc7.2
+       5  Nc0.0   Nc0.1   Nc0.2   Nc0.3   Nc0.4   Nc0.5   Nc0.6  bc0.0
+       6  bc0.1   Mc0.0   Mc0.1  xmaxc00 xmaxc01 xmaxc02 xmaxc03 xmaxc04
+       7 xmaxc05 xmc0.0  xmc0.1  xmc0.2  xmc1.0  xmc1.1  xmc1.2  xmc2.0
+       8 xmc2.1  xmc2.2  xmc3.0  xmc3.1  xmc3.2  xmc4.0  xmc4.1  xmc4.2
+       9 xmc5.0  xmc5.1  xmc5.2  xmc6.0  xmc6.1  xmc6.2  xmc7.0  xmc7.1
+      10 xmc7.2  xmc8.0  xmc8.1  xmc8.2  xmc9.0  xmc9.1  xmc9.2  xmc10.0
+      11 xmc10.1 xmc10.2 xmc11.0 xmc11.1 xmc11.2 xmc12.0 xmc12.1 xmc12.2
+      12  Nc1.0   Nc1.1   Nc1.2   Nc1.3   Nc1.4   Nc1.5   Nc1.6   bc1.0
+      13  bc1.1   Mc1.0   Mc1.1  xmaxc10 xmaxc11 xmaxc12 xmaxc13 xmaxc14
+      14 xmaxc15 xmc13.0 xmc13.1 xmc13.2 xmc14.0 xmc14.1 xmc14.2 xmc15.0
+      15 xmc15.1 xmc15.2 xmc16.0 xmc16.1 xmc16.2 xmc17.0 xmc17.1 xmc17.2
+      16 xmc18.0 xmc18.1 xmc18.2 xmc19.0 xmc19.1 xmc19.2 xmc20.0 xmc20.1
+      17 xmc20.2 xmc21.0 xmc21.1 xmc21.2 xmc22.0 xmc22.1 xmc22.2 xmc23.0
+      18 xmc23.1 xmc23.2 xmc24.0 xmc24.1 xmc24.2 xmc25.0 xmc25.1 xmc25.2
+      19  Nc2.0   Nc2.1   Nc2.2   Nc2.3   Nc2.4   Nc2.5   Nc2.6   bc2.0
+      20  bc2.1   Mc2.0   Mc2.1  xmaxc20 xmaxc21 xmaxc22 xmaxc23 xmaxc24
+      21 xmaxc25 xmc26.0 xmc26.1 xmc26.2 xmc27.0 xmc27.1 xmc27.2 xmc28.0
+      22 xmc28.1 xmc28.2 xmc29.0 xmc29.1 xmc29.2 xmc30.0 xmc30.1 xmc30.2
+      23 xmc31.0 xmc31.1 xmc31.2 xmc32.0 xmc32.1 xmc32.2 xmc33.0 xmc33.1
+      24 xmc33.2 xmc34.0 xmc34.1 xmc34.2 xmc35.0 xmc35.1 xmc35.2 xmc36.0
+      25 xmc36.1 xmc36.2 xmc37.0 xmc37.1 xmc37.2 xmc38.0 xmc38.1 xmc38.2
+      26  Nc3.0   Nc3.1   Nc3.2   Nc3.3   Nc3.4   Nc3.5   Nc3.6   bc3.0
+      27  bc3.1   Mc3.0   Mc3.1  xmaxc30 xmaxc31 xmaxc32 xmaxc33 xmaxc34
+      28 xmaxc35 xmc39.0 xmc39.1 xmc39.2 xmc40.0 xmc40.1 xmc40.2 xmc41.0
+      29 xmc41.1 xmc41.2 xmc42.0 xmc42.1 xmc42.2 xmc43.0 xmc43.1 xmc43.2
+      30 xmc44.0 xmc44.1 xmc44.2 xmc45.0 xmc45.1 xmc45.2 xmc46.0 xmc46.1
+      31 xmc46.2 xmc47.0 xmc47.1 xmc47.2 xmc48.0 xmc48.1 xmc48.2 xmc49.0
+      32 xmc49.1 xmc49.2 xmc50.0 xmc50.1 xmc50.2 xmc51.0 xmc51.1 xmc51.2
+
+                        Table 3: GSM payload format
+*/
+
+
+/*
+	This file encodes a mapping between
+	GSM 05.03 Table 2 and RFC-3551 Table 3.
+*/
+
+/*
+	Naming convention:
+	xxx_p	position (bit index)
+	xxx_l	length (bit field length)
+	LAR	log area ratio
+	N	LTP lag
+	b	LTP gain
+	M	grid
+	Xmax	block amplitude
+	x	RPE pulses
+*/
+
+
+/**@name Lengths of GSM 06.10 fields */
+//@{
+const unsigned int LAR1_l=6;	///< log area ratio
+const unsigned int LAR2_l=6;	///< log area ratio
+const unsigned int LAR3_l=5;	///< log area ratio
+const unsigned int LAR4_l=5;	///< log area ratio
+const unsigned int LAR5_l=4;	///< log area ratio
+const unsigned int LAR6_l=4;	///< log area ratio
+const unsigned int LAR7_l=3;	///< log area ratio
+const unsigned int LAR8_l=3;	///< log area ratio
+const unsigned int N_l=7;	///< LTP lag
+const unsigned int b_l=2;	///< LTP gain
+const unsigned int M_l=2;	///< grid position
+const unsigned int Xmax_l=6;	///< block amplitude
+const unsigned int x_l=3;	///< RPE pulses
+//@}
+
+
+
+/**@name Indices of GSM 06.10 fields as they appear in RFC-3551 Table 3. */
+//@{
+
+/**@name Log area ratios, apply to whole frame. */
+//@{
+const unsigned int LAR1_p = 0;
+const unsigned int LAR2_p = LAR1_p + LAR1_l;
+const unsigned int LAR3_p = LAR2_p + LAR2_l;
+const unsigned int LAR4_p = LAR3_p + LAR3_l;
+const unsigned int LAR5_p = LAR4_p + LAR4_l;
+const unsigned int LAR6_p = LAR5_p + LAR5_l;
+const unsigned int LAR7_p = LAR6_p + LAR6_l;
+const unsigned int LAR8_p = LAR7_p + LAR7_l;
+//@}
+/**@name Subframe 1 */
+//@{
+const unsigned int N1_p = LAR8_p + LAR8_l;
+const unsigned int b1_p = N1_p + N_l;
+const unsigned int M1_p = b1_p + b_l;
+const unsigned int Xmax1_p = M1_p + M_l;
+const unsigned int x1_0_p = Xmax1_p + Xmax_l;
+const unsigned int x1_1_p = x1_0_p + x_l;
+const unsigned int x1_2_p = x1_1_p + x_l;
+const unsigned int x1_3_p = x1_2_p + x_l;
+const unsigned int x1_4_p = x1_3_p + x_l;
+const unsigned int x1_5_p = x1_4_p + x_l;
+const unsigned int x1_6_p = x1_5_p + x_l;
+const unsigned int x1_7_p = x1_6_p + x_l;
+const unsigned int x1_8_p = x1_7_p + x_l;
+const unsigned int x1_9_p = x1_8_p + x_l;
+const unsigned int x1_10_p = x1_9_p + x_l;
+const unsigned int x1_11_p = x1_10_p + x_l;
+const unsigned int x1_12_p = x1_11_p + x_l;
+//@}
+/**@name Subframe 2 */
+//@{
+const unsigned int N2_p = x1_12_p + x_l;
+const unsigned int b2_p = N2_p + N_l;
+const unsigned int M2_p = b2_p + b_l;
+const unsigned int Xmax2_p = M2_p + M_l;
+const unsigned int x2_0_p = Xmax2_p + Xmax_l;
+const unsigned int x2_1_p = x2_0_p + x_l;
+const unsigned int x2_2_p = x2_1_p + x_l;
+const unsigned int x2_3_p = x2_2_p + x_l;
+const unsigned int x2_4_p = x2_3_p + x_l;
+const unsigned int x2_5_p = x2_4_p + x_l;
+const unsigned int x2_6_p = x2_5_p + x_l;
+const unsigned int x2_7_p = x2_6_p + x_l;
+const unsigned int x2_8_p = x2_7_p + x_l;
+const unsigned int x2_9_p = x2_8_p + x_l;
+const unsigned int x2_10_p = x2_9_p + x_l;
+const unsigned int x2_11_p = x2_10_p + x_l;
+const unsigned int x2_12_p = x2_11_p + x_l;
+//@}
+/**@name Subframe 3 */
+//@{
+const unsigned int N3_p = x2_12_p + x_l;
+const unsigned int b3_p = N3_p + N_l;
+const unsigned int M3_p = b3_p + b_l;
+const unsigned int Xmax3_p = M3_p + M_l;
+const unsigned int x3_0_p = Xmax3_p + Xmax_l;
+const unsigned int x3_1_p = x3_0_p + x_l;
+const unsigned int x3_2_p = x3_1_p + x_l;
+const unsigned int x3_3_p = x3_2_p + x_l;
+const unsigned int x3_4_p = x3_3_p + x_l;
+const unsigned int x3_5_p = x3_4_p + x_l;
+const unsigned int x3_6_p = x3_5_p + x_l;
+const unsigned int x3_7_p = x3_6_p + x_l;
+const unsigned int x3_8_p = x3_7_p + x_l;
+const unsigned int x3_9_p = x3_8_p + x_l;
+const unsigned int x3_10_p = x3_9_p + x_l;
+const unsigned int x3_11_p = x3_10_p + x_l;
+const unsigned int x3_12_p = x3_11_p + x_l;
+//@}
+/**@name Subframe 4 */
+//@{
+const unsigned int N4_p = x3_12_p + x_l;
+const unsigned int b4_p = N4_p + N_l;
+const unsigned int M4_p = b4_p + b_l;
+const unsigned int Xmax4_p = M4_p + M_l;
+const unsigned int x4_0_p = Xmax4_p + Xmax_l;
+const unsigned int x4_1_p = x4_0_p + x_l;
+const unsigned int x4_2_p = x4_1_p + x_l;
+const unsigned int x4_3_p = x4_2_p + x_l;
+const unsigned int x4_4_p = x4_3_p + x_l;
+const unsigned int x4_5_p = x4_4_p + x_l;
+const unsigned int x4_6_p = x4_5_p + x_l;
+const unsigned int x4_7_p = x4_6_p + x_l;
+const unsigned int x4_8_p = x4_7_p + x_l;
+const unsigned int x4_9_p = x4_8_p + x_l;
+const unsigned int x4_10_p = x4_9_p + x_l;
+const unsigned int x4_11_p = x4_10_p + x_l;
+const unsigned int x4_12_p = x4_11_p + x_l;
+//@}
+//@}
+
+
+/*
+	This array encodes GSM 05.03 Table 2.
+	It's also GSM 06.10 Table A2.1a.
+	This is the order of bits as they appear in
+	the d[] bits of the GSM TCH/F.
+	RTP[4+g610BitOrder[i]] <=> GSM[i]
+*/
+unsigned int GSM::g610BitOrder[260] = {
+/**@name importance class 1 */
+//@{
+/** LAR1:5 */	LAR1_p+LAR1_l-1-5, 		/* bit 0 */
+/** Xmax1:5 */	Xmax1_p+Xmax_l-1-5,
+/** Xmax2:5 */	Xmax2_p+Xmax_l-1-5,
+/** Xmax3:5 */	Xmax3_p+Xmax_l-1-5,
+/** Xmax4:5 */	Xmax4_p+Xmax_l-1-5,
+//@}
+/**@name importance class 2 */
+//@{
+/** LAR1:4 */	LAR1_p+LAR1_l-1-4,
+/** LAR2:5 */	LAR2_p+LAR2_l-1-5,
+/** LAR3:4 */	LAR3_p+LAR3_l-1-4,
+//@}
+/**@name importance class 3 */
+//@{
+/** LAR1:3 */	LAR1_p+LAR1_l-1-3,
+/** LAR2:4 */	LAR2_p+LAR2_l-1-4,
+/** LAR3:3 */	LAR3_p+LAR3_l-1-3,		/* bit 10 */
+/** LAR4:4 */	LAR4_p+LAR4_l-1-4,
+/** N1:6 */	N1_p+N_l-1-6,
+/** N2:6 */	N2_p+N_l-1-6,
+/** N3:6 */	N3_p+N_l-1-6,
+/** N4:6 */	N4_p+N_l-1-6,
+/** Xmax1:4 */	Xmax1_p+Xmax_l-1-4,
+/** Xmax2:4 */	Xmax2_p+Xmax_l-1-4,
+/** Xmax3:4 */	Xmax3_p+Xmax_l-1-4,
+/** Xmax4:4 */	Xmax4_p+Xmax_l-1-4,
+/** LAR2:3 */	LAR2_p+LAR2_l-1-3,		/* bit 20 */
+/** LAR5:3 */	LAR5_p+LAR5_l-1-3,
+/** LAR6:3 */	LAR6_p+LAR6_l-1-3,
+/** N1:5 */	N1_p+N_l-1-5,
+/** N2:5 */	N2_p+N_l-1-5,
+/** N3:5 */	N3_p+N_l-1-5,
+/** N4:5 */	N4_p+N_l-1-5,
+/** N1:4 */	N1_p+N_l-1-4,
+/** N2:4 */	N2_p+N_l-1-4,
+/** N3:4 */	N3_p+N_l-1-4,
+/** N4:4 */	N4_p+N_l-1-4,			/* bit 30 */
+/** N1:3 */	N1_p+N_l-1-3,
+/** N2:3 */	N2_p+N_l-1-3,
+/** N3:3 */	N3_p+N_l-1-3,
+/** N4:3 */	N4_p+N_l-1-3,
+/** N1:2 */	N1_p+N_l-1-2,
+/** N2:2 */	N2_p+N_l-1-2,
+/** N3:2 */	N3_p+N_l-1-2,
+/** N4:2 */	N4_p+N_l-1-2,
+//@}
+/**@name importance class 4 */
+//@{
+/** Xmax1:3 */	Xmax1_p+Xmax_l-1-3,
+/** Xmax2:3 */	Xmax2_p+Xmax_l-1-3,		/* bit 40 */
+/** Xmax3:3 */	Xmax3_p+Xmax_l-1-3,
+/** Xmax4:3 */	Xmax4_p+Xmax_l-1-3,
+/** LAR1:2 */	LAR1_p+LAR1_l-1-2,
+/** LAR4:3 */	LAR4_p+LAR4_l-1-3,
+/** LAR7:2 */	LAR7_p+LAR7_l-1-2,
+/** N1:1 */	N1_p+N_l-1-1,
+/** N2:1 */	N2_p+N_l-1-1,
+/** N3:1 */	N3_p+N_l-1-1,
+/** N4:1 */	N4_p+N_l-1-1,
+/** LAR5:2 */	LAR5_p+LAR5_l-1-2,		/* bit 50 */
+/** LAR6:2 */	LAR6_p+LAR6_l-1-2,
+/** b1:1 */	b1_p+b_l-1-1,
+/** b2:1 */	b2_p+b_l-1-1,
+/** b3:1 */	b3_p+b_l-1-1,
+/** b4:1 */	b4_p+b_l-1-1,
+/** N1:0 */	N1_p+N_l-1-0,
+/** N2:0 */	N2_p+N_l-1-0,
+/** N3:0 */	N3_p+N_l-1-0,
+/** N4:0 */	N4_p+N_l-1-0,
+/** M1:1 */	M1_p+M_l-1-1,			/* bit 60 */
+/** M2:1 */	M2_p+M_l-1-1,
+/** M3:1 */	M3_p+M_l-1-1,
+/** M4:1 */	M4_p+M_l-1-1,
+//@}
+/**@name importance class 5 */
+//@{
+/** LAR1:1 */	LAR1_p+LAR1_l-1-1,
+/** LAR2:2 */	LAR2_p+LAR2_l-1-2,
+/** LAR3:2 */	LAR3_p+LAR3_l-1-2,
+/** LAR8:2 */	LAR8_p+LAR8_l-1-2,
+/** LAR4:2 */	LAR4_p+LAR4_l-1-2,
+/** LAR5:1 */	LAR5_p+LAR5_l-1-1,
+/** LAR7:1 */	LAR7_p+LAR7_l-1-1,		/* bit 70 */
+/** b1:0 */	b1_p+b_l-1-0,
+/** b2:0 */	b2_p+b_l-1-0,
+/** b3:0 */	b3_p+b_l-1-0,
+/** b4:0 */	b4_p+b_l-1-0,
+/** Xmax1:2 */	Xmax1_p+Xmax_l-1-2,
+/** Xmax2:2 */	Xmax2_p+Xmax_l-1-2,
+/** Xmax3:2 */	Xmax3_p+Xmax_l-1-2,
+/** Xmax4:2 */	Xmax4_p+Xmax_l-1-2,
+/** x1_0:2 */	x1_0_p+x_l-1-2,
+/** x1_1:2 */	x1_1_p+x_l-1-2,		/* bit 80 */
+/** x1_2:2 */	x1_2_p+x_l-1-2,
+/** x1_3:2 */	x1_3_p+x_l-1-2,
+/** x1_4:2 */	x1_4_p+x_l-1-2,
+/** x1_5:2 */	x1_5_p+x_l-1-2,
+/** x1_6:2 */	x1_6_p+x_l-1-2,
+/** x1_7:2 */	x1_7_p+x_l-1-2,
+/** x1_8:2 */	x1_8_p+x_l-1-2,
+/** x1_9:2 */	x1_9_p+x_l-1-2,
+/** x1_10:2 */	x1_10_p+x_l-1-2,
+/** x1_11:2 */	x1_11_p+x_l-1-2,		/* bit 90 */
+/** x1_12:2 */	x1_12_p+x_l-1-2,
+/** x2_0:2 */	x2_0_p+x_l-1-2,
+/** x2_1:2 */	x2_1_p+x_l-1-2,
+/** x2_2:2 */	x2_2_p+x_l-1-2,
+/** x2_3:2 */	x2_3_p+x_l-1-2,
+/** x2_4:2 */	x2_4_p+x_l-1-2,
+/** x2_5:2 */	x2_5_p+x_l-1-2,
+/** x2_6:2 */	x2_6_p+x_l-1-2,
+/** x2_7:2 */	x2_7_p+x_l-1-2,
+/** x2_8:2 */	x2_8_p+x_l-1-2,		/* bit 100 */
+/** x2_9:2 */	x2_9_p+x_l-1-2,
+/** x2_10:2 */	x2_10_p+x_l-1-2,
+/** x2_11:2 */	x2_11_p+x_l-1-2,
+/** x2_12:2 */	x2_12_p+x_l-1-2,
+/** x3_0:2 */	x3_0_p+x_l-1-2,
+/** x3_1:2 */	x3_1_p+x_l-1-2,
+/** x3_2:2 */	x3_2_p+x_l-1-2,
+/** x3_3:2 */	x3_3_p+x_l-1-2,
+/** x3_4:2 */	x3_4_p+x_l-1-2,
+/** x3_5:2 */	x3_5_p+x_l-1-2,		/* bit 110 */
+/** x3_6:2 */	x3_6_p+x_l-1-2,
+/** x3_7:2 */	x3_7_p+x_l-1-2,
+/** x3_8:2 */	x3_8_p+x_l-1-2,
+/** x3_9:2 */	x3_9_p+x_l-1-2,
+/** x3_10:2 */	x3_10_p+x_l-1-2,
+/** x3_11:2 */	x3_11_p+x_l-1-2,
+/** x3_12:2 */	x3_12_p+x_l-1-2,
+/** x4_0:2 */	x4_0_p+x_l-1-2,
+/** x4_1:2 */	x4_1_p+x_l-1-2,
+/** x4_2:2 */	x4_2_p+x_l-1-2,		/* bit 120 */
+/** x4_3:2 */	x4_3_p+x_l-1-2,
+/** x4_4:2 */	x4_4_p+x_l-1-2,
+/** x4_5:2 */	x4_5_p+x_l-1-2,
+/** x4_6:2 */	x4_6_p+x_l-1-2,
+/** x4_7:2 */	x4_7_p+x_l-1-2,
+/** x4_8:2 */	x4_8_p+x_l-1-2,
+/** x4_9:2 */	x4_9_p+x_l-1-2,
+/** x4_10:2 */	x4_10_p+x_l-1-2,
+/** x4_11:2 */	x4_11_p+x_l-1-2,
+/** x4_12:2 */	x4_12_p+x_l-1-2,		/* bit 130 */
+/** M1:0 */	M1_p+M_l-1-0,
+/** M2:0 */	M2_p+M_l-1-0,
+/** M3:0 */	M3_p+M_l-1-0,
+/** M4:0 */	M4_p+M_l-1-0,
+/** Xmax1:1 */	Xmax1_p+Xmax_l-1-1,
+/** Xmax2:1 */	Xmax2_p+Xmax_l-1-1,
+/** Xmax3:1 */	Xmax3_p+Xmax_l-1-1,
+/** Xmax4:1 */	Xmax4_p+Xmax_l-1-1,
+/** x1_0:1 */	x1_0_p+x_l-1-1,
+/** x1_1:1 */	x1_1_p+x_l-1-1,		/* bit 140 */
+/** x1_2:1 */	x1_2_p+x_l-1-1,
+/** x1_3:1 */	x1_3_p+x_l-1-1,
+/** x1_4:1 */	x1_4_p+x_l-1-1,
+/** x1_5:1 */	x1_5_p+x_l-1-1,
+/** x1_6:1 */	x1_6_p+x_l-1-1,
+/** x1_7:1 */	x1_7_p+x_l-1-1,
+/** x1_8:1 */	x1_8_p+x_l-1-1,
+/** x1_9:1 */	x1_9_p+x_l-1-1,
+/** x1_10:1 */	x1_10_p+x_l-1-1,
+/** x1_11:1 */	x1_11_p+x_l-1-1,		/* bit 150 */
+/** x1_12:1 */	x1_12_p+x_l-1-1,
+/** x2_0:1 */	x2_0_p+x_l-1-1,
+/** x2_1:1 */	x2_1_p+x_l-1-1,
+/** x2_2:1 */	x2_2_p+x_l-1-1,
+/** x2_3:1 */	x2_3_p+x_l-1-1,
+/** x2_4:1 */	x2_4_p+x_l-1-1,
+/** x2_5:1 */	x2_5_p+x_l-1-1,
+/** x2_6:1 */	x2_6_p+x_l-1-1,
+/** x2_7:1 */	x2_7_p+x_l-1-1,
+/** x2_8:1 */	x2_8_p+x_l-1-1,		/* bit 160 */
+/** x2_9:1 */	x2_9_p+x_l-1-1,
+/** x2_10:1 */	x2_10_p+x_l-1-1,
+/** x2_11:1 */	x2_11_p+x_l-1-1,
+/** x2_12:1 */	x2_12_p+x_l-1-1,
+/** x3_0:1 */	x3_0_p+x_l-1-1,
+/** x3_1:1 */	x3_1_p+x_l-1-1,
+/** x3_2:1 */	x3_2_p+x_l-1-1,
+/** x3_3:1 */	x3_3_p+x_l-1-1,
+/** x3_4:1 */	x3_4_p+x_l-1-1,
+/** x3_5:1 */	x3_5_p+x_l-1-1,		/* bit 170 */
+/** x3_6:1 */	x3_6_p+x_l-1-1,
+/** x3_7:1 */	x3_7_p+x_l-1-1,
+/** x3_8:1 */	x3_8_p+x_l-1-1,
+/** x3_9:1 */	x3_9_p+x_l-1-1,
+/** x3_10:1 */	x3_10_p+x_l-1-1,
+/** x3_11:1 */	x3_11_p+x_l-1-1,
+/** x3_12:1 */	x3_12_p+x_l-1-1,
+/** x4_0:1 */	x4_0_p+x_l-1-1,
+/** x4_1:1 */	x4_1_p+x_l-1-1,
+/** x4_2:1 */	x4_2_p+x_l-1-1,		/* bit 180 */
+/** x4_3:1 */	x4_3_p+x_l-1-1,
+//@}
+/**@name importance class 6 */
+//@{
+/** x4_4:1 */	x4_4_p+x_l-1-1,
+/** x4_5:1 */	x4_5_p+x_l-1-1,
+/** x4_6:1 */	x4_6_p+x_l-1-1,
+/** x4_7:1 */	x4_7_p+x_l-1-1,
+/** x4_8:1 */	x4_8_p+x_l-1-1,
+/** x4_9:1 */	x4_9_p+x_l-1-1,
+/** x4_10:1 */	x4_10_p+x_l-1-1,
+/** x4_11:1 */	x4_11_p+x_l-1-1,
+/** x4_12:1 */	x4_12_p+x_l-1-1,		/* bit 190 */
+/** LAR1:0 */	LAR1_p+LAR1_l-1-0,
+/** LAR2:1 */	LAR2_p+LAR2_l-1-1,
+/** LAR3:1 */	LAR3_p+LAR3_l-1-1,
+/** LAR6:1 */	LAR6_p+LAR6_l-1-1,
+/** LAR7:0 */	LAR7_p+LAR7_l-1-0,
+/** LAR8:1 */	LAR8_p+LAR8_l-1-1,
+/** LAR8:0 */	LAR8_p+LAR8_l-1-0,
+/** LAR3:0 */	LAR3_p+LAR3_l-1-0,
+/** LAR4:1 */	LAR4_p+LAR4_l-1-1,
+/** LAR4:0 */	LAR4_p+LAR4_l-1-0,
+/** LAR5:0 */	LAR5_p+LAR5_l-1-0,
+/** Xmax1:0 */	Xmax1_p+Xmax_l-1-0,
+/** Xmax2:0 */	Xmax2_p+Xmax_l-1-0,
+/** Xmax3:0 */	Xmax3_p+Xmax_l-1-0,
+/** Xmax4:0 */	Xmax4_p+Xmax_l-1-0,
+/** x1_0:0 */	x1_0_p+x_l-1-0,
+/** x1_1:0 */	x1_1_p+x_l-1-0,
+/** x1_2:0 */	x1_2_p+x_l-1-0,
+/** x1_3:0 */	x1_3_p+x_l-1-0,
+/** x1_4:0 */	x1_4_p+x_l-1-0,
+/** x1_5:0 */	x1_5_p+x_l-1-0,
+/** x1_6:0 */	x1_6_p+x_l-1-0,
+/** x1_7:0 */	x1_7_p+x_l-1-0,
+/** x1_8:0 */	x1_8_p+x_l-1-0,
+/** x1_9:0 */	x1_9_p+x_l-1-0,
+/** x1_10:0 */	x1_10_p+x_l-1-0,
+/** x1_11:0 */	x1_11_p+x_l-1-0,
+/** x1_12:0 */	x1_12_p+x_l-1-0,
+/** x2_0:0 */	x2_0_p+x_l-1-0,
+/** x2_1:0 */	x2_1_p+x_l-1-0,
+/** x2_2:0 */	x2_2_p+x_l-1-0,
+/** x2_3:0 */	x2_3_p+x_l-1-0,
+/** x2_4:0 */	x2_4_p+x_l-1-0,
+/** x2_5:0 */	x2_5_p+x_l-1-0,
+/** x2_6:0 */	x2_6_p+x_l-1-0,
+/** x2_7:0 */	x2_7_p+x_l-1-0,
+/** x2_8:0 */	x2_8_p+x_l-1-0,
+/** x2_9:0 */	x2_9_p+x_l-1-0,
+/** x2_10:0 */	x2_10_p+x_l-1-0,
+/** x2_11:0 */	x2_11_p+x_l-1-0,
+/** x2_12:0 */	x2_12_p+x_l-1-0,
+/** x3_0:0 */	x3_0_p+x_l-1-0,
+/** x3_1:0 */	x3_1_p+x_l-1-0,
+/** x3_2:0 */	x3_2_p+x_l-1-0,
+/** x3_3:0 */	x3_3_p+x_l-1-0,
+/** x3_4:0 */	x3_4_p+x_l-1-0,
+/** x3_5:0 */	x3_5_p+x_l-1-0,
+/** x3_6:0 */	x3_6_p+x_l-1-0,
+/** x3_7:0 */	x3_7_p+x_l-1-0,
+/** x3_8:0 */	x3_8_p+x_l-1-0,
+/** x3_9:0 */	x3_9_p+x_l-1-0,
+/** x3_10:0 */	x3_10_p+x_l-1-0,
+/** x3_11:0 */	x3_11_p+x_l-1-0,
+/** x3_12:0 */	x3_12_p+x_l-1-0,
+/** x4_0:0 */	x4_0_p+x_l-1-0,
+/** x4_1:0 */	x4_1_p+x_l-1-0,
+/** x4_2:0 */	x4_2_p+x_l-1-0,
+/** x4_3:0 */	x4_3_p+x_l-1-0,
+/** x4_4:0 */	x4_4_p+x_l-1-0,
+/** x4_5:0 */	x4_5_p+x_l-1-0,
+/** x4_6:0 */	x4_6_p+x_l-1-0,
+/** x4_7:0 */	x4_7_p+x_l-1-0,
+/** x4_8:0 */	x4_8_p+x_l-1-0,
+/** x4_9:0 */	x4_9_p+x_l-1-0,
+/** x4_10:0 */	x4_10_p+x_l-1-0,
+/** x4_11:0 */	x4_11_p+x_l-1-0,
+/** x4_12:0 */	x4_12_p+x_l-1-0,
+/** LAR2:0 */	LAR2_p+LAR2_l-1-0,
+/** LAR6:0 */	LAR6_p+LAR6_l-1-0
+//@}
+};
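+
+/*
+	Illustrative use of the mapping documented above (RTP[4+g610BitOrder[i]] <=> GSM[i]):
+	unpacking the 260 channel-ordered d[] bits into RFC 3551 bit order.  The leading
+	four RTP bits are the 1101 signature shown in Table 3; the helper below is
+	hypothetical and works on one bit per byte.
+
+	static void d2rtpBits(const unsigned char *d, unsigned char *rtpBits)
+	{
+		rtpBits[0] = 1; rtpBits[1] = 1; rtpBits[2] = 0; rtpBits[3] = 1;	// signature
+		for (unsigned i = 0; i < 260; i++) rtpBits[4 + GSM::g610BitOrder[i]] = d[i];
+	}
+*/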
+
diff --git a/lib/decoding/openbts/GSM610Tables.h b/lib/decoding/openbts/GSM610Tables.h
new file mode 100644
index 0000000..0b8d64f
--- /dev/null
+++ b/lib/decoding/openbts/GSM610Tables.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2008 Free Software Foundation, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This use of this software may be subject to additional restrictions.
+ * See the LEGAL file in the main directory for details.
+ */
+
+#ifndef GSM610TABLES_H
+#define GSM610TABLES_H
+
+
+
+namespace GSM {
+
+/** Table #2 from GSM 05.03 */
+extern unsigned int g610BitOrder[260];
+
+}
+
+
+#endif
diff --git a/lib/decoding/openbts/GSM660Tables.cpp b/lib/decoding/openbts/GSM660Tables.cpp
new file mode 100644
index 0000000..e5687c3
--- /dev/null
+++ b/lib/decoding/openbts/GSM660Tables.cpp
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2010  Sylvain Munaut <tnt@246tNt.com>
+ * All Rights Reserved
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This use of this software may be subject to additional restrictions.
+ * See the LEGAL file in the main directory for details.
+ */
+
+/* EFR (GSM 06.60) importance bit ordering */
+
+#include "GSM660Tables.h"
+
+unsigned int GSM::g660BitOrder[260] = {
+	 38,  39,  40,  41,  42,  43,		/*   0 -> LTP-LAG 1: b8..b3 */
+	145, 146, 147, 148, 149, 150,		/*   6 -> LTP-LAG 3: b8..b3 */
+	 93,  94,				/*  12 -> LTP-LAG 2: b5..b4 */
+	200, 201,				/*  14 -> LTP-LAG 4: b5..b4 */
+	 47,					/*  16 -> LTP-GAIN 1: b3    */
+	 88,					/*  17 -> FCB-GAIN 1: b4    */
+	 99,					/*  18 -> LTP-GAIN 2: b3    */
+	140,					/*  19 -> FCB-GAIN 2: b4    */
+	 44,					/*  20 -> LTP-LAG 1: b2     */
+	151,					/*  21 -> LTP-LAG 3: b2     */
+	 95,					/*  22 -> LTP-LAG 2: b3     */
+	202,					/*  23 -> LTP-LAG 4: b3     */
+	  1,   2,				/*  24 -> LPC 1: b5..b4     */
+	  7,					/*  26 -> LPC 2: b7         */
+	  9,					/*  27 -> LPC 2: b5         */
+	 17,  18,				/*  28 -> LPC 3: b6..b5     */
+	 23,					/*  30 -> LPC 3: b0         */
+	 45,  46,				/*  31 -> LTP-LAG 1: b1..b0 */
+	152, 153,				/*  33 -> LTP-LAG 3: b1..b0 */
+	 96,					/*  35 -> LTP-LAG 2: b2     */
+	203,					/*  36 -> LTP-LAG 4: b2     */
+	  3,   4,				/*  37 -> LPC 1: b3..b2     */
+	 10,  11,				/*  39 -> LPC 2: b4..b3     */
+	 15,					/*  41 -> LPC 3: b8         */
+	  8,					/*  42 -> LPC 2: b6         */
+	  5,   6,				/*  43 -> LPC 1: b1..b0     */
+	 12,					/*  45 -> LPC 2: b2         */
+	 16,					/*  46 -> LPC 3: b7         */
+	 19,					/*  47 -> LPC 3: b4         */
+	 97,					/*  48 -> LTP-LAG 2: b1     */
+	204,					/*  49 -> LTP-LAG 4: b1     */
+	  0,					/*  50 -> LPC 1: b6         */
+	 13,  14,				/*  51 -> LPC 2: b1..b0     */
+	 20,					/*  53 -> LPC 3: b3         */
+	 24,  25,				/*  54 -> LPC 4: b7..b6     */
+	 27,					/*  56 -> LPC 4: b4         */
+	154,					/*  57 -> LTP-GAIN 3: b3    */
+	206,					/*  58 -> LTP-GAIN 4: b3    */
+	195,					/*  59 -> FCB-GAIN 3: b4    */
+	247,					/*  60 -> FCB-GAIN 4: b4    */
+	 89,					/*  61 -> FCB-GAIN 1: b3    */
+	141,					/*  62 -> FCB-GAIN 2: b3    */
+	196,					/*  63 -> FCB-GAIN 3: b3    */
+	248,					/*  64 -> FCB-GAIN 4: b3    */
+	252, 253, 254, 255, 256, 257, 258, 259,	/*  65 -> CRC-POLY: b7..b0  */
+	 48,					/*  73 -> LTP-GAIN 1: b2    */
+	100,					/*  74 -> LTP-GAIN 2: b2    */
+	155,					/*  75 -> LTP-GAIN 3: b2    */
+	207,					/*  76 -> LTP-GAIN 4: b2    */
+	 21,  22,				/*  77 -> LPC 3: b2..b1     */
+	 26,					/*  79 -> LPC 4: b5         */
+	 28,					/*  80 -> LPC 4: b3         */
+	 51,					/*  81 -> PULSE 1_1: b3     */
+	 55,					/*  82 -> PULSE 1_2: b3     */
+	 59,					/*  83 -> PULSE 1_3: b3     */
+	 63,					/*  84 -> PULSE 1_4: b3     */
+	 67,					/*  85 -> PULSE 1_5: b3     */
+	103,					/*  86 -> PULSE 2_1: b3     */
+	107,					/*  87 -> PULSE 2_2: b3     */
+	111,					/*  88 -> PULSE 2_3: b3     */
+	115,					/*  89 -> PULSE 2_4: b3     */
+	119,					/*  90 -> PULSE 2_5: b3     */
+	158,					/*  91 -> PULSE 3_1: b3     */
+	162,					/*  92 -> PULSE 3_2: b3     */
+	166,					/*  93 -> PULSE 3_3: b3     */
+	170,					/*  94 -> PULSE 3_4: b3     */
+	174,					/*  95 -> PULSE 3_5: b3     */
+	210,					/*  96 -> PULSE 4_1: b3     */
+	214,					/*  97 -> PULSE 4_2: b3     */
+	218,					/*  98 -> PULSE 4_3: b3     */
+	222,					/*  99 -> PULSE 4_4: b3     */
+	226,					/* 100 -> PULSE 4_5: b3     */
+	 90,					/* 101 -> FCB-GAIN 1: b2    */
+	142,					/* 102 -> FCB-GAIN 2: b2    */
+	197,					/* 103 -> FCB-GAIN 3: b2    */
+	249,					/* 104 -> FCB-GAIN 4: b2    */
+	 49,					/* 105 -> LTP-GAIN 1: b1    */
+	101,					/* 106 -> LTP-GAIN 2: b1    */
+	156,					/* 107 -> LTP-GAIN 3: b1    */
+	208,					/* 108 -> LTP-GAIN 4: b1    */
+	 29,  30,  31,				/* 109 -> LPC 4: b2..b0     */
+	 32,  33,  34,  35,			/* 112 -> LPC 5: b5..b2     */
+	 98,					/* 116 -> LTP-LAG 2: b0     */
+	205,					/* 117 -> LTP-LAG 4: b0     */
+	 52,					/* 118 -> PULSE 1_1: b2     */
+	 56,					/* 119 -> PULSE 1_2: b2     */
+	 60,					/* 120 -> PULSE 1_3: b2     */
+	 64,					/* 121 -> PULSE 1_4: b2     */
+	 68,					/* 122 -> PULSE 1_5: b2     */
+	104,					/* 123 -> PULSE 2_1: b2     */
+	108,					/* 124 -> PULSE 2_2: b2     */
+	112,					/* 125 -> PULSE 2_3: b2     */
+	116,					/* 126 -> PULSE 2_4: b2     */
+	120,					/* 127 -> PULSE 2_5: b2     */
+	159,					/* 128 -> PULSE 3_1: b2     */
+	163,					/* 129 -> PULSE 3_2: b2     */
+	167,					/* 130 -> PULSE 3_3: b2     */
+	171,					/* 131 -> PULSE 3_4: b2     */
+	175,					/* 132 -> PULSE 3_5: b2     */
+	211,					/* 133 -> PULSE 4_1: b2     */
+	215,					/* 134 -> PULSE 4_2: b2     */
+	219,					/* 135 -> PULSE 4_3: b2     */
+	223,					/* 136 -> PULSE 4_4: b2     */
+	227,					/* 137 -> PULSE 4_5: b2     */
+	 53,					/* 138 -> PULSE 1_1: b1     */
+	 57,					/* 139 -> PULSE 1_2: b1     */
+	 61,					/* 140 -> PULSE 1_3: b1     */
+	 65,					/* 141 -> PULSE 1_4: b1     */
+	105,					/* 142 -> PULSE 2_1: b1     */
+	109,					/* 143 -> PULSE 2_2: b1     */
+	113,					/* 144 -> PULSE 2_3: b1     */
+	117,					/* 145 -> PULSE 2_4: b1     */
+	160,					/* 146 -> PULSE 3_1: b1     */
+	164,					/* 147 -> PULSE 3_2: b1     */
+	168,					/* 148 -> PULSE 3_3: b1     */
+	172,					/* 149 -> PULSE 3_4: b1     */
+	212,					/* 150 -> PULSE 4_1: b1     */
+	220,					/* 151 -> PULSE 4_3: b1     */
+	224,					/* 152 -> PULSE 4_4: b1     */
+	 91,					/* 153 -> FCB-GAIN 1: b1    */
+	143,					/* 154 -> FCB-GAIN 2: b1    */
+	198,					/* 155 -> FCB-GAIN 3: b1    */
+	250,					/* 156 -> FCB-GAIN 4: b1    */
+	 50,					/* 157 -> LTP-GAIN 1: b0    */
+	102,					/* 158 -> LTP-GAIN 2: b0    */
+	157,					/* 159 -> LTP-GAIN 3: b0    */
+	209,					/* 160 -> LTP-GAIN 4: b0    */
+	 92,					/* 161 -> FCB-GAIN 1: b0    */
+	144,					/* 162 -> FCB-GAIN 2: b0    */
+	199,					/* 163 -> FCB-GAIN 3: b0    */
+	251,					/* 164 -> FCB-GAIN 4: b0    */
+	 54,					/* 165 -> PULSE 1_1: b0     */
+	 58,					/* 166 -> PULSE 1_2: b0     */
+	 62,					/* 167 -> PULSE 1_3: b0     */
+	 66,					/* 168 -> PULSE 1_4: b0     */
+	106,					/* 169 -> PULSE 2_1: b0     */
+	110,					/* 170 -> PULSE 2_2: b0     */
+	114,					/* 171 -> PULSE 2_3: b0     */
+	118,					/* 172 -> PULSE 2_4: b0     */
+	161,					/* 173 -> PULSE 3_1: b0     */
+	165,					/* 174 -> PULSE 3_2: b0     */
+	169,					/* 175 -> PULSE 3_3: b0     */
+	173,					/* 176 -> PULSE 3_4: b0     */
+	213,					/* 177 -> PULSE 4_1: b0     */
+	221,					/* 178 -> PULSE 4_3: b0     */
+	225,					/* 179 -> PULSE 4_4: b0     */
+	 36,  37,				/* 180 -> LPC 5: b1..b0     */
+	 69,					/* 182 -> PULSE 1_5: b1     */
+	 71,  72,				/* 183 -> PULSE 1_5: b1..b1 */
+	121,					/* 185 -> PULSE 2_5: b1     */
+	123, 124,				/* 186 -> PULSE 2_5: b1..b1 */
+	176,					/* 188 -> PULSE 3_5: b1     */
+	178, 179,				/* 189 -> PULSE 3_5: b1..b1 */
+	228,					/* 191 -> PULSE 4_5: b1     */
+	230, 231,				/* 192 -> PULSE 4_5: b1..b1 */
+	216, 217,				/* 194 -> PULSE 4_2: b1..b0 */
+	 70,					/* 196 -> PULSE 1_5: b0     */
+	122,					/* 197 -> PULSE 2_5: b0     */
+	177,					/* 198 -> PULSE 3_5: b0     */
+	229,					/* 199 -> PULSE 4_5: b0     */
+	 73,					/* 200 -> PULSE 1_6: b2     */
+	 76,					/* 201 -> PULSE 1_7: b2     */
+	 79,					/* 202 -> PULSE 1_8: b2     */
+	 82,					/* 203 -> PULSE 1_9: b2     */
+	 85,					/* 204 -> PULSE 1_10: b2    */
+	125,					/* 205 -> PULSE 2_6: b2     */
+	128,					/* 206 -> PULSE 2_7: b2     */
+	131,					/* 207 -> PULSE 2_8: b2     */
+	134,					/* 208 -> PULSE 2_9: b2     */
+	137,					/* 209 -> PULSE 2_10: b2    */
+	180,					/* 210 -> PULSE 3_6: b2     */
+	183,					/* 211 -> PULSE 3_7: b2     */
+	186,					/* 212 -> PULSE 3_8: b2     */
+	189,					/* 213 -> PULSE 3_9: b2     */
+	192,					/* 214 -> PULSE 3_10: b2    */
+	232,					/* 215 -> PULSE 4_6: b2     */
+	235,					/* 216 -> PULSE 4_7: b2     */
+	238,					/* 217 -> PULSE 4_8: b2     */
+	241,					/* 218 -> PULSE 4_9: b2     */
+	244,					/* 219 -> PULSE 4_10: b2    */
+	 74,					/* 220 -> PULSE 1_6: b1     */
+	 77,					/* 221 -> PULSE 1_7: b1     */
+	 80,					/* 222 -> PULSE 1_8: b1     */
+	 83,					/* 223 -> PULSE 1_9: b1     */
+	 86,					/* 224 -> PULSE 1_10: b1    */
+	126,					/* 225 -> PULSE 2_6: b1     */
+	129,					/* 226 -> PULSE 2_7: b1     */
+	132,					/* 227 -> PULSE 2_8: b1     */
+	135,					/* 228 -> PULSE 2_9: b1     */
+	138,					/* 229 -> PULSE 2_10: b1    */
+	181,					/* 230 -> PULSE 3_6: b1     */
+	184,					/* 231 -> PULSE 3_7: b1     */
+	187,					/* 232 -> PULSE 3_8: b1     */
+	190,					/* 233 -> PULSE 3_9: b1     */
+	193,					/* 234 -> PULSE 3_10: b1    */
+	233,					/* 235 -> PULSE 4_6: b1     */
+	236,					/* 236 -> PULSE 4_7: b1     */
+	239,					/* 237 -> PULSE 4_8: b1     */
+	242,					/* 238 -> PULSE 4_9: b1     */
+	245,					/* 239 -> PULSE 4_10: b1    */
+	 75,					/* 240 -> PULSE 1_6: b0     */
+	 78,					/* 241 -> PULSE 1_7: b0     */
+	 81,					/* 242 -> PULSE 1_8: b0     */
+	 84,					/* 243 -> PULSE 1_9: b0     */
+	 87,					/* 244 -> PULSE 1_10: b0    */
+	127,					/* 245 -> PULSE 2_6: b0     */
+	130,					/* 246 -> PULSE 2_7: b0     */
+	133,					/* 247 -> PULSE 2_8: b0     */
+	136,					/* 248 -> PULSE 2_9: b0     */
+	139,					/* 249 -> PULSE 2_10: b0    */
+	182,					/* 250 -> PULSE 3_6: b0     */
+	185,					/* 251 -> PULSE 3_7: b0     */
+	188,					/* 252 -> PULSE 3_8: b0     */
+	191,					/* 253 -> PULSE 3_9: b0     */
+	194,					/* 254 -> PULSE 3_10: b0    */
+	234,					/* 255 -> PULSE 4_6: b0     */
+	237,					/* 256 -> PULSE 4_7: b0     */
+	240,					/* 257 -> PULSE 4_8: b0     */
+	243,					/* 258 -> PULSE 4_9: b0     */
+	246,					/* 259 -> PULSE 4_10: b0    */
+};
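+
+/*
+	(illustrative) The index into this table is the channel-coding (importance-ordered)
+	bit position and the value is the corresponding bit position in the 260-bit precoded
+	EFR block, so d(i) corresponds to block bit g660BitOrder[i]; e.g. entry 0 maps to
+	bit 38 (LTP-LAG 1, b8) and entries 65..72 map to the eight CRC-POLY bits 252..259.
+*/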
diff --git a/lib/decoding/openbts/GSM660Tables.h b/lib/decoding/openbts/GSM660Tables.h
new file mode 100644
index 0000000..9052c3b
--- /dev/null
+++ b/lib/decoding/openbts/GSM660Tables.h
@@ -0,0 +1,34 @@
+/* EFR (GSM 06.60) importance bit ordering */
+
+/*
+ * Copyright 2010  Sylvain Munaut <tnt@246tNt.com>
+ * All Rights Reserved
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This use of this software may be subject to additional restrictions.
+ * See the LEGAL file in the main directory for details.
+ */
+
+#ifndef GSM660TABLES_H
+#define GSM660TABLES_H
+
+namespace GSM {
+
+/** Table #6 from GSM 05.03 */
+extern unsigned int g660BitOrder[260];
+
+}
+
+#endif /* GSM660TABLES_H */
diff --git a/lib/decoding/openbts/Vector.h b/lib/decoding/openbts/Vector.h
new file mode 100644
index 0000000..e1224e8
--- /dev/null
+++ b/lib/decoding/openbts/Vector.h
@@ -0,0 +1,397 @@
+/*
+ * Copyright 2008 Free Software Foundation, Inc.
+ * Copyright 2014 Range Networks, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This use of this software may be subject to additional restrictions.
+ * See the LEGAL file in the main directory for details.
+ */
+
+/**@file Simplified Vector template with aliases. */
+
+
+#ifndef VECTOR_H
+#define VECTOR_H
+
+#include <string.h>
+#include <iostream>
+#include <assert.h>
+#include <stdio.h>
+// We can't use Logger.h in this file...
+extern int gVectorDebug;
+//#define ENABLE_VECTORDEBUG
+#ifdef ENABLE_VECTORDEBUG
+#define VECTORDEBUG(...) { printf(__VA_ARGS__); printf(" this=%p [%p,%p,%p]\n",(void*)this,(void*)&mData,mStart,mEnd); }
+//#define VECTORDEBUG(msg) { std::cout<<msg<<std::endl; }
+#else
+#define VECTORDEBUG(...)
+#endif
+
+#define BITVECTOR_REFCNTS 0
+
+#if BITVECTOR_REFCNTS
+// (pat) Started to add refcnts, decided against it for now.
+template <class T> class RCData : public RefCntBase {
+        public:
+        T* mPointer;
+};
+#endif
+
+
+/**
+        A simplified Vector template with aliases.
+        Unlike std::vector, this class does not support dynamic resizing.
+        Unlike std::vector, this class does support "aliases" and subvectors.
+*/
+// (pat) Nov 2013:  Vector and the derived classes BitVector and SoftVector were originally written with behavior
+// that differed for const and non-const cases, making them very difficult to use and resulting in many extremely
+// difficult to find bugs in the code base.
+// Ultimately these classes should all be converted to reference counted methodologies, but as an interim measure
+// I am rationalizing their behavior until we flush out all places in the code base that inadvertently depended
+// on the original behavior.  This is done with assert statements in BitVector methods.
+// ====
+// What the behavior was probably supposed to be:
+//              Vectors can 'own' the data they point to or not.  Only one Vector 'owns' the memory at a time,
+//              so that automatic destruction can be used.  So whenever there is an operation that yields one
+//              vector from another the options were: clone (allocate a new vector from memory), alias (make the
+//              new vector point into the memory of the original vector) or shift (the new Vector steals the
+//              memory ownership from the original vector.)
+//              The const copy-constructor did a clone, the non-const copy constructor did a shiftMem, and the segment and
+//              related methods (head, tail, etc) returned aliases.
+//              Since a copy-constructor is inserted transparently in sometimes surprising places, this made the
+//              class very difficult to use.  Moreover, since the C++ standard specifies that a copy-constructor is used
+//              to copy the return value from functions, it makes it literally impossible for a function to fully control
+//              the return value.  Our code has relied on the "Return Value Optimization" which says that the C++ compiler
+//              may omit the copy-construction of the return value even if the copy-constructor has side-effects, which ours does.
+//              This methodology is fundamentally incompatible with C++.
+// What the original behavior actually was:
+//      class Vector:
+//              The copy-constructor and assignment operators did a clone for the const case and a shift for the non-const case.
+//                      This is really horrible.
+//              The segment methods were identical for const and non-const cases, always returning an alias.
+//              This also resulted in zillions of redundant mallocs and copies throughout the code base.
+//      class BitVector:
+//              Copy-constructor:
+//                      BitVector did not have any copy-constructors, and I think the intent was that it would have the same behavior
+//                      as Vector, but that is not how C++ works: with no copy-constructor the default copy-constructor
+//                      uses only the const case, so only the const Vector copy-constructor was used.  Therefore it always cloned,
+//                      and the code base relied heavily on the "Return Value Optimization" to work at all.
+//              Assignment operator:
+//                      BitVector did not have one, so C++ makes a default one that calls Vector::operator=() as a side effect,
+//                      which did a clone; not sure if there was a non-const version and no longer care.
+//              segment methods:
+//                      The non-const segment() returned an alias, and the const segment() returned a clone.
+//                      I think the intent was that the behavior should be the same as Vector, but there was a conversion
+//                      of the result of the const segment() method from Vector to BitVector which caused the Vector copy-constructor
+//                      to be (inadvertently) invoked, resulting in the const version of the segment method returning a clone.
+// What the behavior is now:
+//      VectorBase:
+//              There is a new VectorBase class that has only the common methods and extremely basic constructors.
+//              The VectorBase class MUST NOT CONTAIN: copy constructors, non-trivial constructors called from derived classes,
+//              or any method that returns a VectorBase type object.  Why?  Because any of the above when used in derived classes
+//              can cause copy-constructor invocation, often surprisingly, obfuscating the code.
+//              Each derived class must provide its own: copy-constructors and segment() and related methods, since we do not
+//              want to inadvertently invoke a copy-constructor to convert the segment() result from VectorBase to the derived type.
+//      BitVector:
+//              The BitVector copy-constructor and assignment operator (inherited from VectorBase) paradigm is:
+//              if the copied Vector owned memory, perform a clone so the new vector owns memory also,
+//              otherwise just do a simple copy, which is another alias.  This isn't perfect but it works every place
+//              in our code base and is easier to use than the previous paradigm.
+//              The segment method always returns an alias.
+//              If you want a clone of a segment, use cloneSegment(), which replaces the previous: const segment(...) const method.
+//              Note that the semantics of cloneSegment still rely on the Return Value Optimization.  Oh well, we should use refcnts.
+//      Vector:
+//              I left Vector alone (except for rearrangement to separate out VectorBase.)  Vector should just not be used.
+//      SoftVector:
+//              SoftVector and signalVector should be updated similarly to BitVector, but I did not want to disturb them.
+// What the behavior should be:
+//              All these should be reference-counted, similar to ByteVector.
+template <class T> class VectorBase
+{
+        // TODO -- Replace memcpy calls with for-loops. (pat) in case class T is not POD [Plain Old Data]
+
+        protected:
+#if BITVECTOR_REFCNTS
+        typedef RefCntPointer<RCData<T> > VectorDataType;
+#else
+        typedef T* VectorDataType;
+#endif
+        VectorDataType mData;           ///< allocated data block.
+        T* mStart;              ///< start of useful data
+        T* mEnd;                ///< end of useful data + 1
+
+        // Init vector with specified size.  Previous contents are completely discarded.  This is only used for initialization.
+        void vInit(size_t elements)
+        {
+                mData = elements ? new T[elements] : NULL;
+                mStart = mData;  // This is where mStart gets set to zero (NULL when elements == 0)
+                mEnd = mStart + elements;
+        }
+
+        /** Assign from another Vector, shifting ownership. */
+        // (pat) This should be eliminated, but it is used by Vector and descendants.
+        void shiftMem(VectorBase<T>&other)
+        {
+                VECTORDEBUG("VectorBase::shiftMem(%p)",(void*)&other);
+                this->clear();
+                this->mData=other.mData;
+                this->mStart=other.mStart;
+                this->mEnd=other.mEnd;
+                other.mData=NULL;
+        }
+
+        // Assign from another Vector, making this an alias to other.
+        void makeAlias(const VectorBase<T> &other)
+        {
+                if (this->getData()) {
+                        assert(this->getData() != other.getData()); // Not possible by the semantics of Vector.
+                        this->clear();
+                }
+                this->mStart=const_cast<T*>(other.mStart);
+                this->mEnd=const_cast<T*>(other.mEnd);
+        }
+
+        public:
+
+        /** Return the size of the Vector in units, ie, the number of T elements. */
+        size_t size() const
+        {
+                assert(mStart>=mData);
+                assert(mEnd>=mStart);
+                return mEnd - mStart;
+        }
+
+        /** Return size in bytes. */
+        size_t bytes() const { return this->size()*sizeof(T); }
+
+        /** Change the size of the Vector in items (not bytes), discarding content. */
+        void resize(size_t newElements) {
+                //VECTORDEBUG("VectorBase::resize("<<(void*)this<<","<<newElements<<")");
+                VECTORDEBUG("VectorBase::resize(%p,%d) %s",this,newElements, (mData?"delete":""));
+                if (mData!=NULL) delete[] mData;
+                vInit(newElements);
+        }
+
+        /** Release memory and clear pointers. */
+        void clear() { this->resize(0); }
+
+
+        /** Copy data from another vector. */
+        void clone(const VectorBase<T>& other) {
+                this->resize(other.size());
+                memcpy(mData,other.mStart,other.bytes());
+        }
+
+        void vConcat(const VectorBase<T>&other1, const VectorBase<T>&other2) {
+                this->resize(other1.size()+other2.size());
+                memcpy(this->mStart, other1.mStart, other1.bytes());
+                memcpy(this->mStart+other1.size(), other2.mStart, other2.bytes());
+        }
+
+        protected:
+
+        VectorBase() : mData(0), mStart(0), mEnd(0) {}
+
+        /** Build a Vector with explicit values. */
+        VectorBase(VectorDataType wData, T* wStart, T* wEnd) :mData(wData),mStart(wStart),mEnd(wEnd) {
+                //VECTORDEBUG("VectorBase("<<(void*)wData);
+                VECTORDEBUG("VectorBase(%p,%p,%p)",this->getData(),wStart,wEnd);
+        }
+
+        public:
+
+        /** Destroy a Vector, deleting held memory. */
+        ~VectorBase() {
+                //VECTORDEBUG("~VectorBase("<<(void*)this<<")");
+                VECTORDEBUG("~VectorBase(%p)",this);
+                this->clear();
+        }
+
+        bool isOwner() { return !!this->mData; }        // Do we own any memory ourselves?
+
+        std::string inspect() const {
+                char buf[100];
+                snprintf(buf,100," mData=%p mStart=%p mEnd=%p ",(void*)mData,mStart,mEnd);
+                return std::string(buf);
+        }
+
+
+        /**
+                Copy part of this Vector to a segment of another Vector.
+                @param other The other vector.
+                @param start The start point in the other vector.
+                @param span The number of elements to copy.
+        */
+        void copyToSegment(VectorBase<T>& other, size_t start, size_t span) const
+        {
+                T* base = other.mStart + start;
+                assert(base+span<=other.mEnd);
+                assert(mStart+span<=mEnd);
+                memcpy(base,mStart,span*sizeof(T));
+        }
+
+        /** Copy all of this Vector to a segment of another Vector. */
+        void copyToSegment(VectorBase<T>& other, size_t start=0) const { copyToSegment(other,start,size()); }
+
+        void copyTo(VectorBase<T>& other) const { copyToSegment(other,0,size()); }
+
+        /**
+                Copy a segment of this vector into another.
+                @param other The other vector (to copy into, starting at 0).
+                @param start The start point in this vector.
+                @param span The number of elements to copy.
+                WARNING: This function does NOT resize the result - you must set the result size before entering.
+        */
+        void segmentCopyTo(VectorBase<T>& other, size_t start, size_t span) const
+        {
+                const T* base = mStart + start;
+                assert(base+span<=mEnd);
+                assert(other.mStart+span<=other.mEnd);
+                memcpy(other.mStart,base,span*sizeof(T));
+        }
+
+        void fill(const T& val)
+        {
+                T* dp=mStart;
+                while (dp<mEnd) *dp++=val;
+        }
+
+        void fill(const T& val, unsigned start, unsigned length)
+        {
+                T* dp=mStart+start;
+                T* end=dp+length;
+                assert(end<=mEnd);
+                while (dp<end) *dp++=val;
+        }
+
+        /** Assign from another Vector. */
+        // (pat) This is used for both const and non-const cases.
+        // If the original vector owned memory, clone it, otherwise just copy the segment data.
+        void operator=(const VectorBase<T>& other) {
+                //std::cout << "Vector=(this="<<this->inspect()<<",other="<<other.inspect()<<")"<<endl;
+                if (other.getData()) {
+                        this->clone(other);
+                } else {
+                        this->makeAlias(other);
+                }
+                //std::cout << "Vector= after(this="<<this->inspect()<<")"<<endl;
+        }
+
+
+        T& operator[](size_t index)
+        {
+                assert(mStart+index<mEnd);
+                return mStart[index];
+        }
+
+        const T& operator[](size_t index) const
+        {
+                assert(mStart+index<mEnd);
+                return mStart[index];
+        }
+
+        const T* begin() const { return this->mStart; }
+        T* begin() { return this->mStart; }
+        const T* end() const { return this->mEnd; }
+        T* end() { return this->mEnd; }
+#if BITVECTOR_REFCNTS
+        const T*getData() const { return this->mData.isNULL() ? 0 : this->mData->mPointer; }
+#else
+        const T*getData() const { return this->mData; }
+#endif
+};
+
+// (pat) Nov 2013.  This class retains the original poor behavior.  See comments at VectorBase
+template <class T> class Vector : public VectorBase<T>
+{
+        public:
+
+        /** Build an empty Vector of a given size. */
+        Vector(size_t wSize=0) { this->resize(wSize); }
+
+        /** Build a Vector by shifting the data block. */
+        Vector(Vector<T>& other) : VectorBase<T>(other.mData,other.mStart,other.mEnd) { other.mData=NULL; }
+
+        /** Build a Vector by copying another. */
+        Vector(const Vector<T>& other):VectorBase<T>() { this->clone(other); }
+
+        /** Build a Vector with explicit values. */
+        Vector(T* wData, T* wStart, T* wEnd) : VectorBase<T>(wData,wStart,wEnd) { }
+
+        /** Build a vector from an existing block, NOT to be deleted upon destruction. */
+        Vector(T* wStart, size_t span) : VectorBase<T>(NULL,wStart,wStart+span) { }
+
+        /** Build a Vector by concatenation. */
+        Vector(const Vector<T>& other1, const Vector<T>& other2):VectorBase<T>() {
+                assert(this->mData == 0);
+                this->vConcat(other1,other2);
+        }
+
+        //@{
+
+        /** Assign from another Vector, shifting ownership. */
+        void operator=(Vector<T>& other) { this->shiftMem(other); }
+
+        /** Assign from another Vector, copying. */
+        void operator=(const Vector<T>& other) { this->clone(other); }
+
+        /** Return an alias to a segment of this Vector. */
+        Vector<T> segment(size_t start, size_t span)
+        {
+                T* wStart = this->mStart + start;
+                T* wEnd = wStart + span;
+                assert(wEnd<=this->mEnd);
+                return Vector<T>(NULL,wStart,wEnd);
+        }
+
+        /** Return an alias to a segment of this Vector. */
+        const Vector<T> segment(size_t start, size_t span) const
+        {
+                T* wStart = this->mStart + start;
+                T* wEnd = wStart + span;
+                assert(wEnd<=this->mEnd);
+                return Vector<T>(NULL,wStart,wEnd);
+        }
+
+        Vector<T> head(size_t span) { return segment(0,span); }
+        const Vector<T> head(size_t span) const { return segment(0,span); }
+        Vector<T> tail(size_t start) { return segment(start,this->size()-start); }
+        const Vector<T> tail(size_t start) const { return segment(start,this->size()-start); }
+
+        /**@name Iterator types. */
+        //@{
+        typedef T* iterator;
+        typedef const T* const_iterator;
+        //@}
+
+        //@}
+};
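
The segment()/head()/tail() accessors return non-owning aliases (the returned Vector carries a NULL mData), so writing through them is visible in the original storage. A brief sketch, assuming <iostream> is available for the print operator declared just below:

void aliasExample()
{
	Vector<int> v(10);
	v.fill(0);
	v.segment(2, 5).fill(9);      // v[2..6] are now 9; the alias owns nothing
	v.head(2).fill(1);            // v[0..1] are now 1
	std::cout << v << std::endl;  // prints via the operator<< defined below
}
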
+
+
+
+
+
+/** Basic print operator for Vector objects. */
+template <class T>
+std::ostream& operator<<(std::ostream& os, const Vector<T>& v)
+{
+        for (unsigned i=0; i<v.size(); i++) os << v[i] << " ";
+        return os;
+}
+
+
+
+#endif
+// vim: ts=4 sw=4
diff --git a/lib/decoding/openbts/Viterbi.h b/lib/decoding/openbts/Viterbi.h
new file mode 100644
index 0000000..77bd599
--- /dev/null
+++ b/lib/decoding/openbts/Viterbi.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2013, 2014 Range Networks, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This use of this software may be subject to additional restrictions.
+ * See the LEGAL file in the main directory for details.
+ */
+
+
+#ifndef _VITERBI_H_
+#define _VITERBI_H_ 1
+
+// (pat) Virtual base class for Viterbi and Turbo coder/decoders.
+class ViterbiBase {
+	public:
+	virtual void encode(const BitVector &in, BitVector& target) const = 0;
+	virtual void decode(const SoftVector &in, BitVector& target) = 0;
+	// (pat) Return error count from most recent decoder run.
+	// If this returns -1, the derived Viterbi class does not implement it.
+	virtual int getBEC() { return -1; }
+	//virtual ~ViterbiBase();   Currently none of these have destructors.
+
+	// These functions are logically part of the Viterbi functionality, even though they do not use any class variables.
+	unsigned applyPoly(uint64_t val, uint64_t poly);
+	unsigned applyPoly(uint64_t val, uint64_t poly, unsigned order);
+};
+#endif
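
A sketch of how the -1 sentinel is meant to be consumed, assuming BitVector.h has already been included by the translation unit (as the .cpp files in this directory do):

void reportErrors(ViterbiBase& coder, const SoftVector& rx, BitVector& decoded)
{
	coder.decode(rx, decoded);
	int bec = coder.getBEC();
	if (bec >= 0) {
		// bec is the bit-error count from the decode above;
		// -1 means this coder does not track bit errors.
	}
}
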
diff --git a/lib/decoding/openbts/ViterbiR204.cpp b/lib/decoding/openbts/ViterbiR204.cpp
new file mode 100644
index 0000000..296e292
--- /dev/null
+++ b/lib/decoding/openbts/ViterbiR204.cpp
@@ -0,0 +1,301 @@
+/*
+ * Copyright 2008, 2009, 2014 Free Software Foundation, Inc.
+ * Copyright 2014 Range Networks, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This use of this software may be subject to additional restrictions.
+ * See the LEGAL file in the main directory for details.
+ */
+
+
+
+
+#include "BitVector.h"
+#include "ViterbiR204.h"
+#include <iostream>
+#include <stdio.h>
+#include <sstream>
+#include <string.h>
+
+using namespace std;
+
+
+/**
+  Apply a Galois polynomial to a binary sequence.
+  @param val The input sequence.
+  @param poly The polynomial.
+  @param order The order of the polynomial.
+  @return Single-bit result.
+*/
+unsigned ViterbiBase::applyPoly(uint64_t val, uint64_t poly, unsigned order)
+{
+	uint64_t prod = val & poly;
+	unsigned sum = prod;
+	for (unsigned i=1; i<order; i++) sum ^= prod>>i;
+	return sum & 0x01;
+}
+
+unsigned ViterbiBase::applyPoly(uint64_t val, uint64_t poly)
+{
+	uint64_t prod = val & poly;
+	prod = (prod ^ (prod >> 32));
+	prod = (prod ^ (prod >> 16));
+	prod = (prod ^ (prod >> 8));
+	prod = (prod ^ (prod >> 4));
+	prod = (prod ^ (prod >> 2));
+	prod = (prod ^ (prod >> 1));
+	return prod & 0x01;
+}
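
Both overloads reduce to the parity of (val & poly): the three-argument form over the low `order` bits, the two-argument form over all 64 bits. A small spot check against G0 = 0x19 (binary 11001), illustrative only, using ViterbiR2O4 simply to get a concrete instance:

void applyPolyExample()
{
	ViterbiR2O4 coder;                             // any concrete ViterbiBase works
	unsigned b0 = coder.applyPoly(0x1F, 0x19, 5);  // 0x1F & 0x19 = 0b11001, three ones -> 1
	unsigned b1 = coder.applyPoly(0x02, 0x19, 5);  // 0x02 & 0x19 = 0, even parity -> 0
	unsigned b2 = coder.applyPoly(0x1F, 0x19);     // full-width form agrees -> 1
	assert(b0 == 1 && b1 == 0 && b2 == 1);
}
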
+
+
+
+//void BitVector::encode(const ViterbiR2O4& coder, BitVector& target)
+void ViterbiR2O4::encode(const BitVector& in, BitVector& target) const
+{
+	const ViterbiR2O4& coder = *this;
+	size_t sz = in.size();
+
+	assert(sz*coder.iRate() == target.size());
+
+	// Build a "history" array where each element contains the full history.
+	uint32_t history[sz];
+	uint32_t accum = 0;
+	for (size_t i=0; i<sz; i++) {
+		accum = (accum<<1) | in.bit(i);
+		history[i] = accum;
+	}
+
+	// Look up histories in the pre-generated state table.
+	char *op = target.begin();
+	for (size_t i=0; i<sz; i++) {
+		unsigned index = coder.cMask() & history[i];
+		for (unsigned g=0; g<coder.iRate(); g++) {
+			*op++ = coder.stateTable(g,index);
+		}
+	}
+}
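
Each encoded pair is just the parity of the last five input bits masked by each generator; the precomputed state table captures exactly that. A sketch that reproduces one output pair without the tables, where window is the newest-bit-in-bit-0 history, i.e. history[i] & cMask() above:

unsigned encodePairDirect(uint32_t window)
{
	ViterbiR2O4 coder;
	unsigned c0 = coder.applyPoly(window, 0x19, 5);  // G0 = 1 + D**3 + D**4
	unsigned c1 = coder.applyPoly(window, 0x1b, 5);  // G1 = 1 + D + D**3 + D**4
	return (c0 << 1) | c1;  // same packing as mGeneratorTable below
}
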
+
+
+ViterbiR2O4::ViterbiR2O4()
+{
+	assert(mDeferral < 32);
+	// (pat) The generator polynomials are: G0 = 1 + D**3 + D**4; and G1 = 1 + D + D**3 + D**4
+	mCoeffs[0] = 0x019;     // G0 = D**4 + D**3 + 1; represented as binary 11001
+	mCoeffs[1] = 0x01b;     // G1 = D**4 + D**3 + D + 1; represented as binary 11011
+	computeStateTables(0);
+	computeStateTables(1);
+	computeGeneratorTable();
+}
+
+
+void ViterbiR2O4::initializeStates()
+{
+	for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
+	for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
+}
+
+
+
+// (pat) The state machine has 16 states.
+// Each state has two possible next states, corresponding to a 0 or 1 input to the original encoder,
+// which are saved in mStateTable in consecutive locations.
+// In other words, the mStateTable second index is ((current_state << 1) + encoder_bit).
+// g is 0 or 1 for the first or second bit of the encoded stream, i.e., the one we are decoding.
+void ViterbiR2O4::computeStateTables(unsigned g)
+{
+	assert(g<mIRate);
+	for (unsigned state=0; state<mIStates; state++) {
+		// 0 input
+		uint32_t inputVal = state<<1;
+		mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g], mOrder+1);
+		// 1 input
+		inputVal |= 1;
+		mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g], mOrder+1);
+	}
+}
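
A worked spot check of one table entry: with the previous four input bits 0101 (newest in bit 0) and a new input bit of 1, the index is 0x0B; G0 masks it to 0b01001 (even parity, output 0) and G1 to 0b01011 (odd parity, output 1). Illustrative only:

void stateTableCheck()
{
	ViterbiR2O4 coder;
	assert(coder.stateTable(0, 0x0B) == 0);  // G0 output for index 0b01011
	assert(coder.stateTable(1, 0x0B) == 1);  // G1 output for index 0b01011
}
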
+
+void ViterbiR2O4::computeGeneratorTable()
+{
+	for (unsigned index=0; index<mIStates*2; index++) {
+		mGeneratorTable[index] = (mStateTable[0][index]<<1) | mStateTable[1][index];
+	}
+}
+
+
+void ViterbiR2O4::branchCandidates()
+{
+	// Branch to generate new input states.
+	const vCand *sp = mSurvivors;
+	for (unsigned i=0; i<mNumCands; i+=2) {
+		// extend and suffix
+		const uint32_t iState0 = (sp->iState) << 1;				// input state for 0
+		const uint32_t iState1 = iState0 | 0x01;				// input state for 1
+		const uint32_t oStateShifted = (sp->oState) << mIRate;	// shifted output (by 2)
+		const float cost = sp->cost;
+		int bec = sp->bitErrorCnt;
+		sp++;
+		// 0 input extension
+		mCandidates[i].cost = cost;
+		// mCMask is the low 5 bits, ie, full width of mGeneratorTable.
+		mCandidates[i].oState = oStateShifted | mGeneratorTable[iState0 & mCMask];
+		mCandidates[i].iState = iState0;
+		mCandidates[i].bitErrorCnt = bec;
+		// 1 input extension
+		mCandidates[i+1].cost = cost;
+		mCandidates[i+1].oState = oStateShifted | mGeneratorTable[iState1 & mCMask];
+		mCandidates[i+1].iState = iState1;
+		mCandidates[i+1].bitErrorCnt = bec;
+	}
+}
+
+
+void ViterbiR2O4::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
+{
+	const float *cTab[2] = {matchCost,mismatchCost};
+	for (unsigned i=0; i<mNumCands; i++) {
+		vCand& thisCand = mCandidates[i];
+		// We examine input bits 2 at a time for a rate 1/2 coder.
+		// (pat) mismatched will end up with bits in it from previous transitions,
+		// but we only use the bottom two bits of mismatched, so it is ok.
+		const unsigned mismatched = inSample ^ (thisCand.oState);
+		// (pat) TODO: Are these two tests swapped?
+		thisCand.cost += cTab[mismatched&0x01][1] + cTab[(mismatched>>1)&0x01][0];
+		if (mismatched & 1) { thisCand.bitErrorCnt++; }
+		if (mismatched & 2) { thisCand.bitErrorCnt++; }
+	}
+}
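
The per-bit costs consumed above come from the tables built in decode() below. As a sketch of that arithmetic (the probability reading is an interpretation, not stated in the original): a confident soft bit is cheap to match and expensive to mismatch, while an erased bit (0.5) costs the same either way.

void costExample(float softBit)   // softBit in 0.0 .. 1.0
{
	float pVal = softBit;
	if (pVal > 0.5F) pVal = 1.0F - pVal;   // distance from the nearer hard decision
	float ipVal = 1.0F - pVal;
	if (pVal < 0.01F) pVal = 0.01F;
	if (ipVal < 0.01F) ipVal = 0.01F;
	float matchCost = 0.25F / ipVal;       // softBit = 0.95 -> ~0.26
	float mismatchCost = 0.25F / pVal;     // softBit = 0.95 -> 5.0
	(void)matchCost; (void)mismatchCost;   // softBit = 0.5  -> both are 0.5
}
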
+
+
+void ViterbiR2O4::pruneCandidates()
+{
+	const vCand* c1 = mCandidates;					// 0-prefix
+	const vCand* c2 = mCandidates + mIStates;		// 1-prefix
+	for (unsigned i=0; i<mIStates; i++) {
+		if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
+		else mSurvivors[i] = c2[i];
+	}
+}
+
+
+const ViterbiR2O4::vCand& ViterbiR2O4::minCost() const
+{
+	int minIndex = 0;
+	float minCost = mSurvivors[0].cost;
+	for (unsigned i=1; i<mIStates; i++) {
+		const float thisCost = mSurvivors[i].cost;
+		if (thisCost>=minCost) continue;
+		minCost = thisCost;
+		minIndex=i;
+	}
+	return mSurvivors[minIndex];
+}
+
+
+const ViterbiR2O4::vCand* ViterbiR2O4::vstep(uint32_t inSample, const float *probs, const float *iprobs, bool isNotTailBits)
+{
+	branchCandidates();
+	// (pat) tail bits do not affect cost or error bit count of any branch.
+	if (isNotTailBits) getSoftCostMetrics(inSample,probs,iprobs);
+	pruneCandidates();
+	return &minCost();
+}
+
+
+void ViterbiR2O4::decode(const SoftVector &in, BitVector& target)
+{
+	ViterbiR2O4& decoder = *this;
+	const size_t sz = in.size();
+	const unsigned oSize = in.size() / decoder.iRate();
+	const unsigned deferral = decoder.deferral();
+	const size_t ctsz = sz + deferral*decoder.iRate();
+	assert(sz <= decoder.iRate()*target.size());
+
+	// Build a "history" array where each element contains the full history.
+	// (pat) We only use every other history element, so why are we setting them?
+	uint32_t history[ctsz];
+	{
+		BitVector bits = in.sliced();
+		uint32_t accum = 0;
+		for (size_t i=0; i<sz; i++) {
+			accum = (accum<<1) | bits.bit(i);
+			history[i] = accum;
+		}
+		// Repeat last bit at the end.
+		// (pat) TODO: really?  Does this matter?
+		for (size_t i=sz; i<ctsz; i++) {
+			accum = (accum<<1) | (accum & 0x01);
+			history[i] = accum;
+		}
+	}
+
+	// Precompute metric tables.
+	float matchCostTable[ctsz];
+	float mismatchCostTable[ctsz];
+	{
+		const float *dp = in.begin();
+		for (size_t i=0; i<sz; i++) {
+			// After folding toward 0.5, pVal is the probability that the hard-decision bit is wrong,
+			// and ipVal the probability that it is right.
+			float pVal = dp[i];
+			if (pVal>0.5F) pVal = 1.0F-pVal;
+			float ipVal = 1.0F-pVal;
+			// This is a cheap approximation to an ideal cost function.
+			if (pVal<0.01F) pVal = 0.01;
+			if (ipVal<0.01F) ipVal = 0.01;
+			matchCostTable[i] = 0.25F/ipVal;
+			mismatchCostTable[i] = 0.25F/pVal;
+		}
+	
+		// pad end of table with unknowns
+		// Note that these bits should not contribute to Bit Error Count.
+		for (size_t i=sz; i<ctsz; i++) {
+			matchCostTable[i] = 0.5F;
+			mismatchCostTable[i] = 0.5F;
+		}
+	}
+
+	{
+		decoder.initializeStates();
+		// Each sample of history[] carries its history.
+		// So we only have to process every iRate-th sample.
+		const unsigned step = decoder.iRate();
+		// input pointer
+		const uint32_t *ip = history + step - 1;
+		// output pointers
+		char *op = target.begin();
+		const char *const opt = target.end();	// (pat) Not right if target is larger than needed; should be: op + sz/2;
+		// table pointers
+		const float* match = matchCostTable;
+		const float* mismatch = mismatchCostTable;
+		size_t oCount = 0;
+		const ViterbiR2O4::vCand *minCost = NULL;
+		while (op<opt) {
+			// Viterbi algorithm
+			assert(match-matchCostTable<(float)sizeof(matchCostTable)/sizeof(matchCostTable[0])-1);
+			assert(mismatch-mismatchCostTable<(float)sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1);
+			minCost = decoder.vstep(*ip, match, mismatch, oCount < oSize);
+			ip += step;
+			match += step;
+			mismatch += step;
+			// output
+			if (oCount>=deferral) *op++ = (minCost->iState >> deferral)&0x01;
+			oCount++;
+		}
+		// Don't think minCost == NULL can happen.
+		mBitErrorCnt = minCost ? minCost->bitErrorCnt : 0;
+	}
+}
+
+// vim: ts=4 sw=4
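
For completeness, a sketch of a full encode/decode round trip with this coder. The BitVector(size_t) and SoftVector(const BitVector&) constructors are assumed from this library's BitVector.h; the 228-bit payload is only an example (the GSM control-channel block size):

void roundTripExample()
{
	ViterbiR2O4 coder;
	BitVector frame(228);                 // payload, including the zero tail bits
	// ... fill frame ...
	BitVector coded(frame.size() * 2);    // rate 1/2
	coder.encode(frame, coded);

	SoftVector soft(coded);               // assumed conversion; hard 0/1 soft values
	BitVector decoded(frame.size());
	coder.decode(soft, decoded);
	int bec = coder.getBEC();             // bit errors seen by the most recent decode
	(void)bec;
}
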
diff --git a/lib/decoding/openbts/ViterbiR204.h b/lib/decoding/openbts/ViterbiR204.h
new file mode 100644
index 0000000..090f1e8
--- /dev/null
+++ b/lib/decoding/openbts/ViterbiR204.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2008, 2009, 2014 Free Software Foundation, Inc.
+ * Copyright 2014 Range Networks, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This use of this software may be subject to additional restrictions.
+ * See the LEGAL file in the main directory for details.
+ */
+
+#ifndef _VITERBIR204_H_
+#define _VITERBIR204_H_ 1
+
+#include "Viterbi.h"
+
+
+/**
+	Class to represent convolutional coders/decoders of rate 1/2, memory length 4.
+	This is the "workhorse" coder for most GSM channels.
+*/
+class ViterbiR2O4 : public ViterbiBase {
+
+	private:
+		/**@name Lots of precomputed elements so the compiler can optimize like hell. */
+		//@{
+		/**@name Core values. */
+		//@{
+		static const unsigned mIRate = 2;	///< reciprocal of rate
+		static const unsigned mOrder = 4;	///< memory length of generators
+		//@}
+		/**@name Derived values. */
+		//@{
+		static const unsigned mIStates = 0x01 << mOrder;	///< (16) number of states, number of survivors
+		static const uint32_t mSMask = mIStates-1;			///< survivor mask
+		static const uint32_t mCMask = (mSMask<<1) | 0x01;	///< candidate mask
+		static const uint32_t mOMask = (0x01<<mIRate)-1;	///< output mask, all iRate low bits set
+		static const unsigned mNumCands = mIStates*2;		///< number of candidates to generate during branching
+		static const unsigned mDeferral = 6*mOrder;			///< deferral to be used
+		//@}
+		//@}
+
+		/** Precomputed tables. */
+		//@{
+		uint32_t mCoeffs[mIRate];					///< polynomial for each generator
+		// (pat) There are 16 states, each of which has two possible output states.
+		// These are stored in these two tables in consecutive locations.
+		uint32_t mStateTable[mIRate][2*mIStates];	///< precomputed generator output tables
+		// mGeneratorTable is the encoder output state for a given input state and encoder input bit.
+		uint32_t mGeneratorTable[2*mIStates];		///< precomputed coder output table
+		//@}
+		int mBitErrorCnt;
+	
+	public:
+
+		/**
+		  A candidate sequence in a Viterbi decoder.
+		  The 32-bit state register can support a deferral of 6 with a 4th-order coder.
+		 */
+		typedef struct candStruct {
+			uint32_t iState;	///< encoder input associated with this candidate
+			uint32_t oState;	///< encoder output associated with this candidate
+			float cost;			///< cost (metric value), float to support soft inputs
+			int bitErrorCnt;	///< number of bit errors in the encoded vector being decoded.
+		} vCand;
+
+		/** Clear a structure. */
+		void vitClear(vCand& v)
+		{
+			v.iState=0;
+			v.oState=0;
+			v.cost=0;
+			v.bitErrorCnt = 0;
+		}
+		
+
+	private:
+
+		/**@name Survivors and candidates. */
+		//@{
+		vCand mSurvivors[mIStates];			///< current survivor pool
+		vCand mCandidates[2*mIStates];		///< current candidate pool
+		//@}
+
+	public:
+
+		unsigned iRate() const { return mIRate; }
+		uint32_t cMask() const { return mCMask; }
+		uint32_t stateTable(unsigned g, unsigned i) const { return mStateTable[g][i]; }
+		unsigned deferral() const { return mDeferral; }
+		
+
+		ViterbiR2O4();
+
+		/** Set all cost metrics to zero. */
+		void initializeStates();
+
+		/**
+			Full cycle of the Viterbi algorithm: branch, metrics, prune, select.
+			@return reference to minimum-cost candidate.
+		*/
+		const vCand* vstep(uint32_t inSample, const float *probs, const float *iprobs, bool isNotTailBits);
+
+	private:
+
+		/** Branch survivors into new candidates. */
+		void branchCandidates();
+
+		/** Compute cost metrics for soft-inputs. */
+		void getSoftCostMetrics(uint32_t inSample, const float *probs, const float *iprobs);
+
+		/** Select survivors from the candidate set. */
+		void pruneCandidates();
+
+		/** Find the minimum cost survivor. */
+		const vCand& minCost() const;
+
+		/**
+			Precompute the state tables.
+			@param g Generator index 0..((1/rate)-1)
+		*/
+		void computeStateTables(unsigned g);
+
+		/**
+			Precompute the generator outputs.
+			mCoeffs must be defined first.
+		*/
+		void computeGeneratorTable();
+
+	public:
+		void encode(const BitVector &in, BitVector& target) const;
+		void decode(const SoftVector &in, BitVector& target);
+		int getBEC() { return mBitErrorCnt; }
+};
+#endif