/*
 * Copyright 2013, 2014 Range Networks, Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * This use of this software may be subject to additional restrictions.
 * See the LEGAL file in the main directory for details.
 */


#include "BitVector.h"
#include "AmrCoder.h"
#include <iostream>
#include <stdio.h>
#include <sstream>
#include <stdint.h>	// uint32_t is used directly in this file
#include <assert.h>	// as is assert()

using namespace std;
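
// The eight ViterbiTCH_AFS* classes below implement the channel coding of the
// AMR TCH/AFS speech modes (12.2 down to 4.75 kbit/s): recursive systematic
// convolutional codes whose generator/feedback polynomials and block sizes
// follow 3GPP TS 45.003 (GSM 05.03). Each class pairs encode(), the forward
// recursion with tail-bit termination, with decode(), a soft-decision Viterbi
// decoder built on precomputed state and generator tables.
//
// A minimal usage sketch for the 12.2 kbit/s coder (assuming the BitVector and
// SoftVector interfaces of the accompanying BitVector.h, e.g. length
// constructors and SoftVector(const BitVector&); the sizes are the ones
// asserted by encode() and decode() below):
//
//	ViterbiTCH_AFS12_2 coder;
//	BitVector speech(250);		// 250-bit input block (see the asserts in encode())
//	BitVector coded(508);
//	coder.encode(speech, coded);
//	SoftVector soft(coded);		// or soft values in [0,1] from the demodulator
//	BitVector recovered(250);
//	coder.decode(soft, recovered);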


ViterbiTCH_AFS12_2::ViterbiTCH_AFS12_2()
{
	assert(mDeferral < 32);
	mCoeffs[0] = 0x019;
	mCoeffsFB[0] = 0x019;
	mCoeffs[1] = 0x01b;
	mCoeffsFB[1] = 0x019;
	for (unsigned i = 0; i < mIRate; i++) {
		computeStateTables(i);
	}
	computeGeneratorTable();
}


//void BitVector::encode(const ViterbiTCH_AFS12_2& coder, BitVector& target) const
void ViterbiTCH_AFS12_2::encode(const BitVector& in, BitVector& target) const
{
	assert(in.size() == 250);
	assert(target.size() == 508);
	const char *u = in.begin();
	char *C = target.begin();
	const unsigned H = 4;
	BitVector r(254+H);
	for (int k = -H; k <= -1; k++) r[k+H] = 0;
	for (unsigned k = 0; k <= 249; k++) {
		r[k+H] = u[k] ^ r[k-3+H] ^ r[k-4+H];
		C[2*k] = u[k];
		C[2*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
	}
	// termination
	for (unsigned k = 250; k <= 253; k++) {
		r[k+H] = 0;
		C[2*k] = r[k-3+H] ^ r[k-4+H];
		C[2*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
	}
}
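
// The loops above implement the rate-1/2 recursive systematic code of the
// 12.2 kbit/s mode: r[] is the feedback shift register (feedback taps at D^3
// and D^4, i.e. 0x019), C[2k] carries the systematic bit u[k] and C[2k+1] the
// parity bit (generator 0x01b over the register). The termination loop feeds
// the register with exactly the bits that return it to the all-zero state,
// which yields the four tail-bit pairs appended to the block. The remaining
// encode() methods follow the same pattern with the polynomials, rates and
// block sizes of their respective AMR modes.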


//void BitVector::encode(const ViterbiTCH_AFS10_2& coder, BitVector& target)
void ViterbiTCH_AFS10_2::encode(const BitVector& in, BitVector& target) const
{
	assert(in.size() == 210);
	assert(target.size() == 642);
	const char *u = in.begin();
	char *C = target.begin();
	const unsigned H = 4;
	BitVector r(214+H);
	for (int k = -H; k <= -1; k++) r[k+H] = 0;
	for (unsigned k = 0; k <= 209; k++) {
		r[k+H] = u[k] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
		C[3*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
		C[3*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
		C[3*k+2] = u[k];
	}
	// termination
	for (unsigned k = 210; k <= 213; k++) {
		r[k+H] = 0;
		C[3*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
		C[3*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
		C[3*k+2] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
	}
}


//void BitVector::encode(const ViterbiTCH_AFS7_95& coder, BitVector& target)
void ViterbiTCH_AFS7_95::encode(const BitVector& in, BitVector& target) const
{
	assert(in.size() == 165);
	assert(target.size() == 513);
	const char *u = in.begin();
	char *C = target.begin();
	const unsigned H = 6;
	BitVector r(171+H);
	for (int k = -H; k <= -1; k++) r[k+H] = 0;
	for (unsigned k = 0; k <= 164; k++) {
		r[k+H] = u[k] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
		C[3*k] = u[k];
		C[3*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-4+H] ^ r[k-6+H];
		C[3*k+2] = r[k+H] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
	}
	// termination
	for (unsigned k = 165; k <= 170; k++) {
		r[k+H] = 0;
		C[3*k] = r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
		C[3*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-4+H] ^ r[k-6+H];
		C[3*k+2] = r[k+H] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
	}
}


void ViterbiTCH_AFS7_4::encode(const BitVector& in, BitVector& target) const
{
	assert(in.size() == 154);
	assert(target.size() == 474);
	const char *u = in.begin();
	char *C = target.begin();
	const unsigned H = 4;
	BitVector r(158+H);
	for (int k = -H; k <= -1; k++) r[k+H] = 0;
	for (unsigned k = 0; k <= 153; k++) {
		r[k+H] = u[k] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
		C[3*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
		C[3*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
		C[3*k+2] = u[k];
	}
	// termination
	for (unsigned k = 154; k <= 157; k++) {
		r[k+H] = 0;
		C[3*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
		C[3*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
		C[3*k+2] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
	}
}


void ViterbiTCH_AFS6_7::encode(const BitVector& in, BitVector& target) const
{
	assert(in.size() == 140);
	assert(target.size() == 576);
	const char *u = in.begin();
	char *C = target.begin();
	const unsigned H = 4;
	BitVector r(144+H);
	for (int k = -H; k <= -1; k++) r[k+H] = 0;
	for (unsigned k = 0; k <= 139; k++) {
		r[k+H] = u[k] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
		C[4*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
		C[4*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
		C[4*k+2] = u[k];
		C[4*k+3] = u[k];
	}
	// termination
	for (unsigned k = 140; k <= 143; k++) {
		r[k+H] = 0;
		C[4*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
		C[4*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
		C[4*k+2] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
		C[4*k+3] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
	}
}


void ViterbiTCH_AFS5_9::encode(const BitVector& in, BitVector& target) const
{
	assert(in.size() == 124);
	assert(target.size() == 520);
	const char *u = in.begin();
	char *C = target.begin();
	const unsigned H = 6;
	BitVector r(130+H);
	for (int k = -H; k <= -1; k++) r[k+H] = 0;
	for (unsigned k = 0; k <= 123; k++) {
		r[k+H] = u[k] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
		C[4*k] = r[k+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
		C[4*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-4+H] ^ r[k-6+H];
		C[4*k+2] = u[k];
		C[4*k+3] = u[k];
	}
	// termination
	for (unsigned k = 124; k <= 129; k++) {
		r[k+H] = 0;
		C[4*k] = r[k+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
		C[4*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-4+H] ^ r[k-6+H];
		C[4*k+2] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
		C[4*k+3] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
	}
}


void ViterbiTCH_AFS5_15::encode(const BitVector& in, BitVector& target) const
{
	assert(in.size() == 109);
	assert(target.size() == 565);
	const char *u = in.begin();
	char *C = target.begin();
	const unsigned H = 4;
	BitVector r(113+H);
	for (int k = -H; k <= -1; k++) r[k+H] = 0;
	for (unsigned k = 0; k <= 108; k++) {
		r[k+H] = u[k] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
		C[5*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
		C[5*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
		C[5*k+2] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
		C[5*k+3] = u[k];
		C[5*k+4] = u[k];
	}
	// termination
	for (unsigned k = 109; k <= 112; k++) {
		r[k+H] = 0;
		C[5*k] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
		C[5*k+1] = r[k+H] ^ r[k-1+H] ^ r[k-3+H] ^ r[k-4+H];
		C[5*k+2] = r[k+H] ^ r[k-2+H] ^ r[k-4+H];
		C[5*k+3] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
		C[5*k+4] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H];
	}
}


void ViterbiTCH_AFS4_75::encode(const BitVector& in, BitVector& target) const
{
	assert(in.size() == 101);
	assert(target.size() == 535);
	const char *u = in.begin();
	char *C = target.begin();
	const unsigned H = 6;
	BitVector r(107+H);
	for (int k = -H; k <= -1; k++) r[k+H] = 0;
	for (unsigned k = 0; k <= 100; k++) {
		r[k+H] = u[k] ^ r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
		C[5*k] = r[k+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
		C[5*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
		C[5*k+2] = r[k+H] ^ r[k-1+H] ^ r[k-4+H] ^ r[k-6+H];
		C[5*k+3] = u[k];
		C[5*k+4] = u[k];
	}
	// termination
	// C[5*k+3] and C[5*k+4] both carry the termination input bit
	// u(k) = r(k-1)^r(k-2)^r(k-3)^r(k-4)^r(k-6), which drives the feedback
	// register back to the all-zero state.
	for (unsigned k = 101; k <= 106; k++) {
		r[k+H] = 0;
		C[5*k] = r[k+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
		C[5*k+1] = r[k+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-5+H] ^ r[k-6+H];
		C[5*k+2] = r[k+H] ^ r[k-1+H] ^ r[k-4+H] ^ r[k-6+H];
		C[5*k+3] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
		C[5*k+4] = r[k-1+H] ^ r[k-2+H] ^ r[k-3+H] ^ r[k-4+H] ^ r[k-6+H];
	}
}
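
// Everything from here on is the soft-decision Viterbi machinery, instantiated
// once per AMR mode. For each class: computeStateTables() and
// computeGeneratorTable() precompute the encoder output bits for every
// (state, input) pair; branchCandidates() extends every surviving path by a
// hypothesized 0 and 1; getSoftCostMetrics() charges each candidate for the
// received soft bits its outputs mismatch; pruneCandidates() keeps the cheaper
// of the two paths merging into each state; and decode() runs these steps over
// the whole block, reading each output bit deferral() steps behind the current
// best path.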


void ViterbiTCH_AFS12_2::initializeStates()
{
	for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
	for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
}


void ViterbiTCH_AFS12_2::computeStateTables(unsigned g)
{
	assert(g<mIRate);
	for (unsigned state=0; state<mIStates; state++) {
		for (unsigned in = 0; in <= 1; in++) {
			uint32_t inputVal = (state<<1) | in;
			mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
		}
	}
}


void ViterbiTCH_AFS12_2::computeGeneratorTable()
{
	for (unsigned index=0; index<mIStates*2; index++) {
		uint32_t t = 0;
		for (unsigned i = 0; i < mIRate; i++) {
			t = (t << 1) | mStateTable[i][index];
		}
		mGeneratorTable[index] = t;
	}
}
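
// For generator g, mStateTable[g][(state<<1)|in] is the single output bit the
// recursive encoder produces when bit `in` is shifted into `state`; the
// difference polynomial mCoeffs[g] ^ mCoeffsFB[g] accounts for the feedback
// term, and the input bit is XORed back on at the end. computeGeneratorTable()
// then packs the mIRate per-generator bits into one word per table index so
// branchCandidates() can fetch all outputs of a transition with one lookup.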


void ViterbiTCH_AFS12_2::branchCandidates()
{
	// Branch to generate new input states.
	const vCand *sp = mSurvivors;
	for (unsigned cand=0; cand<mNumCands; cand+=2) {
		uint32_t oStateShifted = (sp->oState) << mIRate;
		for (unsigned in = 0; in <= 1; in++) {
			mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
			mCandidates[cand+in].cost = sp->cost;
			uint32_t outputs = oStateShifted;
			for (unsigned out = 0; out < mIRate; out++) {
				char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
				char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
				mCandidates[cand+in].rState[out] = rState;
				outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
			}
			mCandidates[cand+in].oState = outputs;
		}
		sp++;
	}
}
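
// Each candidate keeps one register image per generator (rState[out]): the
// feedback bit is recomputed from that register with applyPoly(), folded back
// in, and the hypothesized input bit shifted into the low end, mirroring the
// recursive encoder's state update. The packed generator table then supplies
// the output bit for generator `out`, which is ORed into oState next to the
// outputs inherited from the survivor.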


void ViterbiTCH_AFS12_2::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
{
	const float *cTab[2] = {matchCost,mismatchCost};
	for (unsigned i=0; i<mNumCands; i++) {
		vCand& thisCand = mCandidates[i];
		const unsigned mismatched = inSample ^ (thisCand.oState);
		for (unsigned i = 0; i < mIRate; i++) {
			thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
		}
	}
}


void ViterbiTCH_AFS12_2::pruneCandidates()
{
	const vCand* c1 = mCandidates;			// 0-prefix
	const vCand* c2 = mCandidates + mIStates;	// 1-prefix
	for (unsigned i=0; i<mIStates; i++) {
		if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
		else mSurvivors[i] = c2[i];
	}
}
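
// Cost accounting and pruning: `mismatched` flags, bit by bit, where a
// candidate's hypothesized outputs disagree with the received hard decisions
// in inSample; agreeing bits add the (small) matchCost entry and disagreeing
// bits the (large) mismatchCost entry for that bit position. pruneCandidates()
// then compares the two candidates that merge into the same truncated state
// (one whose dropped oldest input bit was 0, one whose was 1) and keeps the
// cheaper path as the survivor.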


const ViterbiTCH_AFS12_2::vCand& ViterbiTCH_AFS12_2::minCost() const
{
	int minIndex = 0;
	float minCost = mSurvivors[0].cost;
	for (unsigned i=1; i<mIStates; i++) {
		const float thisCost = mSurvivors[i].cost;
		if (thisCost>=minCost) continue;
		minCost = thisCost;
		minIndex=i;
	}
	return mSurvivors[minIndex];
}


const ViterbiTCH_AFS12_2::vCand& ViterbiTCH_AFS12_2::step(uint32_t inSample, const float *probs, const float *iprobs)
{
	branchCandidates();
	getSoftCostMetrics(inSample,probs,iprobs);
	pruneCandidates();
	return minCost();
}


void ViterbiTCH_AFS12_2::decode(const SoftVector &in, BitVector& target)
{
	ViterbiTCH_AFS12_2 &decoder = *this;
	const size_t sz = in.size() - 8;
	const unsigned deferral = decoder.deferral();
	const size_t ctsz = sz + deferral*decoder.iRate();
	assert(sz == decoder.iRate()*target.size());

	// Build a "history" array where each element contains the full history.
	uint32_t history[ctsz];
	{
		BitVector bits = in.sliced();
		uint32_t accum = 0;
		for (size_t i=0; i<sz; i++) {
			accum = (accum<<1) | bits.bit(i);
			history[i] = accum;
		}
		// Repeat last bit at the end.
		for (size_t i=sz; i<ctsz; i++) {
			accum = (accum<<1) | (accum & 0x01);
			history[i] = accum;
		}
	}

	// Precompute metric tables.
	float matchCostTable[ctsz];
	float mismatchCostTable[ctsz];
	{
		const float *dp = in.begin();
		for (size_t i=0; i<sz; i++) {
			// pVal is the probability that the hard bit decision is incorrect.
			// ipVal is the probability that the hard bit decision is correct.
			float pVal = dp[i];
			if (pVal>0.5F) pVal = 1.0F-pVal;
			float ipVal = 1.0F-pVal;
			// This is a cheap approximation to an ideal cost function.
			if (pVal<0.01F) pVal = 0.01;
			if (ipVal<0.01F) ipVal = 0.01;
			matchCostTable[i] = 0.25F/ipVal;
			mismatchCostTable[i] = 0.25F/pVal;
		}

		// pad end of table with unknowns
		for (size_t i=sz; i<ctsz; i++) {
			matchCostTable[i] = 0.5F;
			mismatchCostTable[i] = 0.5F;
		}
	}

	{
		decoder.initializeStates();
		// Each sample of history[] carries its history.
		// So we only have to process every iRate-th sample.
		const unsigned step = decoder.iRate();
		// input pointer
		const uint32_t *ip = history + step - 1;
		// output pointers
		char *op = target.begin();
		const char *const opt = target.end();
		// table pointers
		const float* match = matchCostTable;
		const float* mismatch = mismatchCostTable;
		size_t oCount = 0;
		while (op<opt) {
			// Viterbi algorithm
			assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
			assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
			const ViterbiTCH_AFS12_2::vCand &minCost = decoder.step(*ip, match, mismatch);
			ip += step;
			match += step;
			mismatch += step;
			// output
			if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
			oCount++;
		}
	}
}
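
// decode() discards the tail samples (in.size() - 8 here: four termination
// steps times two coded bits for the rate-1/2 code), packs the running hard
// decisions into one history word per position, and derives per-bit match and
// mismatch costs from the soft values (roughly 0.25/p, clamped, with 0.5 for
// the padded positions at the end). The trellis is stepped once per iRate()
// received bits, since each history word already carries the preceding bits,
// and each output bit is taken deferral() steps late from the winning path's
// input history, which is why the input is padded by repeating the last bit.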


ViterbiTCH_AFS10_2::ViterbiTCH_AFS10_2()
{
	assert(mDeferral < 32);
	mCoeffs[0] = 0x01b;
	mCoeffsFB[0] = 0x01f;
	mCoeffs[1] = 0x015;
	mCoeffsFB[1] = 0x01f;
	mCoeffs[2] = 0x01f;
	mCoeffsFB[2] = 0x01f;
	for (unsigned i = 0; i < mIRate; i++) {
		computeStateTables(i);
	}
	computeGeneratorTable();
}


void ViterbiTCH_AFS10_2::initializeStates()
{
	for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
	for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
}


void ViterbiTCH_AFS10_2::computeStateTables(unsigned g)
{
	assert(g<mIRate);
	for (unsigned state=0; state<mIStates; state++) {
		for (unsigned in = 0; in <= 1; in++) {
			uint32_t inputVal = (state<<1) | in;
			mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
		}
	}
}


void ViterbiTCH_AFS10_2::computeGeneratorTable()
{
	for (unsigned index=0; index<mIStates*2; index++) {
		uint32_t t = 0;
		for (unsigned i = 0; i < mIRate; i++) {
			t = (t << 1) | mStateTable[i][index];
		}
		mGeneratorTable[index] = t;
	}
}


void ViterbiTCH_AFS10_2::branchCandidates()
{
	// Branch to generate new input states.
	const vCand *sp = mSurvivors;
	for (unsigned cand=0; cand<mNumCands; cand+=2) {
		uint32_t oStateShifted = (sp->oState) << mIRate;
		for (unsigned in = 0; in <= 1; in++) {
			mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
			mCandidates[cand+in].cost = sp->cost;
			uint32_t outputs = oStateShifted;
			for (unsigned out = 0; out < mIRate; out++) {
				char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
				char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
				mCandidates[cand+in].rState[out] = rState;
				outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
			}
			mCandidates[cand+in].oState = outputs;
		}
		sp++;
	}
}


void ViterbiTCH_AFS10_2::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
{
	const float *cTab[2] = {matchCost,mismatchCost};
	for (unsigned i=0; i<mNumCands; i++) {
		vCand& thisCand = mCandidates[i];
		const unsigned mismatched = inSample ^ (thisCand.oState);
		for (unsigned i = 0; i < mIRate; i++) {
			thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
		}
	}
}


void ViterbiTCH_AFS10_2::pruneCandidates()
{
	const vCand* c1 = mCandidates;			// 0-prefix
	const vCand* c2 = mCandidates + mIStates;	// 1-prefix
	for (unsigned i=0; i<mIStates; i++) {
		if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
		else mSurvivors[i] = c2[i];
	}
}


const ViterbiTCH_AFS10_2::vCand& ViterbiTCH_AFS10_2::minCost() const
{
	int minIndex = 0;
	float minCost = mSurvivors[0].cost;
	for (unsigned i=1; i<mIStates; i++) {
		const float thisCost = mSurvivors[i].cost;
		if (thisCost>=minCost) continue;
		minCost = thisCost;
		minIndex=i;
	}
	return mSurvivors[minIndex];
}


const ViterbiTCH_AFS10_2::vCand& ViterbiTCH_AFS10_2::step(uint32_t inSample, const float *probs, const float *iprobs)
{
	branchCandidates();
	getSoftCostMetrics(inSample,probs,iprobs);
	pruneCandidates();
	return minCost();
}


void ViterbiTCH_AFS10_2::decode(const SoftVector &in, BitVector& target)
{
	ViterbiTCH_AFS10_2 &decoder = *this;
	const size_t sz = in.size() - 12;
	const unsigned deferral = decoder.deferral();
	const size_t ctsz = sz + deferral*decoder.iRate();
	assert(sz == decoder.iRate()*target.size());

	// Build a "history" array where each element contains the full history.
	uint32_t history[ctsz];
	{
		BitVector bits = in.sliced();
		uint32_t accum = 0;
		for (size_t i=0; i<sz; i++) {
			accum = (accum<<1) | bits.bit(i);
			history[i] = accum;
		}
		// Repeat last bit at the end.
		for (size_t i=sz; i<ctsz; i++) {
			accum = (accum<<1) | (accum & 0x01);
			history[i] = accum;
		}
	}

	// Precompute metric tables.
	float matchCostTable[ctsz];
	float mismatchCostTable[ctsz];
	{
		const float *dp = in.begin();
		for (size_t i=0; i<sz; i++) {
			// pVal is the probability that the hard bit decision is incorrect.
			// ipVal is the probability that the hard bit decision is correct.
			float pVal = dp[i];
			if (pVal>0.5F) pVal = 1.0F-pVal;
			float ipVal = 1.0F-pVal;
			// This is a cheap approximation to an ideal cost function.
			if (pVal<0.01F) pVal = 0.01;
			if (ipVal<0.01F) ipVal = 0.01;
			matchCostTable[i] = 0.25F/ipVal;
			mismatchCostTable[i] = 0.25F/pVal;
		}

		// pad end of table with unknowns
		for (size_t i=sz; i<ctsz; i++) {
			matchCostTable[i] = 0.5F;
			mismatchCostTable[i] = 0.5F;
		}
	}

	{
		decoder.initializeStates();
		// Each sample of history[] carries its history.
		// So we only have to process every iRate-th sample.
		const unsigned step = decoder.iRate();
		// input pointer
		const uint32_t *ip = history + step - 1;
		// output pointers
		char *op = target.begin();
		const char *const opt = target.end();
		// table pointers
		const float* match = matchCostTable;
		const float* mismatch = mismatchCostTable;
		size_t oCount = 0;
		while (op<opt) {
			// Viterbi algorithm
			assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
			assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
			const ViterbiTCH_AFS10_2::vCand &minCost = decoder.step(*ip, match, mismatch);
			ip += step;
			match += step;
			mismatch += step;
			// output
			if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
			oCount++;
		}
	}
}


ViterbiTCH_AFS7_95::ViterbiTCH_AFS7_95()
{
	assert(mDeferral < 32);
	mCoeffs[0] = 0x06d;
	mCoeffsFB[0] = 0x06d;
	mCoeffs[1] = 0x053;
	mCoeffsFB[1] = 0x06d;
	mCoeffs[2] = 0x05f;
	mCoeffsFB[2] = 0x06d;
	for (unsigned i = 0; i < mIRate; i++) {
		computeStateTables(i);
	}
	computeGeneratorTable();
}


void ViterbiTCH_AFS7_95::initializeStates()
{
	for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
	for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
}


void ViterbiTCH_AFS7_95::computeStateTables(unsigned g)
{
	assert(g<mIRate);
	for (unsigned state=0; state<mIStates; state++) {
		for (unsigned in = 0; in <= 1; in++) {
			uint32_t inputVal = (state<<1) | in;
			mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
		}
	}
}


void ViterbiTCH_AFS7_95::computeGeneratorTable()
{
	for (unsigned index=0; index<mIStates*2; index++) {
		uint32_t t = 0;
		for (unsigned i = 0; i < mIRate; i++) {
			t = (t << 1) | mStateTable[i][index];
		}
		mGeneratorTable[index] = t;
	}
}


void ViterbiTCH_AFS7_95::branchCandidates()
{
	// Branch to generate new input states.
	const vCand *sp = mSurvivors;
	for (unsigned cand=0; cand<mNumCands; cand+=2) {
		uint32_t oStateShifted = (sp->oState) << mIRate;
		for (unsigned in = 0; in <= 1; in++) {
			mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
			mCandidates[cand+in].cost = sp->cost;
			uint32_t outputs = oStateShifted;
			for (unsigned out = 0; out < mIRate; out++) {
				char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
				char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
				mCandidates[cand+in].rState[out] = rState;
				outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
			}
			mCandidates[cand+in].oState = outputs;
		}
		sp++;
	}
}


void ViterbiTCH_AFS7_95::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
{
	const float *cTab[2] = {matchCost,mismatchCost};
	for (unsigned i=0; i<mNumCands; i++) {
		vCand& thisCand = mCandidates[i];
		const unsigned mismatched = inSample ^ (thisCand.oState);
		for (unsigned i = 0; i < mIRate; i++) {
			thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
		}
	}
}


void ViterbiTCH_AFS7_95::pruneCandidates()
{
	const vCand* c1 = mCandidates;			// 0-prefix
	const vCand* c2 = mCandidates + mIStates;	// 1-prefix
	for (unsigned i=0; i<mIStates; i++) {
		if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
		else mSurvivors[i] = c2[i];
	}
}


const ViterbiTCH_AFS7_95::vCand& ViterbiTCH_AFS7_95::minCost() const
{
	int minIndex = 0;
	float minCost = mSurvivors[0].cost;
	for (unsigned i=1; i<mIStates; i++) {
		const float thisCost = mSurvivors[i].cost;
		if (thisCost>=minCost) continue;
		minCost = thisCost;
		minIndex=i;
	}
	return mSurvivors[minIndex];
}


const ViterbiTCH_AFS7_95::vCand& ViterbiTCH_AFS7_95::step(uint32_t inSample, const float *probs, const float *iprobs)
{
	branchCandidates();
	getSoftCostMetrics(inSample,probs,iprobs);
	pruneCandidates();
	return minCost();
}


void ViterbiTCH_AFS7_95::decode(const SoftVector &in, BitVector& target)
{
	ViterbiTCH_AFS7_95 &decoder = *this;
	const size_t sz = in.size() - 18;
	const unsigned deferral = decoder.deferral();
	const size_t ctsz = sz + deferral*decoder.iRate();
	assert(sz == decoder.iRate()*target.size());

	// Build a "history" array where each element contains the full history.
	uint32_t history[ctsz];
	{
		BitVector bits = in.sliced();
		uint32_t accum = 0;
		for (size_t i=0; i<sz; i++) {
			accum = (accum<<1) | bits.bit(i);
			history[i] = accum;
		}
		// Repeat last bit at the end.
		for (size_t i=sz; i<ctsz; i++) {
			accum = (accum<<1) | (accum & 0x01);
			history[i] = accum;
		}
	}

	// Precompute metric tables.
	float matchCostTable[ctsz];
	float mismatchCostTable[ctsz];
	{
		const float *dp = in.begin();
		for (size_t i=0; i<sz; i++) {
			// pVal is the probability that the hard bit decision is incorrect.
			// ipVal is the probability that the hard bit decision is correct.
			float pVal = dp[i];
			if (pVal>0.5F) pVal = 1.0F-pVal;
			float ipVal = 1.0F-pVal;
			// This is a cheap approximation to an ideal cost function.
			if (pVal<0.01F) pVal = 0.01;
			if (ipVal<0.01F) ipVal = 0.01;
			matchCostTable[i] = 0.25F/ipVal;
			mismatchCostTable[i] = 0.25F/pVal;
		}

		// pad end of table with unknowns
		for (size_t i=sz; i<ctsz; i++) {
			matchCostTable[i] = 0.5F;
			mismatchCostTable[i] = 0.5F;
		}
	}

	{
		decoder.initializeStates();
		// Each sample of history[] carries its history.
		// So we only have to process every iRate-th sample.
		const unsigned step = decoder.iRate();
		// input pointer
		const uint32_t *ip = history + step - 1;
		// output pointers
		char *op = target.begin();
		const char *const opt = target.end();
		// table pointers
		const float* match = matchCostTable;
		const float* mismatch = mismatchCostTable;
		size_t oCount = 0;
		while (op<opt) {
			// Viterbi algorithm
			assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
			assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
			const ViterbiTCH_AFS7_95::vCand &minCost = decoder.step(*ip, match, mismatch);
			ip += step;
			match += step;
			mismatch += step;
			// output
			if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
			oCount++;
		}
	}
}


ViterbiTCH_AFS7_4::ViterbiTCH_AFS7_4()
{
	assert(mDeferral < 32);
	mCoeffs[0] = 0x01b;
	mCoeffsFB[0] = 0x01f;
	mCoeffs[1] = 0x015;
	mCoeffsFB[1] = 0x01f;
	mCoeffs[2] = 0x01f;
	mCoeffsFB[2] = 0x01f;
	for (unsigned i = 0; i < mIRate; i++) {
		computeStateTables(i);
	}
	computeGeneratorTable();
}


void ViterbiTCH_AFS7_4::initializeStates()
{
	for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
	for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
}


void ViterbiTCH_AFS7_4::computeStateTables(unsigned g)
{
	assert(g<mIRate);
	for (unsigned state=0; state<mIStates; state++) {
		for (unsigned in = 0; in <= 1; in++) {
			uint32_t inputVal = (state<<1) | in;
			mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
		}
	}
}


void ViterbiTCH_AFS7_4::computeGeneratorTable()
{
	for (unsigned index=0; index<mIStates*2; index++) {
		uint32_t t = 0;
		for (unsigned i = 0; i < mIRate; i++) {
			t = (t << 1) | mStateTable[i][index];
		}
		mGeneratorTable[index] = t;
	}
}


void ViterbiTCH_AFS7_4::branchCandidates()
{
	// Branch to generate new input states.
	const vCand *sp = mSurvivors;
	for (unsigned cand=0; cand<mNumCands; cand+=2) {
		uint32_t oStateShifted = (sp->oState) << mIRate;
		for (unsigned in = 0; in <= 1; in++) {
			mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
			mCandidates[cand+in].cost = sp->cost;
			uint32_t outputs = oStateShifted;
			for (unsigned out = 0; out < mIRate; out++) {
				char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
				char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
				mCandidates[cand+in].rState[out] = rState;
				outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
			}
			mCandidates[cand+in].oState = outputs;
		}
		sp++;
	}
}


void ViterbiTCH_AFS7_4::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
{
	const float *cTab[2] = {matchCost,mismatchCost};
	for (unsigned i=0; i<mNumCands; i++) {
		vCand& thisCand = mCandidates[i];
		const unsigned mismatched = inSample ^ (thisCand.oState);
		for (unsigned i = 0; i < mIRate; i++) {
			thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
		}
	}
}


void ViterbiTCH_AFS7_4::pruneCandidates()
{
	const vCand* c1 = mCandidates;			// 0-prefix
	const vCand* c2 = mCandidates + mIStates;	// 1-prefix
	for (unsigned i=0; i<mIStates; i++) {
		if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
		else mSurvivors[i] = c2[i];
	}
}


const ViterbiTCH_AFS7_4::vCand& ViterbiTCH_AFS7_4::minCost() const
{
	int minIndex = 0;
	float minCost = mSurvivors[0].cost;
	for (unsigned i=1; i<mIStates; i++) {
		const float thisCost = mSurvivors[i].cost;
		if (thisCost>=minCost) continue;
		minCost = thisCost;
		minIndex=i;
	}
	return mSurvivors[minIndex];
}


const ViterbiTCH_AFS7_4::vCand& ViterbiTCH_AFS7_4::step(uint32_t inSample, const float *probs, const float *iprobs)
{
	branchCandidates();
	getSoftCostMetrics(inSample,probs,iprobs);
	pruneCandidates();
	return minCost();
}


void ViterbiTCH_AFS7_4::decode(const SoftVector &in, BitVector& target)
{
	ViterbiTCH_AFS7_4 &decoder = *this;
	const size_t sz = in.size() - 12;
	const unsigned deferral = decoder.deferral();
	const size_t ctsz = sz + deferral*decoder.iRate();
	assert(sz == decoder.iRate()*target.size());

	// Build a "history" array where each element contains the full history.
	uint32_t history[ctsz];
	{
		BitVector bits = in.sliced();
		uint32_t accum = 0;
		for (size_t i=0; i<sz; i++) {
			accum = (accum<<1) | bits.bit(i);
			history[i] = accum;
		}
		// Repeat last bit at the end.
		for (size_t i=sz; i<ctsz; i++) {
			accum = (accum<<1) | (accum & 0x01);
			history[i] = accum;
		}
	}

	// Precompute metric tables.
	float matchCostTable[ctsz];
	float mismatchCostTable[ctsz];
	{
		const float *dp = in.begin();
		for (size_t i=0; i<sz; i++) {
			// pVal is the probability that the hard bit decision is incorrect.
			// ipVal is the probability that the hard bit decision is correct.
			float pVal = dp[i];
			if (pVal>0.5F) pVal = 1.0F-pVal;
			float ipVal = 1.0F-pVal;
			// This is a cheap approximation to an ideal cost function.
			if (pVal<0.01F) pVal = 0.01;
			if (ipVal<0.01F) ipVal = 0.01;
			matchCostTable[i] = 0.25F/ipVal;
			mismatchCostTable[i] = 0.25F/pVal;
		}

		// pad end of table with unknowns
		for (size_t i=sz; i<ctsz; i++) {
			matchCostTable[i] = 0.5F;
			mismatchCostTable[i] = 0.5F;
		}
	}

	{
		decoder.initializeStates();
		// Each sample of history[] carries its history.
		// So we only have to process every iRate-th sample.
		const unsigned step = decoder.iRate();
		// input pointer
		const uint32_t *ip = history + step - 1;
		// output pointers
		char *op = target.begin();
		const char *const opt = target.end();
		// table pointers
		const float* match = matchCostTable;
		const float* mismatch = mismatchCostTable;
		size_t oCount = 0;
		while (op<opt) {
			// Viterbi algorithm
			assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
			assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
			const ViterbiTCH_AFS7_4::vCand &minCost = decoder.step(*ip, match, mismatch);
			ip += step;
			match += step;
			mismatch += step;
			// output
			if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
			oCount++;
		}
	}
}


ViterbiTCH_AFS6_7::ViterbiTCH_AFS6_7()
{
	assert(mDeferral < 32);
	mCoeffs[0] = 0x01b;
	mCoeffsFB[0] = 0x01f;
	mCoeffs[1] = 0x015;
	mCoeffsFB[1] = 0x01f;
	mCoeffs[2] = 0x01f;
	mCoeffsFB[2] = 0x01f;
	mCoeffs[3] = 0x01f;
	mCoeffsFB[3] = 0x01f;
	for (unsigned i = 0; i < mIRate; i++) {
		computeStateTables(i);
	}
	computeGeneratorTable();
}


void ViterbiTCH_AFS6_7::initializeStates()
{
	for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
	for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
}


void ViterbiTCH_AFS6_7::computeStateTables(unsigned g)
{
	assert(g<mIRate);
	for (unsigned state=0; state<mIStates; state++) {
		for (unsigned in = 0; in <= 1; in++) {
			uint32_t inputVal = (state<<1) | in;
			mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
		}
	}
}


void ViterbiTCH_AFS6_7::computeGeneratorTable()
{
	for (unsigned index=0; index<mIStates*2; index++) {
		uint32_t t = 0;
		for (unsigned i = 0; i < mIRate; i++) {
			t = (t << 1) | mStateTable[i][index];
		}
		mGeneratorTable[index] = t;
	}
}


void ViterbiTCH_AFS6_7::branchCandidates()
{
	// Branch to generate new input states.
	const vCand *sp = mSurvivors;
	for (unsigned cand=0; cand<mNumCands; cand+=2) {
		uint32_t oStateShifted = (sp->oState) << mIRate;
		for (unsigned in = 0; in <= 1; in++) {
			mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
			mCandidates[cand+in].cost = sp->cost;
			uint32_t outputs = oStateShifted;
			for (unsigned out = 0; out < mIRate; out++) {
				char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
				char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
				mCandidates[cand+in].rState[out] = rState;
				outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
			}
			mCandidates[cand+in].oState = outputs;
		}
		sp++;
	}
}


void ViterbiTCH_AFS6_7::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
{
	const float *cTab[2] = {matchCost,mismatchCost};
	for (unsigned i=0; i<mNumCands; i++) {
		vCand& thisCand = mCandidates[i];
		const unsigned mismatched = inSample ^ (thisCand.oState);
		for (unsigned i = 0; i < mIRate; i++) {
			thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
		}
	}
}


void ViterbiTCH_AFS6_7::pruneCandidates()
{
	const vCand* c1 = mCandidates;			// 0-prefix
	const vCand* c2 = mCandidates + mIStates;	// 1-prefix
	for (unsigned i=0; i<mIStates; i++) {
		if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
		else mSurvivors[i] = c2[i];
	}
}


const ViterbiTCH_AFS6_7::vCand& ViterbiTCH_AFS6_7::minCost() const
{
	int minIndex = 0;
	float minCost = mSurvivors[0].cost;
	for (unsigned i=1; i<mIStates; i++) {
		const float thisCost = mSurvivors[i].cost;
		if (thisCost>=minCost) continue;
		minCost = thisCost;
		minIndex=i;
	}
	return mSurvivors[minIndex];
}


const ViterbiTCH_AFS6_7::vCand& ViterbiTCH_AFS6_7::step(uint32_t inSample, const float *probs, const float *iprobs)
{
	branchCandidates();
	getSoftCostMetrics(inSample,probs,iprobs);
	pruneCandidates();
	return minCost();
}


void ViterbiTCH_AFS6_7::decode(const SoftVector &in, BitVector& target)
{
	ViterbiTCH_AFS6_7 &decoder = *this;
	const size_t sz = in.size() - 16;
	const unsigned deferral = decoder.deferral();
	const size_t ctsz = sz + deferral*decoder.iRate();
	assert(sz == decoder.iRate()*target.size());

	// Build a "history" array where each element contains the full history.
	uint32_t history[ctsz];
	{
		BitVector bits = in.sliced();
		uint32_t accum = 0;
		for (size_t i=0; i<sz; i++) {
			accum = (accum<<1) | bits.bit(i);
			history[i] = accum;
		}
		// Repeat last bit at the end.
		for (size_t i=sz; i<ctsz; i++) {
			accum = (accum<<1) | (accum & 0x01);
			history[i] = accum;
		}
	}

	// Precompute metric tables.
	float matchCostTable[ctsz];
	float mismatchCostTable[ctsz];
	{
		const float *dp = in.begin();
		for (size_t i=0; i<sz; i++) {
			// pVal is the probability that the hard bit decision is incorrect.
			// ipVal is the probability that the hard bit decision is correct.
			float pVal = dp[i];
			if (pVal>0.5F) pVal = 1.0F-pVal;
			float ipVal = 1.0F-pVal;
			// This is a cheap approximation to an ideal cost function.
			if (pVal<0.01F) pVal = 0.01;
			if (ipVal<0.01F) ipVal = 0.01;
			matchCostTable[i] = 0.25F/ipVal;
			mismatchCostTable[i] = 0.25F/pVal;
		}

		// pad end of table with unknowns
		for (size_t i=sz; i<ctsz; i++) {
			matchCostTable[i] = 0.5F;
			mismatchCostTable[i] = 0.5F;
		}
	}

	{
		decoder.initializeStates();
		// Each sample of history[] carries its history.
		// So we only have to process every iRate-th sample.
		const unsigned step = decoder.iRate();
		// input pointer
		const uint32_t *ip = history + step - 1;
		// output pointers
		char *op = target.begin();
		const char *const opt = target.end();
		// table pointers
		const float* match = matchCostTable;
		const float* mismatch = mismatchCostTable;
		size_t oCount = 0;
		while (op<opt) {
			// Viterbi algorithm
			assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
			assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
			const ViterbiTCH_AFS6_7::vCand &minCost = decoder.step(*ip, match, mismatch);
			ip += step;
			match += step;
			mismatch += step;
			// output
			if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
			oCount++;
		}
	}
}


ViterbiTCH_AFS5_9::ViterbiTCH_AFS5_9()
{
	assert(mDeferral < 32);
	mCoeffs[0] = 0x06d;
	mCoeffsFB[0] = 0x05f;
	mCoeffs[1] = 0x053;
	mCoeffsFB[1] = 0x05f;
	mCoeffs[2] = 0x05f;
	mCoeffsFB[2] = 0x05f;
	mCoeffs[3] = 0x05f;
	mCoeffsFB[3] = 0x05f;
	for (unsigned i = 0; i < mIRate; i++) {
		computeStateTables(i);
	}
	computeGeneratorTable();
}


void ViterbiTCH_AFS5_9::initializeStates()
{
	for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
	for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
}


void ViterbiTCH_AFS5_9::computeStateTables(unsigned g)
{
	assert(g<mIRate);
	for (unsigned state=0; state<mIStates; state++) {
		for (unsigned in = 0; in <= 1; in++) {
			uint32_t inputVal = (state<<1) | in;
			mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
		}
	}
}


void ViterbiTCH_AFS5_9::computeGeneratorTable()
{
	for (unsigned index=0; index<mIStates*2; index++) {
		uint32_t t = 0;
		for (unsigned i = 0; i < mIRate; i++) {
			t = (t << 1) | mStateTable[i][index];
		}
		mGeneratorTable[index] = t;
	}
}


void ViterbiTCH_AFS5_9::branchCandidates()
{
	// Branch to generate new input states.
	const vCand *sp = mSurvivors;
	for (unsigned cand=0; cand<mNumCands; cand+=2) {
		uint32_t oStateShifted = (sp->oState) << mIRate;
		for (unsigned in = 0; in <= 1; in++) {
			mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
			mCandidates[cand+in].cost = sp->cost;
			uint32_t outputs = oStateShifted;
			for (unsigned out = 0; out < mIRate; out++) {
				char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
				char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
				mCandidates[cand+in].rState[out] = rState;
				outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
			}
			mCandidates[cand+in].oState = outputs;
		}
		sp++;
	}
}


void ViterbiTCH_AFS5_9::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
{
	const float *cTab[2] = {matchCost,mismatchCost};
	for (unsigned i=0; i<mNumCands; i++) {
		vCand& thisCand = mCandidates[i];
		const unsigned mismatched = inSample ^ (thisCand.oState);
		for (unsigned i = 0; i < mIRate; i++) {
			thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
		}
	}
}


void ViterbiTCH_AFS5_9::pruneCandidates()
{
	const vCand* c1 = mCandidates;			// 0-prefix
	const vCand* c2 = mCandidates + mIStates;	// 1-prefix
	for (unsigned i=0; i<mIStates; i++) {
		if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
		else mSurvivors[i] = c2[i];
	}
}


const ViterbiTCH_AFS5_9::vCand& ViterbiTCH_AFS5_9::minCost() const
{
	int minIndex = 0;
	float minCost = mSurvivors[0].cost;
	for (unsigned i=1; i<mIStates; i++) {
		const float thisCost = mSurvivors[i].cost;
		if (thisCost>=minCost) continue;
		minCost = thisCost;
		minIndex=i;
	}
	return mSurvivors[minIndex];
}


const ViterbiTCH_AFS5_9::vCand& ViterbiTCH_AFS5_9::step(uint32_t inSample, const float *probs, const float *iprobs)
{
	branchCandidates();
	getSoftCostMetrics(inSample,probs,iprobs);
	pruneCandidates();
	return minCost();
}


void ViterbiTCH_AFS5_9::decode(const SoftVector &in, BitVector& target)
{
	ViterbiTCH_AFS5_9 &decoder = *this;
	const size_t sz = in.size() - 24;
	const unsigned deferral = decoder.deferral();
	const size_t ctsz = sz + deferral*decoder.iRate();
	assert(sz == decoder.iRate()*target.size());

	// Build a "history" array where each element contains the full history.
	uint32_t history[ctsz];
	{
		BitVector bits = in.sliced();
		uint32_t accum = 0;
		for (size_t i=0; i<sz; i++) {
			accum = (accum<<1) | bits.bit(i);
			history[i] = accum;
		}
		// Repeat last bit at the end.
		for (size_t i=sz; i<ctsz; i++) {
			accum = (accum<<1) | (accum & 0x01);
			history[i] = accum;
		}
	}

	// Precompute metric tables.
	float matchCostTable[ctsz];
	float mismatchCostTable[ctsz];
	{
		const float *dp = in.begin();
		for (size_t i=0; i<sz; i++) {
			// pVal is the probability that the hard bit decision is incorrect.
			// ipVal is the probability that the hard bit decision is correct.
			float pVal = dp[i];
			if (pVal>0.5F) pVal = 1.0F-pVal;
			float ipVal = 1.0F-pVal;
			// This is a cheap approximation to an ideal cost function.
			if (pVal<0.01F) pVal = 0.01;
			if (ipVal<0.01F) ipVal = 0.01;
			matchCostTable[i] = 0.25F/ipVal;
			mismatchCostTable[i] = 0.25F/pVal;
		}

		// pad end of table with unknowns
		for (size_t i=sz; i<ctsz; i++) {
			matchCostTable[i] = 0.5F;
			mismatchCostTable[i] = 0.5F;
		}
	}

	{
		decoder.initializeStates();
		// Each sample of history[] carries its history.
		// So we only have to process every iRate-th sample.
		const unsigned step = decoder.iRate();
		// input pointer
		const uint32_t *ip = history + step - 1;
		// output pointers
		char *op = target.begin();
		const char *const opt = target.end();
		// table pointers
		const float* match = matchCostTable;
		const float* mismatch = mismatchCostTable;
		size_t oCount = 0;
		while (op<opt) {
			// Viterbi algorithm
			assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
			assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
			const ViterbiTCH_AFS5_9::vCand &minCost = decoder.step(*ip, match, mismatch);
			ip += step;
			match += step;
			mismatch += step;
			// output
			if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
			oCount++;
		}
	}
}


ViterbiTCH_AFS5_15::ViterbiTCH_AFS5_15()
{
	assert(mDeferral < 32);
	mCoeffs[0] = 0x01b;
	mCoeffsFB[0] = 0x01f;
	mCoeffs[1] = 0x01b;
	mCoeffsFB[1] = 0x01f;
	mCoeffs[2] = 0x015;
	mCoeffsFB[2] = 0x01f;
	mCoeffs[3] = 0x01f;
	mCoeffsFB[3] = 0x01f;
	mCoeffs[4] = 0x01f;
	mCoeffsFB[4] = 0x01f;
	for (unsigned i = 0; i < mIRate; i++) {
		computeStateTables(i);
	}
	computeGeneratorTable();
}


void ViterbiTCH_AFS5_15::initializeStates()
{
	for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
	for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
}


void ViterbiTCH_AFS5_15::computeStateTables(unsigned g)
{
	assert(g<mIRate);
	for (unsigned state=0; state<mIStates; state++) {
		for (unsigned in = 0; in <= 1; in++) {
			uint32_t inputVal = (state<<1) | in;
			mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
		}
	}
}


void ViterbiTCH_AFS5_15::computeGeneratorTable()
{
	for (unsigned index=0; index<mIStates*2; index++) {
		uint32_t t = 0;
		for (unsigned i = 0; i < mIRate; i++) {
			t = (t << 1) | mStateTable[i][index];
		}
		mGeneratorTable[index] = t;
	}
}


void ViterbiTCH_AFS5_15::branchCandidates()
{
	// Branch to generate new input states.
	const vCand *sp = mSurvivors;
	for (unsigned cand=0; cand<mNumCands; cand+=2) {
		uint32_t oStateShifted = (sp->oState) << mIRate;
		for (unsigned in = 0; in <= 1; in++) {
			mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
			mCandidates[cand+in].cost = sp->cost;
			uint32_t outputs = oStateShifted;
			for (unsigned out = 0; out < mIRate; out++) {
				char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
				char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
				mCandidates[cand+in].rState[out] = rState;
				outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
			}
			mCandidates[cand+in].oState = outputs;
		}
		sp++;
	}
}


void ViterbiTCH_AFS5_15::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
{
	const float *cTab[2] = {matchCost,mismatchCost};
	for (unsigned i=0; i<mNumCands; i++) {
		vCand& thisCand = mCandidates[i];
		const unsigned mismatched = inSample ^ (thisCand.oState);
		for (unsigned i = 0; i < mIRate; i++) {
			thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
		}
	}
}


void ViterbiTCH_AFS5_15::pruneCandidates()
{
	const vCand* c1 = mCandidates;			// 0-prefix
	const vCand* c2 = mCandidates + mIStates;	// 1-prefix
	for (unsigned i=0; i<mIStates; i++) {
		if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
		else mSurvivors[i] = c2[i];
	}
}


const ViterbiTCH_AFS5_15::vCand& ViterbiTCH_AFS5_15::minCost() const
{
	int minIndex = 0;
	float minCost = mSurvivors[0].cost;
	for (unsigned i=1; i<mIStates; i++) {
		const float thisCost = mSurvivors[i].cost;
		if (thisCost>=minCost) continue;
		minCost = thisCost;
		minIndex=i;
	}
	return mSurvivors[minIndex];
}


const ViterbiTCH_AFS5_15::vCand& ViterbiTCH_AFS5_15::step(uint32_t inSample, const float *probs, const float *iprobs)
{
	branchCandidates();
	getSoftCostMetrics(inSample,probs,iprobs);
	pruneCandidates();
	return minCost();
}


void ViterbiTCH_AFS5_15::decode(const SoftVector &in, BitVector& target)
{
	ViterbiTCH_AFS5_15 &decoder = *this;
	const size_t sz = in.size() - 20;
	const unsigned deferral = decoder.deferral();
	const size_t ctsz = sz + deferral*decoder.iRate();
	assert(sz == decoder.iRate()*target.size());

	// Build a "history" array where each element contains the full history.
	uint32_t history[ctsz];
	{
		BitVector bits = in.sliced();
		uint32_t accum = 0;
		for (size_t i=0; i<sz; i++) {
			accum = (accum<<1) | bits.bit(i);
			history[i] = accum;
		}
		// Repeat last bit at the end.
		for (size_t i=sz; i<ctsz; i++) {
			accum = (accum<<1) | (accum & 0x01);
			history[i] = accum;
		}
	}

	// Precompute metric tables.
	float matchCostTable[ctsz];
	float mismatchCostTable[ctsz];
	{
		const float *dp = in.begin();
		for (size_t i=0; i<sz; i++) {
			// pVal is the probability that the hard bit decision is incorrect.
			// ipVal is the probability that the hard bit decision is correct.
			float pVal = dp[i];
			if (pVal>0.5F) pVal = 1.0F-pVal;
			float ipVal = 1.0F-pVal;
			// This is a cheap approximation to an ideal cost function.
			if (pVal<0.01F) pVal = 0.01;
			if (ipVal<0.01F) ipVal = 0.01;
			matchCostTable[i] = 0.25F/ipVal;
			mismatchCostTable[i] = 0.25F/pVal;
		}

		// pad end of table with unknowns
		for (size_t i=sz; i<ctsz; i++) {
			matchCostTable[i] = 0.5F;
			mismatchCostTable[i] = 0.5F;
		}
	}

	{
		decoder.initializeStates();
		// Each sample of history[] carries its history.
		// So we only have to process every iRate-th sample.
		const unsigned step = decoder.iRate();
		// input pointer
		const uint32_t *ip = history + step - 1;
		// output pointers
		char *op = target.begin();
		const char *const opt = target.end();
		// table pointers
		const float* match = matchCostTable;
		const float* mismatch = mismatchCostTable;
		size_t oCount = 0;
		while (op<opt) {
			// Viterbi algorithm
			assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
			assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
			const ViterbiTCH_AFS5_15::vCand &minCost = decoder.step(*ip, match, mismatch);
			ip += step;
			match += step;
			mismatch += step;
			// output
			if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
			oCount++;
		}
	}
}


ViterbiTCH_AFS4_75::ViterbiTCH_AFS4_75()
{
	assert(mDeferral < 32);
	mCoeffs[0] = 0x06d;
	mCoeffsFB[0] = 0x05f;
	mCoeffs[1] = 0x06d;
	mCoeffsFB[1] = 0x05f;
	mCoeffs[2] = 0x053;
	mCoeffsFB[2] = 0x05f;
	mCoeffs[3] = 0x05f;
	mCoeffsFB[3] = 0x05f;
	mCoeffs[4] = 0x05f;
	mCoeffsFB[4] = 0x05f;
	for (unsigned i = 0; i < mIRate; i++) {
		computeStateTables(i);
	}
	computeGeneratorTable();
}


void ViterbiTCH_AFS4_75::initializeStates()
{
	for (unsigned i=0; i<mIStates; i++) vitClear(mSurvivors[i]);
	for (unsigned i=0; i<mNumCands; i++) vitClear(mCandidates[i]);
}


void ViterbiTCH_AFS4_75::computeStateTables(unsigned g)
{
	assert(g<mIRate);
	for (unsigned state=0; state<mIStates; state++) {
		for (unsigned in = 0; in <= 1; in++) {
			uint32_t inputVal = (state<<1) | in;
			mStateTable[g][inputVal] = applyPoly(inputVal, mCoeffs[g] ^ mCoeffsFB[g], mOrder+1) ^ in;
		}
	}
}


void ViterbiTCH_AFS4_75::computeGeneratorTable()
{
	for (unsigned index=0; index<mIStates*2; index++) {
		uint32_t t = 0;
		for (unsigned i = 0; i < mIRate; i++) {
			t = (t << 1) | mStateTable[i][index];
		}
		mGeneratorTable[index] = t;
	}
}


void ViterbiTCH_AFS4_75::branchCandidates()
{
	// Branch to generate new input states.
	const vCand *sp = mSurvivors;
	for (unsigned cand=0; cand<mNumCands; cand+=2) {
		uint32_t oStateShifted = (sp->oState) << mIRate;
		for (unsigned in = 0; in <= 1; in++) {
			mCandidates[cand+in].iState = ((sp->iState) << 1) | in;
			mCandidates[cand+in].cost = sp->cost;
			uint32_t outputs = oStateShifted;
			for (unsigned out = 0; out < mIRate; out++) {
				char feedback = applyPoly(sp->rState[out], mCoeffsFB[out] ^ 1, mOrder+1);
				char rState = (((sp->rState[out]) ^ feedback) << 1) | in;
				mCandidates[cand+in].rState[out] = rState;
				outputs |= (mGeneratorTable[rState & mCMask] & (1 << (mIRate - out - 1)));
			}
			mCandidates[cand+in].oState = outputs;
		}
		sp++;
	}
}


void ViterbiTCH_AFS4_75::getSoftCostMetrics(const uint32_t inSample, const float *matchCost, const float *mismatchCost)
{
	const float *cTab[2] = {matchCost,mismatchCost};
	for (unsigned i=0; i<mNumCands; i++) {
		vCand& thisCand = mCandidates[i];
		const unsigned mismatched = inSample ^ (thisCand.oState);
		for (unsigned i = 0; i < mIRate; i++) {
			thisCand.cost += cTab[(mismatched>>i)&0x01][mIRate-i-1];
		}
	}
}


void ViterbiTCH_AFS4_75::pruneCandidates()
{
	const vCand* c1 = mCandidates;			// 0-prefix
	const vCand* c2 = mCandidates + mIStates;	// 1-prefix
	for (unsigned i=0; i<mIStates; i++) {
		if (c1[i].cost < c2[i].cost) mSurvivors[i] = c1[i];
		else mSurvivors[i] = c2[i];
	}
}


const ViterbiTCH_AFS4_75::vCand& ViterbiTCH_AFS4_75::minCost() const
{
	int minIndex = 0;
	float minCost = mSurvivors[0].cost;
	for (unsigned i=1; i<mIStates; i++) {
		const float thisCost = mSurvivors[i].cost;
		if (thisCost>=minCost) continue;
		minCost = thisCost;
		minIndex=i;
	}
	return mSurvivors[minIndex];
}


const ViterbiTCH_AFS4_75::vCand& ViterbiTCH_AFS4_75::step(uint32_t inSample, const float *probs, const float *iprobs)
{
	branchCandidates();
	getSoftCostMetrics(inSample,probs,iprobs);
	pruneCandidates();
	return minCost();
}


void ViterbiTCH_AFS4_75::decode(const SoftVector &in, BitVector& target)
{
	ViterbiTCH_AFS4_75 &decoder = *this;
	const size_t sz = in.size() - 30;
	const unsigned deferral = decoder.deferral();
	const size_t ctsz = sz + deferral*decoder.iRate();
	assert(sz == decoder.iRate()*target.size());

	// Build a "history" array where each element contains the full history.
	uint32_t history[ctsz];
	{
		BitVector bits = in.sliced();
		uint32_t accum = 0;
		for (size_t i=0; i<sz; i++) {
			accum = (accum<<1) | bits.bit(i);
			history[i] = accum;
		}
		// Repeat last bit at the end.
		for (size_t i=sz; i<ctsz; i++) {
			accum = (accum<<1) | (accum & 0x01);
			history[i] = accum;
		}
	}

	// Precompute metric tables.
	float matchCostTable[ctsz];
	float mismatchCostTable[ctsz];
	{
		const float *dp = in.begin();
		for (size_t i=0; i<sz; i++) {
			// pVal is the probability that the hard bit decision is incorrect.
			// ipVal is the probability that the hard bit decision is correct.
			float pVal = dp[i];
			if (pVal>0.5F) pVal = 1.0F-pVal;
			float ipVal = 1.0F-pVal;
			// This is a cheap approximation to an ideal cost function.
			if (pVal<0.01F) pVal = 0.01;
			if (ipVal<0.01F) ipVal = 0.01;
			matchCostTable[i] = 0.25F/ipVal;
			mismatchCostTable[i] = 0.25F/pVal;
		}

		// pad end of table with unknowns
		for (size_t i=sz; i<ctsz; i++) {
			matchCostTable[i] = 0.5F;
			mismatchCostTable[i] = 0.5F;
		}
	}

	{
		decoder.initializeStates();
		// Each sample of history[] carries its history.
		// So we only have to process every iRate-th sample.
		const unsigned step = decoder.iRate();
		// input pointer
		const uint32_t *ip = history + step - 1;
		// output pointers
		char *op = target.begin();
		const char *const opt = target.end();
		// table pointers
		const float* match = matchCostTable;
		const float* mismatch = mismatchCostTable;
		size_t oCount = 0;
		while (op<opt) {
			// Viterbi algorithm
			assert(match-matchCostTable<(int)(sizeof(matchCostTable)/sizeof(matchCostTable[0])-1));
			assert(mismatch-mismatchCostTable<(int)(sizeof(mismatchCostTable)/sizeof(mismatchCostTable[0])-1));
			const ViterbiTCH_AFS4_75::vCand &minCost = decoder.step(*ip, match, mismatch);
			ip += step;
			match += step;
			mismatch += step;
			// output
			if (oCount>=deferral) *op++ = (minCost.iState >> deferral)&0x01;
			oCount++;
		}
	}
}