
/usr/include/shogun/distributions/HMM.h is in libshogun-dev 3.2.0-7.3build4.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * Written (W) 1999-2009 Soeren Sonnenburg
 * Written (W) 1999-2008 Gunnar Raetsch
 * Copyright (C) 1999-2009 Fraunhofer Institute FIRST and Max-Planck-Society
 */

#ifndef __CHMM_H__
#define __CHMM_H__

#include <shogun/mathematics/Math.h>
#include <shogun/lib/common.h>
#include <shogun/io/SGIO.h>
#include <shogun/lib/config.h>
#include <shogun/features/Features.h>
#include <shogun/features/StringFeatures.h>
#include <shogun/distributions/Distribution.h>

#include <stdio.h>

#ifdef USE_HMMPARALLEL
#define USE_HMMPARALLEL_STRUCTURES 1
#endif

namespace shogun
{
	class CFeatures;
	template <class ST> class CStringFeatures;
/**@name HMM specific types*/
//@{

/// type for alpha/beta caching table
typedef float64_t T_ALPHA_BETA_TABLE;

#ifndef DOXYGEN_SHOULD_SKIP_THIS
/// type for alpha/beta table
struct T_ALPHA_BETA
{
	/// dimension for which the alpha/beta table was generated
	int32_t dimension;

	/// perversely huge alpha/beta cache table
	T_ALPHA_BETA_TABLE* table;

	/// true if table is valid
	bool updated;

	/// sum over all paths == model_probability for this dimension
	float64_t sum;
};
#endif // DOXYGEN_SHOULD_SKIP_THIS

/** type that is used for states.
 * uint8_t is usually enough if you have at most 256 states;
 * however, uint16_t/long/... is also possible, although you might quickly run into memory problems.
 */
#ifdef USE_BIGSTATES
typedef uint16_t T_STATES ;
#else
typedef uint8_t T_STATES ;
#endif
typedef T_STATES* P_STATES ;

//@}

/** Training type */
enum BaumWelchViterbiType
{
	/// standard Baum-Welch
	BW_NORMAL,
	/// Baum-Welch only for specified transitions
	BW_TRANS,
	/// Baum-Welch only for defined transitions/observations
	BW_DEFINED,
	/// standard Viterbi
	VIT_NORMAL,
	/// Viterbi only for defined transitions/observations
	VIT_DEFINED
};


/** @brief class Model */
class Model
{
	public:
		/// Constructor - initializes all variables/structures
		Model();

		/// Destructor - cleans up
		virtual ~Model();

		/// sorts learn_a matrix
		inline void sort_learn_a()
		{
			CMath::sort(learn_a,2) ;
		}

		/// sorts learn_b matrix
		inline void sort_learn_b()
		{
			CMath::sort(learn_b,2) ;
		}

		/**@name read access functions.
		 * For learn arrays and const arrays
		 */
		//@{
		/// get entry out of learn_a matrix
		inline int32_t get_learn_a(int32_t line, int32_t column) const
		{
			return learn_a[line*2 + column];
		}

		/// get entry out of learn_b matrix
		inline int32_t get_learn_b(int32_t line, int32_t column) const
		{
			return learn_b[line*2 + column];
		}

		/// get entry out of learn_p vector
		inline int32_t get_learn_p(int32_t offset) const
		{
			return learn_p[offset];
		}

		/// get entry out of learn_q vector
		inline int32_t get_learn_q(int32_t offset) const
		{
			return learn_q[offset];
		}

		/// get entry out of const_a matrix
		inline int32_t get_const_a(int32_t line, int32_t column) const
		{
			return const_a[line*2 + column];
		}

		/// get entry out of const_b matrix
		inline int32_t get_const_b(int32_t line, int32_t column) const
		{
			return const_b[line*2 + column];
		}

		/// get entry out of const_p vector
		inline int32_t get_const_p(int32_t offset) const
		{
			return const_p[offset];
		}

		/// get entry out of const_q vector
		inline int32_t get_const_q(int32_t offset) const
		{
			return const_q[offset];
		}

		/// get value out of const_a_val vector
		inline float64_t get_const_a_val(int32_t line) const
		{
			return const_a_val[line];
		}

		/// get value out of const_b_val vector
		inline float64_t get_const_b_val(int32_t line) const
		{
			return const_b_val[line];
		}

		/// get value out of const_p_val vector
		inline float64_t get_const_p_val(int32_t offset) const
		{
			return const_p_val[offset];
		}

		/// get value out of const_q_val vector
		inline float64_t get_const_q_val(int32_t offset) const
		{
			return const_q_val[offset];
		}
#ifdef FIX_POS
		/// get value out of fix_pos_state array
		inline char get_fix_pos_state(int32_t pos, T_STATES state, T_STATES num_states)
		{
#ifdef HMM_DEBUG
			if ((pos<0)||(pos*num_states+state>65336))
				SG_DEBUG("index out of range in get_fix_pos_state(%i,%i,%i) \n", pos,state,num_states)
#endif
			return fix_pos_state[pos*num_states+state] ;
		}
#endif
		//@}

		/**@name write access functions
		 * For learn and const arrays
		 */
		//@{
		/// set value in learn_a matrix
		inline void set_learn_a(int32_t offset, int32_t value)
		{
			learn_a[offset]=value;
		}

		/// set value in learn_b matrix
		inline void set_learn_b(int32_t offset, int32_t value)
		{
			learn_b[offset]=value;
		}

		/// set value in learn_p vector
		inline void set_learn_p(int32_t offset, int32_t value)
		{
			learn_p[offset]=value;
		}

		/// set value in learn_q vector
		inline void set_learn_q(int32_t offset, int32_t value)
		{
			learn_q[offset]=value;
		}

		/// set value in const_a matrix
		inline void set_const_a(int32_t offset, int32_t value)
		{
			const_a[offset]=value;
		}

		/// set value in const_b matrix
		inline void set_const_b(int32_t offset, int32_t value)
		{
			const_b[offset]=value;
		}

		/// set value in const_p vector
		inline void set_const_p(int32_t offset, int32_t value)
		{
			const_p[offset]=value;
		}

		/// set value in const_q vector
		inline void set_const_q(int32_t offset, int32_t value)
		{
			const_q[offset]=value;
		}

		/// set value in const_a_val vector
		inline void set_const_a_val(int32_t offset, float64_t value)
		{
			const_a_val[offset]=value;
		}

		/// set value in const_b_val vector
		inline void set_const_b_val(int32_t offset, float64_t value)
		{
			const_b_val[offset]=value;
		}

		/// set value in const_p_val vector
		inline void set_const_p_val(int32_t offset, float64_t value)
		{
			const_p_val[offset]=value;
		}

		/// set value in const_q_val vector
		inline void set_const_q_val(int32_t offset, float64_t value)
		{
			const_q_val[offset]=value;
		}
#ifdef FIX_POS
		/// set value in fix_pos_state vector
		inline void set_fix_pos_state(
			int32_t pos, T_STATES state, T_STATES num_states, char value)
		{
#ifdef HMM_DEBUG
			if ((pos<0)||(pos*num_states+state>65336))
				SG_DEBUG("index out of range in set_fix_pos_state(%i,%i,%i,%i) [%i]\n", pos,state,num_states,(int)value, pos*num_states+state)
#endif
			fix_pos_state[pos*num_states+state]=value;
			if (value==FIX_ALLOWED)
				for (int32_t i=0; i<num_states; i++)
					if (get_fix_pos_state(pos,i,num_states)==FIX_DEFAULT)
						set_fix_pos_state(pos,i,num_states,FIX_DISALLOWED) ;
		}
		//@}

		/// FIX_DISALLOWED - state is forbidden and will be penalized with DISALLOWED_PENALTY
		const static char FIX_DISALLOWED ;

		/// FIX_ALLOWED - state is allowed
		const static char FIX_ALLOWED ;

		/// FIX_DEFAULT - default value
		const static char FIX_DEFAULT ;

		/// DISALLOWED_PENALTY - states in FIX_DISALLOWED will be penalized with this value
		const static float64_t DISALLOWED_PENALTY ;
#endif
	protected:
		/**@name learn arrays.
		 * Everything that is to be learned is enumerated here.
		 * All values will be initialized with random values
		 * and normalized to satisfy stochasticity.
		 */
		//@{
		/// transitions to be learned
		int32_t* learn_a;

		/// emissions to be learned
		int32_t* learn_b;

		/// start states to be learned
		int32_t* learn_p;

		/// end states to be learned
		int32_t* learn_q;
		//@}

		/**@name constant arrays.
		 * These arrays hold constant fields. All values that
		 * are not constant and will not be learned are initialized
		 * with 0.
		 */
		//@{
		/// transitions that have constant probability
		int32_t* const_a;

		/// emissions that have constant probability
		int32_t* const_b;

		/// start states that have constant probability
		int32_t* const_p;

		/// end states that have constant probability
		int32_t* const_q;


		/// values for transitions that have constant probability
		float64_t* const_a_val;

		/// values for emissions that have constant probability
		float64_t* const_b_val;

		/// values for start states that have constant probability
		float64_t* const_p_val;

		/// values for end states that have constant probability
		float64_t* const_q_val;

#ifdef FIX_POS
		/** per-position constraints on the states the model has to be in or has to avoid.
		 * Only used in Viterbi.
		 */
		char* fix_pos_state;
#endif
		//@}
};
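
/* Usage sketch (illustrative only): a minimal train/evaluate cycle with the
 * CHMM class declared below. It only uses functions declared in this header;
 * how the observation object `obs` (a CStringFeatures<uint16_t>*) is built is
 * an assumption and outside the scope of this file.
 *
 *   CHMM* hmm = new CHMM(obs, 3, 4, 1e-3);    // N=3 states, M=4 symbols, PSEUDO=1e-3
 *   hmm->set_iterations(100);                 // convergence criteria (see below)
 *   hmm->set_epsilon(1e-4);
 *   hmm->baum_welch_viterbi_train(BW_NORMAL); // standard Baum-Welch training
 *   float64_t ll = hmm->model_probability();  // model probability averaged over all sequences
 */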


/** @brief Hidden Markov Model.
 *
 * Structure and function collection.
 * This class implements a Hidden Markov Model.
 * For a tutorial on HMMs see Rabiner, A Tutorial on Hidden Markov Models
 * and Selected Applications in Speech Recognition, Proc. IEEE, 1989.
 *
 * Several functions for tasks such as training, reading/writing models, reading observations,
 * and calculating derivatives are supplied.
 */
class CHMM : public CDistribution
{
	private:

		T_STATES trans_list_len ;
		T_STATES **trans_list_forward  ;
		T_STATES *trans_list_forward_cnt  ;
		float64_t **trans_list_forward_val ;
		T_STATES **trans_list_backward  ;
		T_STATES *trans_list_backward_cnt  ;
		bool mem_initialized ;

#ifdef USE_HMMPARALLEL_STRUCTURES

		/// Datatype that is used in parallel computation of Viterbi
		struct S_DIM_THREAD_PARAM
		{
			CHMM* hmm;
			int32_t dim;
			float64_t prob_sum;
		};

		/// Datatype that is used in parallel Baum-Welch model estimation
		struct S_BW_THREAD_PARAM
		{
			CHMM* hmm;
			int32_t dim_start;
			int32_t dim_stop;

			float64_t ret;

			float64_t* p_buf;
			float64_t* q_buf;
			float64_t* a_buf;
			float64_t* b_buf;
		};

		inline T_ALPHA_BETA & ALPHA_CACHE(int32_t dim) {
			return alpha_cache[dim%parallel->get_num_threads()] ; } ;
		inline T_ALPHA_BETA & BETA_CACHE(int32_t dim) {
			return beta_cache[dim%parallel->get_num_threads()] ; } ;
#ifdef USE_LOGSUMARRAY
		inline float64_t* ARRAYS(int32_t dim) {
			return arrayS[dim%parallel->get_num_threads()] ; } ;
#endif
		inline float64_t* ARRAYN1(int32_t dim) {
			return arrayN1[dim%parallel->get_num_threads()] ; } ;
		inline float64_t* ARRAYN2(int32_t dim) {
			return arrayN2[dim%parallel->get_num_threads()] ; } ;
		inline T_STATES* STATES_PER_OBSERVATION_PSI(int32_t dim) {
			return states_per_observation_psi[dim%parallel->get_num_threads()] ; } ;
		inline const T_STATES* STATES_PER_OBSERVATION_PSI(int32_t dim) const {
			return states_per_observation_psi[dim%parallel->get_num_threads()] ; } ;
		inline T_STATES* PATH(int32_t dim) {
			return path[dim%parallel->get_num_threads()] ; } ;
		inline bool & PATH_PROB_UPDATED(int32_t dim) {
			return path_prob_updated[dim%parallel->get_num_threads()] ; } ;
		inline int32_t & PATH_PROB_DIMENSION(int32_t dim) {
			return path_prob_dimension[dim%parallel->get_num_threads()] ; } ;
#else
		inline T_ALPHA_BETA & ALPHA_CACHE(int32_t /*dim*/) {
			return alpha_cache ; } ;
		inline T_ALPHA_BETA & BETA_CACHE(int32_t /*dim*/) {
			return beta_cache ; } ;
#ifdef USE_LOGSUMARRAY
		inline float64_t* ARRAYS(int32_t dim) {
			return arrayS ; } ;
#endif
		inline float64_t* ARRAYN1(int32_t /*dim*/) {
			return arrayN1 ; } ;
		inline float64_t* ARRAYN2(int32_t /*dim*/) {
			return arrayN2 ; } ;
		inline T_STATES* STATES_PER_OBSERVATION_PSI(int32_t /*dim*/) {
			return states_per_observation_psi ; } ;
		inline const T_STATES* STATES_PER_OBSERVATION_PSI(int32_t /*dim*/) const {
			return states_per_observation_psi ; } ;
		inline T_STATES* PATH(int32_t /*dim*/) {
			return path ; } ;
		inline bool & PATH_PROB_UPDATED(int32_t /*dim*/) {
			return path_prob_updated ; } ;
		inline int32_t & PATH_PROB_DIMENSION(int32_t /*dim*/) {
			return path_prob_dimension ; } ;
#endif

		/** Determines if algorithm has converged
		 * @param x value to check against y
		 * @param y value to check against x
		 */
		bool converged(float64_t x, float64_t y);

		/** Train definitions.
		 * Encapsulates which model parameters are constant and which shall be learned.
		 * Consists of structures and access functions for learning only defined transitions and constants.
		 */

	public:
		/** default constructor  */
		CHMM();

		/**@name Constructor/Destructor and helper function
		*/
		//@{
		/** Constructor
		 * @param N number of states
		 * @param M number of emissions
		 * @param model model which holds definitions of states to be learned + consts
		 * @param PSEUDO Pseudo Value
		 */

		CHMM(
			int32_t N, int32_t M, Model* model, float64_t PSEUDO);
		CHMM(
			CStringFeatures<uint16_t>* obs, int32_t N, int32_t M,
			float64_t PSEUDO);
		CHMM(
			int32_t N, float64_t* p, float64_t* q, float64_t* a);
		CHMM(
			int32_t N, float64_t* p, float64_t* q, int32_t num_trans,
			float64_t* a_trans);

		/** Constructor - Initialization from model file.
		 * @param model_file Filehandle to an HMM model file (*.mod)
		 * @param PSEUDO Pseudo Value
		 */
		CHMM(FILE* model_file, float64_t PSEUDO);

		/// Constructor - Clone model h
		CHMM(CHMM* h);

		/// Destructor - Cleanup
		virtual ~CHMM();

		/** learn distribution
		 *
		 * @param data training data (parameter can be avoided if distance or
		 * kernel-based classifiers are used and distance/kernels are
		 * initialized with train data)
		 *
		 * @return whether training was successful
		 */
		virtual bool train(CFeatures* data=NULL);
		virtual int32_t get_num_model_parameters() { return N*(N+M+2); }
		virtual float64_t get_log_model_parameter(int32_t num_param);
		virtual float64_t get_log_derivative(int32_t num_param, int32_t num_example);
		virtual float64_t get_log_likelihood_example(int32_t num_example)
		{
			return model_probability(num_example);
		}

		/** initialization function - gets called by constructors.
		 * @param model model which holds definitions of states to be learned + consts
		 * @param PSEUDO Pseudo Value
		 * @param model_file Filehandle to an HMM model file (*.mod)
		 */
		bool initialize(Model* model, float64_t PSEUDO, FILE* model_file=NULL);
		//@}

		/// allocates memory that depends on N
		bool alloc_state_dependend_arrays();

		/// free memory that depends on N
		void free_state_dependend_arrays();

		/**@name probability functions.
		 * forward/backward/viterbi algorithm
		 */
		//@{
		/** forward algorithm.
		 * calculates Pr[O_0,O_1, ..., O_t, q_time=S_i| lambda] for 0<= time <= T-1
		 * Pr[O|lambda] for time >= T
		 * @param time t
		 * @param state i
		 * @param dimension dimension of observation (observations are a matrix, where a row stands for one dimension, i.e. O_0,O_1,...,O_{T-1})
		 */
		float64_t forward_comp(int32_t time, int32_t state, int32_t dimension);
		float64_t forward_comp_old(
			int32_t time, int32_t state, int32_t dimension);

		/** backward algorithm.
		 * calculates Pr[O_t+1,O_t+2, ..., O_T-1| q_time=S_i, lambda] for 0<= time <= T-1
		 * Pr[O|lambda] for time >= T
		 * @param time t
		 * @param state i
		 * @param dimension dimension of observation (observations are a matrix, where a row stands for one dimension, i.e. O_0,O_1,...,O_{T-1})
		 */
		float64_t backward_comp(int32_t time, int32_t state, int32_t dimension);
		float64_t backward_comp_old(
			int32_t time, int32_t state, int32_t dimension);

		/** calculates the probability of the best state sequence s_0,...,s_T-1 AND the path itself using the Viterbi algorithm.
		 * The path can be found in the array PATH(dimension)[0..T-1] afterwards
		 * @param dimension dimension of observation for which the most probable path is calculated (observations are a matrix, where a row stands for one dimension, i.e. O_0,O_1,...,O_{T-1})
		 */
		float64_t best_path(int32_t dimension);
		inline uint16_t get_best_path_state(int32_t dim, int32_t t)
		{
			ASSERT(PATH(dim))
			return PATH(dim)[t];
		}
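
		/* Illustrative sketch: obtaining the Viterbi decoding for observation
		 * sequence `dim` with the functions above (get_path() is declared
		 * further below); `hmm` and error handling are assumed.
		 *
		 *   float64_t path_prob = hmm->best_path(dim);       // computes and caches the best path
		 *   uint16_t s_t = hmm->get_best_path_state(dim, t);  // state at some time 0 <= t < T
		 *
		 *   // or fetch the whole state sequence and its probability at once:
		 *   float64_t prob;
		 *   T_STATES* state_seq = hmm->get_path(dim, prob);
		 */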

		/// calculates probability that observations were generated
		/// by the model using forward algorithm.
		float64_t model_probability_comp() ;

		/// inline proxy for model probability.
		inline float64_t model_probability(int32_t dimension=-1)
		{
			//for faster calculation cache model probability
			if (dimension==-1)
			{
				if (mod_prob_updated)
					return mod_prob/p_observations->get_num_vectors();
				else
					return model_probability_comp()/p_observations->get_num_vectors();
			}
			else
				return forward(p_observations->get_vector_length(dimension), 0, dimension);
		}

		/** calculates likelihood for linear model
		 * on observations in MEMORY
		 * @param dimension dimension for which probability is calculated
		 * @return model probability
		 */
		inline float64_t linear_model_probability(int32_t dimension)
		{
			float64_t lik=0;
			int32_t len=0;
			bool free_vec;
			uint16_t* o=p_observations->get_feature_vector(dimension, len, free_vec);
			float64_t* obs_b=observation_matrix_b;

			ASSERT(N==len)

			for (int32_t i=0; i<N; i++)
			{
				lik+=obs_b[*o++];
				obs_b+=M;
			}
			p_observations->free_feature_vector(o, dimension, free_vec);
			return lik;

			// the above is the speed-optimized version of:
			/*	float64_t lik=0;

				for (int32_t i=0; i<N; i++)
				lik+=get_b(i, p_observations->get_feature(dimension, i));
				return lik;
				*/
		}

		//@}

		/**@name convergence criteria
		 */
		inline bool set_iterations(int32_t num) { iterations=num; return true; }
		inline int32_t get_iterations() { return iterations; }
		inline bool set_epsilon (float64_t eps) { epsilon=eps; return true; }
		inline float64_t get_epsilon() { return epsilon; }

		/** interface for e.g. GUIHMM to run BaumWelch or Viterbi training
		 * @param type type of BaumWelch/Viterbi training
		 */
		bool baum_welch_viterbi_train(BaumWelchViterbiType type);
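
		/* A hedged sketch: the convergence criteria above (iterations/epsilon)
		 * are presumably what a training run started via baum_welch_viterbi_train()
		 * honours, e.g.
		 *
		 *   hmm->set_iterations(50);
		 *   hmm->set_epsilon(1e-5);
		 *   hmm->baum_welch_viterbi_train(BW_DEFINED); // only defined transitions/observations
		 */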

		/**@name model training
		*/
		//@{
		/** uses the Baum-Welch algorithm to train a fully connected HMM.
		 * @param train model from which the new model is estimated
		 */
		void estimate_model_baum_welch(CHMM* train);
		void estimate_model_baum_welch_trans(CHMM* train);

#ifdef USE_HMMPARALLEL_STRUCTURES
		void ab_buf_comp(
			float64_t* p_buf, float64_t* q_buf, float64_t* a_buf,
			float64_t* b_buf, int32_t dim) ;
#else
		void estimate_model_baum_welch_old(CHMM* train);
#endif

		/** uses the Baum-Welch algorithm to train the defined transitions etc.
		 * @param train model from which the new model is estimated
		 */
		void estimate_model_baum_welch_defined(CHMM* train);

		/** uses Viterbi training to train a fully connected HMM
		 * @param train model from which the new model is estimated
		 */
		void estimate_model_viterbi(CHMM* train);

		/** uses Viterbi training to train the defined transitions etc.
		 * @param train model from which the new model is estimated
		 */
		void estimate_model_viterbi_defined(CHMM* train);

		//@}

		/// estimates linear model from observations.
		bool linear_train(bool right_align=false);
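
		/* Illustrative: a linear model can be estimated directly from the
		 * observations (set beforehand via set_observations()) and evaluated
		 * without Baum-Welch/Viterbi training:
		 *
		 *   hmm->linear_train();
		 *   float64_t lik = hmm->linear_model_probability(0); // likelihood of sequence 0
		 */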

		/// compute permutation entropy
		bool permutation_entropy(int32_t window_width, int32_t sequence_number);

		/**@name output functions.*/
		//@{
		/** prints the model parameters on screen.
		 * @param verbose when false, only the model probability is printed;
		 * when true, the whole model is printed as well
		 */
		void output_model(bool verbose=false);

		/// performs output_model only for the defined transitions etc
		void output_model_defined(bool verbose=false);
		//@}


		/**@name model helper functions.*/
		//@{

		/// normalize the model to satisfy stochasticity
		void normalize(bool keep_dead_states=false);

		/// increases the number of states by num_states;
		/// the new a/b/p/q values are set to default_val,
		/// where 0<=default_val<=1
		void add_states(int32_t num_states, float64_t default_val=0);

		/// appends the append_model to the current hmm, i.e.
		/// two extra states are created. One is the end state of
		/// the current hmm with outputs cur_out (of size M), and
		/// the other is the start state of the append_model.
		/// The transition probability between these two new states is 1.
		bool append_model(
			CHMM* append_model, float64_t* cur_out, float64_t* app_out);

		/// appends the append_model to the current hmm, here
		/// no extra states are created. former q_i are multiplied by q_ji
		/// to give the a_ij from the current hmm to the append_model
		bool append_model(CHMM* append_model);

		/// set any model parameter with probability smaller than value to ZERO
		void chop(float64_t value);

		/// convert model to log probabilities
		void convert_to_log();

		/// init model with random values
		void init_model_random();

		/** init model according to const_x, learn_x.
		 * First the model is initialized with 0 for all parameters,
		 * then the parameters in learn_x are initialized with random values,
		 * and finally the const_x parameters are set and the model is normalized.
		 */
		void init_model_defined();

		/// initializes model with log(PSEUDO)
		void clear_model();

		/// initializes only parameters in learn_x with log(PSEUDO)
		void clear_model_defined();

		/// copies the model parameters from l
		void copy_model(CHMM* l);

		/** invalidates all caches.
		 * This function has to be called whenever direct changes to the model have been made.
		 * This is necessary so that the forward/backward/Viterbi algorithms do not work with stale tables.
		 */
		void invalidate_model();

		/** get status
		 * @return true if everything is ok, else false
		 */
		inline bool get_status() const
		{
			return status;
		}

		/// returns current pseudo value
		inline float64_t get_pseudo() const
		{
			return PSEUDO ;
		}

		/// sets current pseudo value
		inline void set_pseudo(float64_t pseudo)
		{
			PSEUDO=pseudo ;
		}

#ifdef USE_HMMPARALLEL_STRUCTURES
		static void* bw_dim_prefetch(void * params);
		static void* bw_single_dim_prefetch(void * params);
		static void* vit_dim_prefetch(void * params);
#endif

#ifdef FIX_POS
		/** access function to set value in fix_pos_state vector in underlying model
		 * @see Model
		 */
		inline bool set_fix_pos_state(int32_t pos, T_STATES state, char value)
		{
			if (!model)
				return false ;
			model->set_fix_pos_state(pos, state, N, value) ;
			return true ;
		} ;
#endif
		//@}

		/** observation functions
		 * set/get observation matrix
		 */
		//@{
		/** set new observations
		 * sets the observation pointer and initializes observation-dependent caches
		 * if hmm is given, then the caches of the model hmm are used
		 */
		void set_observations(CStringFeatures<uint16_t>* obs, CHMM* hmm=NULL);

		/** set new observations
		 * only set the observation pointer and drop caches if there were any
		 */
		void set_observation_nocache(CStringFeatures<uint16_t>* obs);

		/// return observation pointer
		inline CStringFeatures<uint16_t>* get_observations()
		{
			SG_REF(p_observations);
			return p_observations;
		}
		//@}

		/**@name load/save functions.
		 * for observations/model/traindefinitions
		 */
		//@{
		/** read definitions file (learn_x,const_x) used for training.
		 * -format specs: definition_file (train.def)
		 % HMM-TRAIN - specification
		 % learn_a - elements in state_transition_matrix to be learned
		 % learn_b - elements in observation_per_state_matrix to be learned
		 %			note: each line stands for
		 %				state, observation(0), observation(1)...observation(NOW)
		 % learn_p - elements in initial distribution to be learned
		 % learn_q - elements in the end-state distribution to be learned
		 %
		 % const_x - specifies initial values of elements
		 %				rest is assumed to be 0.0
		 %
		 %	NOTE: IMPLICIT DEFINES:
		 %		define A 0
		 %		define C 1
		 %		define G 2
		 %		define T 3

		 learn_a=[ [int32_t,int32_t];
		 [int32_t,int32_t];
		 [int32_t,int32_t];
		 ........
		 [int32_t,int32_t];
		 [-1,-1];
		 ];

		 learn_b=[ [int32_t,int32_t,int32_t,...,int32_t];
		 [int32_t,int32_t,int32_t,...,int32_t];
		 [int32_t,int32_t,int32_t,...,int32_t];
		 ........
		 [int32_t,int32_t,int32_t,...,int32_t];
		 [-1,-1];
		 ];

		 learn_p= [ int32_t, ... , int32_t, -1 ];

		 learn_q= [ int32_t, ... , int32_t, -1 ];


		 const_a=[ [int32_t,int32_t,float64_t];
		 [int32_t,int32_t,float64_t];
		 [int32_t,int32_t,float64_t];
		 ........
		 [int32_t,int32_t,float64_t];
		 [-1,-1,-1];
		 ];

		 const_b=[ [int32_t,int32_t,int32_t,...,int32_t,float64_t];
		 [int32_t,int32_t,int32_t,...,int32_t,float64_t];
		 [int32_t,int32_t,int32_t,...,int32_t,float64_t];
		 ........
		 [int32_t,int32_t,int32_t,...,int32_t,float64_t];
		 [-1,-1,-1];
		 ];

		 const_p[]=[ [int32_t, float64_t], ... , [int32_t,float64_t], [-1,-1] ];
		 const_q[]=[ [int32_t, float64_t], ... , [int32_t,float64_t], [-1,-1] ];

		 * @param file filehandle to definitions file
		 * @param verbose true for verbose messages
		 * @param initialize true to initialize the underlying HMM
		 */
		bool load_definitions(FILE* file, bool verbose, bool initialize=true);
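
		/* A minimal concrete definitions file following the template above;
		 * the indices and values are made up for illustration only:
		 *
		 *   learn_a=[ [0,1];
		 *             [1,2];
		 *             [-1,-1];
		 *           ];
		 *
		 *   learn_p=[ 0, 1, -1 ];
		 *   learn_q=[ 2, -1 ];
		 *
		 *   const_a=[ [0,0,0.75];
		 *             [-1,-1,-1];
		 *           ];
		 */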

		/** read model from file.
		 -format specs: model_file (model.hmm)
		 % HMM - specification
		 % N  - number of states
		 % M  - number of observation_tokens
		 % a is state_transition_matrix
		 % size(a)= [N,N]
		 %
		 % b is observation_per_state_matrix
		 % size(b)= [N,M]
		 %
		 % p is initial distribution
		 % size(p)= [1, N]

		 N=int32_t;
		 M=int32_t;

		 p=[float64_t,float64_t...float64_t];
		 q=[float64_t,float64_t...float64_t];

		 a=[ [float64_t,float64_t...float64_t];
		 [float64_t,float64_t...float64_t];
		 [float64_t,float64_t...float64_t];
		 [float64_t,float64_t...float64_t];
		 [float64_t,float64_t...float64_t];
		 ];

		 b=[ [float64_t,float64_t...float64_t];
		 [float64_t,float64_t...float64_t];
		 [float64_t,float64_t...float64_t];
		 [float64_t,float64_t...float64_t];
		 [float64_t,float64_t...float64_t];
		 ];
		 * @param file filehandle to model file
		 */
		bool load_model(FILE* file);
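
		/* A minimal concrete model file following the format above, with N=2
		 * states and M=4 observation tokens; the values are illustrative and
		 * this header does not specify whether they are expected in the plain
		 * or the log domain:
		 *
		 *   N=2;
		 *   M=4;
		 *
		 *   p=[0.5,0.5];
		 *   q=[0.5,0.5];
		 *
		 *   a=[ [0.9,0.1];
		 *       [0.2,0.8];
		 *     ];
		 *
		 *   b=[ [0.25,0.25,0.25,0.25];
		 *       [0.4,0.1,0.1,0.4];
		 *     ];
		 */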

		/** save model to file.
		 * @param file filehandle to model file
		 */
		bool save_model(FILE* file);

		/** save model derivatives to file in ascii format.
		 * @param file filehandle
		 */
		bool save_model_derivatives(FILE* file);

		/** save model derivatives to file in binary format.
		 * @param file filehandle
		 */
		bool save_model_derivatives_bin(FILE* file);

		/** save model in binary format.
		 * @param file filehandle
		 */
		bool save_model_bin(FILE* file);

		/// numerically check whether derivatives were calculated correctly
		bool check_model_derivatives() ;
		bool check_model_derivatives_combined() ;

		/** get viterbi path and path probability
		 * @param dim dimension for which to obtain best path
		 * @param prob likelihood of path
		 * @return viterbi path
		 */
		T_STATES* get_path(int32_t dim, float64_t& prob);

		/** save Viterbi path in ascii format
		 * @param file filehandle
		 */
		bool save_path(FILE* file);

		/** save Viterbi path derivatives in ascii format
		 * @param file filehandle
		 */
		bool save_path_derivatives(FILE* file);

		/** save Viterbi path derivatives in binary format
		 * @param file filehandle
		 */
		bool save_path_derivatives_bin(FILE* file);

#ifdef USE_HMMDEBUG
		/// numerically check whether derivatives were calculated correctly
		bool check_path_derivatives() ;
#endif //USE_HMMDEBUG

		/** save model probability in binary format
		 * @param file filehandle
		 */
		bool save_likelihood_bin(FILE* file);

		/** save model probability in ascii format
		 * @param file filehandle
		 */
		bool save_likelihood(FILE* file);
		//@}

		/**@name access functions for model parameters
		 * for all the arrays a,b,p,q,A,B,psi
		 * and scalar model parameters like N,M
		 */
		//@{

		/// access function for number of states N
		inline T_STATES get_N() const { return N ; }

		/// access function for number of observations M
		inline int32_t get_M() const { return M ; }

		/** access function for probability of end states
		 * @param offset index 0...N-1
		 * @param value value to be set
		 */
		inline void set_q(T_STATES offset, float64_t value)
		{
#ifdef HMM_DEBUG
			if (offset>=N)
				SG_DEBUG("index out of range in set_q(%i,%e) [%i]\n", offset,value,N)
#endif
			end_state_distribution_q[offset]=value;
		}

		/** access function for probability of first state
		 * @param offset index 0...N-1
		 * @param value value to be set
		 */
		inline void set_p(T_STATES offset, float64_t value)
		{
#ifdef HMM_DEBUG
			if (offset>=N)
				SG_DEBUG("index out of range in set_p(%i,.) [%i]\n", offset,N)
#endif
			initial_state_distribution_p[offset]=value;
		}

		/** access function for matrix A
		 * @param line_ row in matrix 0...N-1
		 * @param column column in matrix 0...N-1
		 * @param value value to be set
		 */
		inline void set_A(T_STATES line_, T_STATES column, float64_t value)
		{
#ifdef HMM_DEBUG
			if ((line_>N)||(column>N))
				SG_DEBUG("index out of range in set_A(%i,%i,.) [%i,%i]\n",line_,column,N,N)
#endif
			transition_matrix_A[line_+column*N]=value;
		}

		/** access function for matrix a
		 * @param line_ row in matrix 0...N-1
		 * @param column column in matrix 0...N-1
		 * @param value value to be set
		 */
		inline void set_a(T_STATES line_, T_STATES column, float64_t value)
		{
#ifdef HMM_DEBUG
			if ((line_>N)||(column>N))
				SG_DEBUG("index out of range in set_a(%i,%i,.) [%i,%i]\n",line_,column,N,N)
#endif
			transition_matrix_a[line_+column*N]=value; // look also best_path!
		}

		/** access function for matrix B
		 * @param line_ row in matrix 0...N-1
		 * @param column column in matrix 0...M-1
		 * @param value value to be set
		 */
		inline void set_B(T_STATES line_, uint16_t column, float64_t value)
		{
#ifdef HMM_DEBUG
			if ((line_>=N)||(column>=M))
				SG_DEBUG("index out of range in set_B(%i,%i) [%i,%i]\n", line_, column,N,M)
#endif
			observation_matrix_B[line_*M+column]=value;
		}

		/** access function for matrix b
		 * @param line_ row in matrix 0...N-1
		 * @param column column in matrix 0...M-1
		 * @param value value to be set
		 */
		inline void set_b(T_STATES line_, uint16_t column, float64_t value)
		{
#ifdef HMM_DEBUG
			if ((line_>=N)||(column>=M))
				SG_DEBUG("index out of range in set_b(%i,%i) [%i,%i]\n", line_, column,N,M)
#endif
			observation_matrix_b[line_*M+column]=value;
		}

		/** access function for backtracking table psi
		 * @param time time 0...T-1
		 * @param state state 0...N-1
		 * @param value value to be set
		 * @param dimension dimension of observations 0...DIMENSION-1
		 */
		inline void set_psi(
			int32_t time, T_STATES state, T_STATES value, int32_t dimension)
		{
#ifdef HMM_DEBUG
			if ((time>=p_observations->get_max_vector_length())||(state>N))
				SG_DEBUG("index out of range in set_psi(%i,%i,.) [%i,%i]\n",time,state,p_observations->get_max_vector_length(),N)
#endif
			STATES_PER_OBSERVATION_PSI(dimension)[time*N+state]=value;
		}

		/** access function for probability of end states
		 * @param offset index 0...N-1
		 * @return value at offset
		 */
		inline float64_t get_q(T_STATES offset) const
		{
#ifdef HMM_DEBUG
			if (offset>=N)
				SG_DEBUG("index out of range in %e=get_q(%i) [%i]\n", end_state_distribution_q[offset],offset,N)
#endif
			return end_state_distribution_q[offset];
		}

		/** access function for probability of initial states
		 * @param offset index 0...N-1
		 * @return value at offset
		 */
		inline float64_t get_p(T_STATES offset) const
		{
#ifdef HMM_DEBUG
			if (offset>=N)
				SG_DEBUG("index out of range in get_p(%i,.) [%i]\n", offset,N)
#endif
			return initial_state_distribution_p[offset];
		}

		/** access function for matrix A
		 * @param line_ row in matrix 0...N-1
		 * @param column column in matrix 0...N-1
		 * @return value at position (line_, column)
		 */
		inline float64_t get_A(T_STATES line_, T_STATES column) const
		{
#ifdef HMM_DEBUG
			if ((line_>N)||(column>N))
				SG_DEBUG("index out of range in get_A(%i,%i) [%i,%i]\n",line_,column,N,N)
#endif
			return transition_matrix_A[line_+column*N];
		}

		/** access function for matrix a
		 * @param line_ row in matrix 0...N-1
		 * @param column column in matrix 0...N-1
		 * @return value at position (line_, column)
		 */
		inline float64_t get_a(T_STATES line_, T_STATES column) const
		{
#ifdef HMM_DEBUG
			if ((line_>N)||(column>N))
				SG_DEBUG("index out of range in get_a(%i,%i) [%i,%i]\n",line_,column,N,N)
#endif
			return transition_matrix_a[line_+column*N]; // look also best_path()!
		}

		/** access function for matrix B
		 * @param line_ row in matrix 0...N-1
		 * @param column column in matrix 0...M-1
		 * @return value at position (line_, column)
		 */
		inline float64_t get_B(T_STATES line_, uint16_t column) const
		{
#ifdef HMM_DEBUG
			if ((line_>=N)||(column>=M))
				SG_DEBUG("index out of range in get_B(%i,%i) [%i,%i]\n", line_, column,N,M)
#endif
			return observation_matrix_B[line_*M+column];
		}

		/** access function for matrix b
		 * @param line_ row in matrix 0...N-1
		 * @param column column in matrix 0...M-1
		 * @return value at position (line_, column)
		 */
		inline float64_t get_b(T_STATES line_, uint16_t column) const
		{
#ifdef HMM_DEBUG
			if ((line_>=N)||(column>=M))
				SG_DEBUG("index out of range in get_b(%i,%i) [%i,%i]\n", line_, column,N,M)
#endif
			//SG_PRINT("idx %d\n", line_*M+column)
			return observation_matrix_b[line_*M+column];
		}

		/** access function for backtracking table psi
		 * @param time time 0...T-1
		 * @param state state 0...N-1
		 * @param dimension dimension of observations 0...DIMENSION-1
		 * @return state at specified time and position
		 */
		inline T_STATES get_psi(
			int32_t time, T_STATES state, int32_t dimension) const
		{
#ifdef HMM_DEBUG
			if ((time>=p_observations->get_max_vector_length())||(state>N))
				SG_DEBUG("index out of range in get_psi(%i,%i) [%i,%i]\n",time,state,p_observations->get_max_vector_length(),N)
#endif
			return STATES_PER_OBSERVATION_PSI(dimension)[time*N+state];
		}

		//@}

		/** @return object name */
		virtual const char* get_name() const { return "HMM"; }

	protected:
		/**@name model specific variables.
		 * these are p,q,a,b,N,M etc
		 */
		//@{
		/// number of observation symbols, e.g. ACGT -> 0123
		int32_t M;

		/// number of states
		int32_t N;

		/// define pseudocounts against overfitting
		float64_t PSEUDO;

		/// line number while processing input files
		int32_t line;

		/// observation matrix
		CStringFeatures<uint16_t>* p_observations;

		/// train definition for HMM
		Model* model;

		/// matrix  of absolute counts of transitions
		float64_t* transition_matrix_A;

		/// matrix of absolute counts of observations within each state
		float64_t* observation_matrix_B;

		/// transition matrix
		float64_t* transition_matrix_a;

		/// initial distribution of states
		float64_t* initial_state_distribution_p;

		/// distribution of end-states
		float64_t* end_state_distribution_q;

		/// distribution of observations within each state
		float64_t* observation_matrix_b;

		/// convergence criterion iterations
		int32_t iterations;
		int32_t iteration_count;

		/// convergence criterion epsilon
		float64_t epsilon;
		int32_t conv_it;

		/// probability of best path
		float64_t all_pat_prob;

		/// probability of best path
		float64_t pat_prob;

		/// probability of model
		float64_t mod_prob;

		/// true if model probability is up to date
		bool mod_prob_updated;

		/// true if path probability is up to date
		bool all_path_prob_updated;

		/// dimension for which path_deriv was calculated
		int32_t path_deriv_dimension;

		/// true if path derivative is up to date
		bool path_deriv_updated;

		/// true if the model is using log likelihood
		bool loglikelihood;

		/// true->ok, false->error
		bool status;

		/// true->caches stolen from other HMMs, false->got own caches
		bool reused_caches;
		//@}

#ifdef USE_HMMPARALLEL_STRUCTURES
		/** array of size N*parallel.get_num_threads() for temporary calculations */
		float64_t** arrayN1 /*[parallel.get_num_threads()]*/ ;
		/** array of size N*parallel.get_num_threads() for temporary calculations */
		float64_t** arrayN2 /*[parallel.get_num_threads()]*/ ;
#else //USE_HMMPARALLEL_STRUCTURES
		/** array of size N for temporary calculations */
		float64_t* arrayN1;
		/** array of size N for temporary calculations */
		float64_t* arrayN2;
#endif //USE_HMMPARALLEL_STRUCTURES

#ifdef USE_LOGSUMARRAY
#ifdef USE_HMMPARALLEL_STRUCTURES
		/** array for temporary calculations of log_sum */
		float64_t** arrayS /*[parallel.get_num_threads()]*/;
#else
		/** array for temporary calculations of log_sum */
		float64_t* arrayS;
#endif // USE_HMMPARALLEL_STRUCTURES
#endif // USE_LOGSUMARRAY

#ifdef USE_HMMPARALLEL_STRUCTURES

		/// cache for forward variables; can be terribly huge: O(T*N)
		T_ALPHA_BETA* alpha_cache /*[parallel.get_num_threads()]*/ ;
		/// cache for backward variables; can be terribly huge: O(T*N)
		T_ALPHA_BETA* beta_cache /*[parallel.get_num_threads()]*/ ;

		/// backtracking table for Viterbi; can be terribly huge: O(T*N)
		T_STATES** states_per_observation_psi /*[parallel.get_num_threads()]*/ ;

		/// best path (=state sequence) through model
		T_STATES** path /*[parallel.get_num_threads()]*/ ;

		/// true if path probability is up to date
		bool* path_prob_updated /*[parallel.get_num_threads()]*/;

		/// dimension for which path_prob was calculated
		int32_t* path_prob_dimension /*[parallel.get_num_threads()]*/ ;

#else //USE_HMMPARALLEL_STRUCTURES
		/// cache for forward variables; can be terribly huge: O(T*N)
		T_ALPHA_BETA alpha_cache;
		/// cache for backward variables; can be terribly huge: O(T*N)
		T_ALPHA_BETA beta_cache;

		/// backtracking table for Viterbi; can be terribly huge: O(T*N)
		T_STATES* states_per_observation_psi;

		/// best path (=state sequence) through model
		T_STATES* path;

		/// true if path probability is up to date
		bool path_prob_updated;

		/// dimension for which path_prob was calculated
		int32_t path_prob_dimension;

#endif //USE_HMMPARALLEL_STRUCTURES
		//@}

		/** GOTN */
		static const int32_t GOTN;
		/** GOTM */
		static const int32_t GOTM;
		/** GOTO */
		static const int32_t GOTO;
		/** GOTa */
		static const int32_t GOTa;
		/** GOTb */
		static const int32_t GOTb;
		/** GOTp */
		static const int32_t GOTp;
		/** GOTq */
		static const int32_t GOTq;

		/** GOTlearn_a */
		static const int32_t GOTlearn_a;
		/** GOTlearn_b */
		static const int32_t GOTlearn_b;
		/** GOTlearn_p */
		static const int32_t GOTlearn_p;
		/** GOTlearn_q */
		static const int32_t GOTlearn_q;
		/** GOTconst_a */
		static const int32_t GOTconst_a;
		/** GOTconst_b */
		static const int32_t GOTconst_b;
		/** GOTconst_p */
		static const int32_t GOTconst_p;
		/** GOTconst_q */
		static const int32_t GOTconst_q;

		public:
		/**@name functions for observations
		 * management and access functions for observation matrix
		 */
		//@{

		/// calculates probability of being in state i at time t for dimension
inline float64_t state_probability(
	int32_t time, int32_t state, int32_t dimension)
{
	return forward(time, state, dimension) + backward(time, state, dimension) - model_probability(dimension);
}

/// calculates probability of being in state i at time t and state j at time t+1 for dimension
inline float64_t transition_probability(
	int32_t time, int32_t state_i, int32_t state_j, int32_t dimension)
{
	return forward(time, state_i, dimension) +
		backward(time+1, state_j, dimension) +
		get_a(state_i,state_j) + get_b(state_j,p_observations->get_feature(dimension ,time+1)) - model_probability(dimension);
}
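
/* In the notation of the Rabiner tutorial cited in the class description,
 * and with all quantities kept in the log domain, the two functions above
 * correspond to log gamma_t(i) and log xi_t(i,j):
 *
 *   log gamma_t(i) = log alpha_t(i) + log beta_t(i) - log P(O|lambda)
 *   log xi_t(i,j)  = log alpha_t(i) + log a_ij + log b_j(O_{t+1})
 *                    + log beta_{t+1}(j) - log P(O|lambda)
 */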

/**@name derivatives of model probabilities.
 * computes log dp(lambda)/d lambda_i
 * @param dimension dimension for which the derivatives are calculated
 * @param i,j parameter specific
 */
//@{

/** computes log dp(lambda)/d b_ij for linear model
*/
inline float64_t linear_model_derivative(
	T_STATES i, uint16_t j, int32_t dimension)
{
	float64_t der=0;

	for (int32_t k=0; k<N; k++)
	{
		if (k!=i || p_observations->get_feature(dimension, k) != j)
			der+=get_b(k, p_observations->get_feature(dimension, k));
	}

	return der;
}

/** computes log dp(lambda)/d p_i.
 * backward path down to time 0, multiplied by the probability of observing the first symbol in state i
 */
inline float64_t model_derivative_p(T_STATES i, int32_t dimension)
{
	return backward(0,i,dimension)+get_b(i, p_observations->get_feature(dimension, 0));
}

/** computes log dp(lambda)/d q_i.
 * forward path up to time T-1
 */
inline float64_t model_derivative_q(T_STATES i, int32_t dimension)
{
	return forward(p_observations->get_vector_length(dimension)-1,i,dimension) ;
}

/// computes log dp(lambda)/d a_ij.
inline float64_t model_derivative_a(T_STATES i, T_STATES j, int32_t dimension)
{
	float64_t sum=-CMath::INFTY;
	for (int32_t t=0; t<p_observations->get_vector_length(dimension)-1; t++)
		sum= CMath::logarithmic_sum(sum, forward(t, i, dimension) + backward(t+1, j, dimension) + get_b(j, p_observations->get_feature(dimension,t+1)));

	return sum;
}


/// computes log dp(lambda)/d b_ij.
inline float64_t model_derivative_b(T_STATES i, uint16_t j, int32_t dimension)
{
	float64_t sum=-CMath::INFTY;
	for (int32_t t=0; t<p_observations->get_vector_length(dimension); t++)
	{
		if (p_observations->get_feature(dimension,t)==j)
			sum= CMath::logarithmic_sum(sum, forward(t,i,dimension)+backward(t,i,dimension)-get_b(i,p_observations->get_feature(dimension,t)));
	}
	//if (sum==-CMath::INFTY)
	// SG_DEBUG("log derivative is -inf: dim=%i, state=%i, obs=%i\n",dimension, i, j)
	return sum;
}
//@}

/**@name derivatives of path probabilities.
 * computes d log p(lambda,best_path)/d lambda_i
 * @param dimension dimension for which the derivatives are calculated
 * @param i,j parameter specific
 */
//@{

///computes d log p(lambda,best_path)/d p_i
inline float64_t path_derivative_p(T_STATES i, int32_t dimension)
{
	best_path(dimension);
	return (i==PATH(dimension)[0]) ? (exp(-get_p(PATH(dimension)[0]))) : (0) ;
}

/// computes d log p(lambda,best_path)/d q_i
inline float64_t path_derivative_q(T_STATES i, int32_t dimension)
{
	best_path(dimension);
	return (i==PATH(dimension)[p_observations->get_vector_length(dimension)-1]) ? (exp(-get_q(PATH(dimension)[p_observations->get_vector_length(dimension)-1]))) : 0 ;
}

/// computes d log p(lambda,best_path)/d a_ij
inline float64_t path_derivative_a(T_STATES i, T_STATES j, int32_t dimension)
{
	prepare_path_derivative(dimension) ;
	return (get_A(i,j)==0) ? (0) : (get_A(i,j)*exp(-get_a(i,j))) ;
}

/// computes d log p(lambda,best_path)/d b_ij
inline float64_t path_derivative_b(T_STATES i, uint16_t j, int32_t dimension)
{
	prepare_path_derivative(dimension) ;
	return (get_B(i,j)==0) ? (0) : (get_B(i,j)*exp(-get_b(i,j))) ;
}

//@}


protected:
	/**@name input helper functions.
	 * for reading model/definition/observation files
	 */
	//@{
	/// put a sequence of numbers into the buffer
	bool get_numbuffer(FILE* file, char* buffer, int32_t length);

	/// expect open bracket.
	void open_bracket(FILE* file);

	/// expect closing bracket
	void close_bracket(FILE* file);

	/// expect comma or space.
	bool comma_or_space(FILE* file);

	/// parse error messages
	inline void error(int32_t p_line, const char* str)
	{
		if (p_line)
			SG_ERROR("error in line %d %s\n", p_line, str)
		else
			SG_ERROR("error %s\n", str)
	}
	//@}

	/// initialization function that is called before path_derivatives are calculated
	inline void prepare_path_derivative(int32_t dim)
	{
		if (path_deriv_updated && (path_deriv_dimension==dim))
			return ;
		int32_t i,j,t ;
		best_path(dim);
		//initialize with zeros
		for (i=0; i<N; i++)
		{
			for (j=0; j<N; j++)
				set_A(i,j, 0);
			for (j=0; j<M; j++)
				set_B(i,j, 0);
		}

		//counting occurrences for A and B
		for (t=0; t<p_observations->get_vector_length(dim)-1; t++)
		{
			set_A(PATH(dim)[t], PATH(dim)[t+1], get_A(PATH(dim)[t], PATH(dim)[t+1])+1);
			set_B(PATH(dim)[t], p_observations->get_feature(dim,t),  get_B(PATH(dim)[t], p_observations->get_feature(dim,t))+1);
		}
		set_B(PATH(dim)[p_observations->get_vector_length(dim)-1], p_observations->get_feature(dim,p_observations->get_vector_length(dim)-1),  get_B(PATH(dim)[p_observations->get_vector_length(dim)-1], p_observations->get_feature(dim,p_observations->get_vector_length(dim)-1)) + 1);
		path_deriv_dimension=dim ;
		path_deriv_updated=true ;
	} ;
	//@}

	/// inline proxies for forward pass
	inline float64_t forward(int32_t time, int32_t state, int32_t dimension)
	{
		if (time<1)
			time=0;

		if (ALPHA_CACHE(dimension).table && (dimension==ALPHA_CACHE(dimension).dimension) && ALPHA_CACHE(dimension).updated)
		{
			if (time<p_observations->get_vector_length(dimension))
				return ALPHA_CACHE(dimension).table[time*N+state];
			else
				return ALPHA_CACHE(dimension).sum;
		}
		else
			return forward_comp(time, state, dimension) ;
	}

	/// inline proxies for backward pass
	inline float64_t backward(int32_t time, int32_t state, int32_t dimension)
	{
		if (BETA_CACHE(dimension).table && (dimension==BETA_CACHE(dimension).dimension) && (BETA_CACHE(dimension).updated))
		{
			if (time<0)
				return BETA_CACHE(dimension).sum;
			if (time<p_observations->get_vector_length(dimension))
				return BETA_CACHE(dimension).table[time*N+state];
			else
				return -CMath::INFTY;
		}
		else
			return backward_comp(time, state, dimension) ;
	}

};
}
#endif