
kautodiff.c

  1. #include "config.h"
  2. #include <stdlib.h>
  3. #include <assert.h>
  4. #include <stdarg.h>
  5. #include <string.h>
  6. #include <float.h>
  7. #include <math.h>
  8. #include "kautodiff.h"
  9. typedef struct {
  10. uint64_t s[2];
  11. double n_gset;
  12. int n_iset;
  13. volatile int lock;
  14. } kad_rng_t;
  15. /**********************
  16. * Graph construction *
  17. **********************/
  18. static inline kad_node_t *kad_new_core(int n_d, int op, int n_child)
  19. {
  20. kad_node_t *s;
  21. if (n_d >= KAD_MAX_DIM) return 0;
  22. s = (kad_node_t*)calloc(1, sizeof(kad_node_t));
  23. s->n_d = n_d, s->op = op, s->n_child = n_child;
  24. if (s->n_child) s->child = (kad_node_t**)calloc(s->n_child, sizeof(kad_node_t*));
  25. return s;
  26. }
  27. static inline kad_node_t *kad_vleaf(uint8_t flag, float *x, float *g, int n_d, va_list ap)
  28. {
  29. int i;
  30. kad_node_t *p;
  31. if (n_d > KAD_MAX_DIM) return 0;
  32. p = (kad_node_t*)calloc(1, sizeof(kad_node_t));
  33. p->n_d = n_d;
  34. for (i = 0; i < n_d; ++i)
  35. p->d[i] = va_arg(ap, int32_t);
  36. p->x = x, p->g = g, p->flag = flag;
  37. return p;
  38. }
  39. kad_node_t *kad_const(float *x, int n_d, ...)
  40. {
  41. kad_node_t *p;
  42. va_list ap;
  43. va_start(ap, n_d); p = kad_vleaf(KAD_CONST, x, 0, n_d, ap); va_end(ap);
  44. return p;
  45. }
  46. kad_node_t *kad_feed(int n_d, ...)
  47. {
  48. kad_node_t *p;
  49. va_list ap;
  50. va_start(ap, n_d); p = kad_vleaf(0, 0, 0, n_d, ap); va_end(ap);
  51. return p;
  52. }
  53. kad_node_t *kad_var(float *x, float *g, int n_d, ...)
  54. {
  55. kad_node_t *p;
  56. va_list ap;
  57. va_start(ap, n_d); p = kad_vleaf(KAD_VAR, x, g, n_d, ap); va_end(ap);
  58. return p;
  59. }
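/* Illustrative sketch (not part of this file; buffer and node names are made up): how the leaf
 * constructors above are typically combined. The variadic arguments are the per-axis sizes, and
 * the value/gradient buffers passed to kad_var()/kad_const() remain owned by the caller.
 *
 *   float wx[3*4], wg[3*4];
 *   kad_node_t *in = kad_feed(2, 1, 4);         // external input, shape 1 x 4 (batch x feature)
 *   kad_node_t *w  = kad_var(wx, wg, 2, 3, 4);  // trainable 3 x 4 parameter with gradient wg
 *   // a non-trainable constant would be kad_const(cx, 2, 3, 4)
 */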
  60. static inline kad_node_t *kad_finalize_node(kad_node_t *s) /* a helper function */
  61. {
  62. int i;
  63. if (kad_op_list[s->op](s, KAD_SYNC_DIM) < 0) { /* check dimension */
  64. if (s->ptr) free(s->ptr);
  65. free(s->child); free(s);
  66. return 0;
  67. }
  68. for (i = 0; i < s->n_child; ++i)
  69. if (kad_is_back(s->child[i]))
  70. break;
  71. if (i < s->n_child) s->flag |= KAD_VAR;
  72. return s;
  73. }
  74. /********** Simple arithmetic **********/
  75. static inline kad_node_t *kad_op2_core(int op, kad_node_t *x, kad_node_t *y)
  76. {
  77. kad_node_t *s;
  78. s = kad_new_core(0, op, 2);
  79. s->child[0] = x, s->child[1] = y;
  80. return kad_finalize_node(s);
  81. }
  82. static inline kad_node_t *kad_op1_core(int op, kad_node_t *x)
  83. {
  84. kad_node_t *s;
  85. s = kad_new_core(0, op, 1);
  86. s->child[0] = x;
  87. return kad_finalize_node(s);
  88. }
  89. #define KAD_FUNC_OP2(fname, op) kad_node_t *fname(kad_node_t *x, kad_node_t *y) { return kad_op2_core((op), x, y); }
  90. KAD_FUNC_OP2(kad_add, 1)
  91. KAD_FUNC_OP2(kad_sub, 23)
  92. KAD_FUNC_OP2(kad_mul, 2)
  93. KAD_FUNC_OP2(kad_cmul, 3)
  94. KAD_FUNC_OP2(kad_matmul, 9)
  95. KAD_FUNC_OP2(kad_ce_multi, 13)
  96. KAD_FUNC_OP2(kad_ce_bin, 22)
  97. KAD_FUNC_OP2(kad_ce_bin_neg, 4)
  98. KAD_FUNC_OP2(kad_mse, 29)
  99. #define KAD_FUNC_OP1(fname, op) kad_node_t *fname(kad_node_t *x) { return kad_op1_core((op), x); }
  100. KAD_FUNC_OP1(kad_log, 27)
  101. KAD_FUNC_OP1(kad_exp, 33)
  102. KAD_FUNC_OP1(kad_sin, 34)
  103. KAD_FUNC_OP1(kad_square, 5)
  104. KAD_FUNC_OP1(kad_sigm, 6)
  105. KAD_FUNC_OP1(kad_tanh, 7)
  106. KAD_FUNC_OP1(kad_relu, 8)
  107. KAD_FUNC_OP1(kad_1minus, 11)
  108. KAD_FUNC_OP1(kad_softmax, 14)
  109. KAD_FUNC_OP1(kad_stdnorm, 32)
  110. kad_node_t *kad_ce_multi_weighted(kad_node_t *pred, kad_node_t *truth, kad_node_t *weight)
  111. {
  112. kad_node_t *s;
  113. s = kad_new_core(0, 13, 3);
  114. s->child[0] = pred, s->child[1] = truth, s->child[2] = weight;
  115. return kad_finalize_node(s);
  116. }
  117. /********** Convolution **********/
  118. /* compute output dimension and padding sizes on both sides */
  119. static inline int conv_find_par(int in_size, int kernel_size, int stride, int pad0, int *new_pad0, int *new_pad1)
  120. {
  121. int out_size, pad_both;
  122. /* key equation: out_size = (in_size - kernel_size + pad_both) / stride + 1 */
  123. if (pad0 == KAD_PAD_SAME && stride == 1) out_size = in_size;
  124. else out_size = (in_size - kernel_size + (pad0 > 0? pad0 : 0) + stride - 1) / stride + 1;
  125. pad_both = (out_size - 1) * stride + kernel_size - in_size;
  126. *new_pad0 = pad_both / 2;
  127. *new_pad1 = pad_both - *new_pad0;
  128. return out_size;
  129. }
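/* Worked example of the equation above (numbers chosen only for illustration):
 *   in_size = 28, kernel_size = 3, stride = 1, pad0 = KAD_PAD_SAME:
 *     out_size = 28 (SAME padding with stride 1 keeps the input size),
 *     pad_both = (28 - 1) * 1 + 3 - 28 = 2, so *new_pad0 = 1 and *new_pad1 = 1.
 *   in_size = 28, kernel_size = 3, stride = 2, pad0 = 0:
 *     out_size = (28 - 3 + 0 + 2 - 1) / 2 + 1 = 14,
 *     pad_both = (14 - 1) * 2 + 3 - 28 = 1, split as 0 on one side and 1 on the other. */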
  130. typedef struct {
  131. int kernel_size, stride, pad[2];
  132. } conv_conf_t;
  133. static inline conv_conf_t *conv2d_gen_aux(int in_row, int in_col, int kernel_r, int kernel_c, int stride_r, int stride_c, int top_pad, int left_pad)
  134. {
  135. conv_conf_t *cnn;
  136. cnn = (conv_conf_t*)calloc(2, sizeof(conv_conf_t));
  137. cnn[0].kernel_size = kernel_r, cnn[0].stride = stride_r;
  138. cnn[1].kernel_size = kernel_c, cnn[1].stride = stride_c;
  139. conv_find_par(in_row, kernel_r, stride_r, top_pad, &cnn[0].pad[0], &cnn[0].pad[1]);
  140. conv_find_par(in_col, kernel_c, stride_c, left_pad, &cnn[1].pad[0], &cnn[1].pad[1]);
  141. return cnn;
  142. }
  143. kad_node_t *kad_conv2d(kad_node_t *x, kad_node_t *w, int stride_r, int stride_c, int top_pad, int left_pad)
  144. {
  145. kad_node_t *s;
  146. if (x->n_d != 4 || w->n_d != 4) return 0;
  147. s = kad_new_core(0, 16, 2);
  148. s->child[0] = x, s->child[1] = w;
  149. s->ptr = conv2d_gen_aux(x->d[2], x->d[3], w->d[2], w->d[3], stride_r, stride_c, top_pad, left_pad);
  150. s->ptr_size = sizeof(conv_conf_t) * 2;
  151. return kad_finalize_node(s);
  152. }
  153. kad_node_t *kad_max2d(kad_node_t *x, int kernel_r, int kernel_c, int stride_r, int stride_c, int top_pad, int left_pad)
  154. {
  155. kad_node_t *s;
  156. if (x->n_d != 4) return 0;
  157. s = kad_new_core(0, 17, 1);
  158. s->child[0] = x;
  159. s->ptr = conv2d_gen_aux(x->d[2], x->d[3], kernel_r, kernel_c, stride_r, stride_c, top_pad, left_pad);
  160. s->ptr_size = sizeof(conv_conf_t) * 2;
  161. return kad_finalize_node(s);
  162. }
  163. static inline conv_conf_t *conv1d_gen_aux(int in_col, int kernel_c, int stride_c, int left_pad)
  164. {
  165. conv_conf_t *cnn;
  166. cnn = (conv_conf_t*)calloc(1, sizeof(conv_conf_t));
  167. cnn->kernel_size = kernel_c, cnn->stride = stride_c;
  168. conv_find_par(in_col, kernel_c, stride_c, left_pad, &cnn->pad[0], &cnn->pad[1]);
  169. return cnn;
  170. }
  171. kad_node_t *kad_conv1d(kad_node_t *x, kad_node_t *w, int stride, int left_pad)
  172. {
  173. kad_node_t *s;
  174. if (x->n_d != 3 || w->n_d != 3) return 0;
  175. s = kad_new_core(0, 18, 2);
  176. s->child[0] = x, s->child[1] = w;
  177. s->ptr = conv1d_gen_aux(x->d[2], w->d[2], stride, left_pad);
  178. s->ptr_size = sizeof(conv_conf_t);
  179. return kad_finalize_node(s);
  180. }
  181. kad_node_t *kad_max1d(kad_node_t *x, int kernel_size, int stride, int left_pad)
  182. {
  183. kad_node_t *s;
  184. if (x->n_d != 3) return 0;
  185. s = kad_new_core(0, 19, 1);
  186. s->child[0] = x;
  187. s->ptr = conv1d_gen_aux(x->d[2], kernel_size, stride, left_pad);
  188. s->ptr_size = sizeof(conv_conf_t);
  189. return kad_finalize_node(s);
  190. }
  191. kad_node_t *kad_avg1d(kad_node_t *x, int kernel_size, int stride, int left_pad)
  192. {
  193. kad_node_t *s;
  194. if (x->n_d != 3) return 0;
  195. s = kad_new_core(0, 28, 1);
  196. s->child[0] = x;
  197. s->ptr = conv1d_gen_aux(x->d[2], kernel_size, stride, left_pad);
  198. s->ptr_size = sizeof(conv_conf_t);
  199. return kad_finalize_node(s);
  200. }
  201. /********** Multi-node pooling **********/
  202. static kad_node_t *kad_pooling_general(int op, int n, kad_node_t **x)
  203. {
  204. int i;
  205. kad_node_t *s;
  206. s = kad_new_core(0, op, n);
  207. s->flag |= KAD_POOL;
  208. for (i = 0; i < n; ++i)
  209. s->child[i] = x[i];
  210. return kad_finalize_node(s);
  211. }
  212. kad_node_t *kad_avg(int n, kad_node_t **x) { return kad_pooling_general(10, n, x); }
  213. kad_node_t *kad_max(int n, kad_node_t **x) { return kad_pooling_general(21, n, x); }
  214. kad_node_t *kad_stack(int n, kad_node_t **x) { return kad_pooling_general(35, n, x); }
  215. kad_node_t *kad_select(int n, kad_node_t **x, int which)
  216. {
  217. kad_node_t *s;
  218. int32_t i, *aux;
  219. aux = (int32_t*)calloc(1, 4);
  220. *aux = which;
  221. s = kad_new_core(0, 12, n);
  222. for (i = 0; i < n; ++i) s->child[i] = x[i];
  223. s->flag |= KAD_POOL, s->ptr = aux, s->ptr_size = 4;
  224. return kad_finalize_node(s);
  225. }
  226. /********** Dimension reduction **********/
  227. static kad_node_t *kad_reduce_general(int op, kad_node_t *x, int axis)
  228. {
  229. kad_node_t *s;
  230. int32_t *aux;
  231. aux = (int32_t*)malloc(4);
  232. aux[0] = axis;
  233. s = kad_new_core(0, op, 1);
  234. s->child[0] = x;
  235. s->ptr = aux, s->ptr_size = 4;
  236. return kad_finalize_node(s);
  237. }
  238. kad_node_t *kad_reduce_sum(kad_node_t *x, int axis) { return kad_reduce_general(25, x, axis); }
  239. kad_node_t *kad_reduce_mean(kad_node_t *x, int axis) { return kad_reduce_general(26, x, axis); }
  240. /********** Sampling related **********/
  241. kad_node_t *kad_dropout(kad_node_t *x, kad_node_t *y)
  242. {
  243. kad_node_t *z;
  244. z = kad_op2_core(15, x, y);
  245. z->ptr = kad_rng(), z->ptr_size = sizeof(kad_rng_t);
  246. return z;
  247. }
  248. kad_node_t *kad_sample_normal(kad_node_t *x)
  249. {
  250. kad_node_t *z;
  251. z = kad_op1_core(24, x);
  252. z->ptr = kad_rng(), z->ptr_size = sizeof(kad_rng_t);
  253. return z;
  254. }
  255. /********** Miscellaneous **********/
  256. kad_node_t *kad_slice(kad_node_t *x, int axis, int start, int end)
  257. {
  258. kad_node_t *s;
  259. int32_t *aux;
  260. if (end < start || start < 0) return 0;
  261. aux = (int32_t*)malloc(3 * 4);
  262. aux[0] = axis, aux[1] = start, aux[2] = end;
  263. s = kad_new_core(0, 20, 1);
  264. s->child[0] = x;
  265. s->ptr = aux, s->ptr_size = 3 * 4;
  266. return kad_finalize_node(s);
  267. }
  268. kad_node_t *kad_concat_array(int axis, int n, kad_node_t **p)
  269. {
  270. kad_node_t *s;
  271. int32_t i, *aux;
  272. aux = (int32_t*)malloc(4);
  273. aux[0] = axis;
  274. s = kad_new_core(0, 31, n);
  275. for (i = 0; i < n; ++i)
  276. s->child[i] = p[i];
  277. s->ptr = aux, s->ptr_size = 4;
  278. return kad_finalize_node(s);
  279. }
  280. kad_node_t *kad_concat(int axis, int n, ...)
  281. {
  282. int i;
  283. kad_node_t **p, *s;
  284. va_list ap;
  285. p = (kad_node_t**)malloc(n * sizeof(kad_node_t*));
  286. va_start(ap, n);
  287. for (i = 0; i < n; ++i) p[i] = va_arg(ap, kad_node_p);
  288. va_end(ap);
  289. s = kad_concat_array(axis, n, p);
  290. free(p);
  291. return s;
  292. }
  293. kad_node_t *kad_reshape(kad_node_t *x, int n_d, int *d)
  294. {
  295. kad_node_t *s;
  296. int32_t i, *aux = 0;
  297. if (n_d > 0) {
  298. aux = (int32_t*)malloc(n_d * 4);
  299. for (i = 0; i < n_d; ++i) aux[i] = d? d[i] : -1;
  300. }
  301. s = kad_new_core(0, 30, 1);
  302. s->child[0] = x, s->ptr = aux, s->ptr_size = n_d * 4;
  303. return kad_finalize_node(s);
  304. }
  305. kad_node_t *kad_reverse(kad_node_t *x, int axis)
  306. {
  307. kad_node_t *s;
  308. int32_t *aux;
  309. aux = (int32_t*)malloc(4);
  310. *aux = axis;
  311. s = kad_new_core(0, 36, 1);
  312. s->child[0] = x, s->ptr = aux, s->ptr_size = 4;
  313. return kad_finalize_node(s);
  314. }
  315. kad_node_t *kad_switch(int n, kad_node_t **p)
  316. {
  317. kad_node_t *s;
  318. int32_t i, *aux;
  319. aux = (int32_t*)calloc(1, 4);
  320. s = kad_new_core(0, 12, n);
  321. for (i = 0; i < n; ++i)
  322. s->child[i] = p[i];
  323. s->ptr = aux, s->ptr_size = 4;
  324. return kad_finalize_node(s);
  325. }
  326. /***********************
  327. * Graph linearization *
  328. ***********************/
  329. static void kad_mark_back(int n, kad_node_t **v)
  330. {
  331. int i, j;
  332. for (i = 0; i < n; ++i) {
  333. if (v[i]->n_child == 0) continue;
  334. for (j = 0; j < v[i]->n_child; ++j)
  335. if (kad_is_back(v[i]->child[j]))
  336. break;
  337. if (j < v[i]->n_child) v[i]->flag |= KAD_VAR;
  338. else v[i]->flag &= ~KAD_VAR;
  339. }
  340. }
  341. static void kad_allocate_internal(int n, kad_node_t **v)
  342. {
  343. int i;
  344. kad_mark_back(n, v);
  345. for (i = 0; i < n; ++i) {
  346. kad_node_t *p = v[i];
  347. if (p->n_child == 0) continue;
  348. p->x = (float*)realloc(p->x, kad_len(p) * sizeof(float));
  349. if (kad_is_back(p)) {
  350. p->g = (float*)realloc(p->g, kad_len(p) * sizeof(float));
  351. kad_op_list[p->op](p, KAD_ALLOC);
  352. }
  353. }
  354. }
  355. int kad_sync_dim(int n, kad_node_t **v, int batch_size)
  356. {
  357. int i, req_alloc = 0, req_sync = 0, old_size = 0;
  358. for (i = 0; i < n; ++i) {
  359. if (kad_is_feed(v[i])) {
  360. old_size = v[i]->d[0]; /* TODO: check if all feeds have the same batch size */
  361. if (batch_size > 0 && v[i]->d[0] != batch_size)
  362. v[i]->d[0] = batch_size, req_sync = 1;
  363. } else if (v[i]->n_child > 0 && req_sync)
  364. kad_op_list[v[i]->op](v[i], KAD_SYNC_DIM);
  365. }
  366. if (old_size < batch_size) req_alloc = 1;
  367. for (i = 0; i < n; ++i)
  368. if (v[i]->n_child > 0 && v[i]->x == 0) req_alloc = 1;
  369. if (req_alloc) kad_allocate_internal(n, v);
  370. return batch_size > 0? batch_size : old_size;
  371. }
  372. #define kvec_t(type) struct { size_t n, m; type *a; }
  373. #define kv_pop(v) ((v).a[--(v).n])
  374. #define kv_push(type, v, x) do { \
  375. if ((v).n == (v).m) { \
  376. (v).m = (v).m? (v).m<<1 : 2; \
  377. (v).a = (type*)realloc((v).a, sizeof(type) * (v).m); \
  378. } \
  379. (v).a[(v).n++] = (x); \
  380. } while (0)
  381. /* IMPORTANT: kad_node_t::tmp MUST BE set to zero before calling this function */
  382. kad_node_t **kad_compile_array(int *n_node, int n_roots, kad_node_t **roots)
  383. {
  384. int i;
  385. kvec_t(kad_node_p) stack = {0,0,0}, a = {0,0,0};
  386. /* generate kad_node_t::tmp, the count of the parent nodes; shifted by 1; lowest bit to detect fake roots */
  387. for (i = 0; i < n_roots; ++i) {
  388. roots[i]->tmp = 1; /* mark the root */
  389. kv_push(kad_node_p, stack, roots[i]);
  390. }
  391. while (stack.n) {
  392. kad_node_t *p = kv_pop(stack);
  393. for (i = 0; i < p->n_child; ++i) {
  394. kad_node_t *q = p->child[i];
  395. if (q->tmp == 0) kv_push(kad_node_p, stack, q);
  396. q->tmp += 1<<1;
  397. }
  398. }
  399. /* topological sorting (Kahn's algorithm) */
  400. for (i = 0; i < n_roots; ++i)
  401. if (roots[i]->tmp>>1 == 0) /* if roots[i]->tmp>>1 != 0, it is not a real root */
  402. kv_push(kad_node_p, stack, roots[i]);
  403. while (stack.n) {
  404. kad_node_t *p = kv_pop(stack);
  405. kv_push(kad_node_p, a, p);
  406. for (i = 0; i < p->n_child; ++i) {
  407. p->child[i]->tmp -= 1<<1;
  408. if (p->child[i]->tmp>>1 == 0)
  409. kv_push(kad_node_p, stack, p->child[i]);
  410. }
  411. }
  412. free(stack.a);
  413. for (i = 0; i < (int)a.n; ++i) { /* check cycles; no cycles if constructed with kad_add() etc */
  414. assert(a.a[i]->tmp>>1 == 0);
  415. a.a[i]->tmp = 0;
  416. }
  417. /* reverse */
  418. for (i = 0; i < (int)a.n>>1; ++i) { /* reverse a.a[] */
  419. kad_node_p t;
  420. t = a.a[i], a.a[i] = a.a[a.n-1-i], a.a[a.n-1-i] = t;
  421. }
  422. kad_allocate_internal(a.n, a.a);
  423. *n_node = a.n;
  424. return a.a;
  425. }
  426. kad_node_t **kad_compile(int *n_node, int n_roots, ...)
  427. {
  428. int i;
  429. kad_node_t **roots, **ret;
  430. va_list ap;
  431. roots = (kad_node_t**)malloc(n_roots * sizeof(kad_node_t*));
  432. va_start(ap, n_roots);
  433. for (i = 0; i < n_roots; ++i) roots[i] = va_arg(ap, kad_node_p);
  434. va_end(ap);
  435. ret = kad_compile_array(n_node, n_roots, roots);
  436. free(roots);
  437. return ret;
  438. }
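/* Usage sketch (illustrative, not part of the library source; buffer names are made up):
 * building and compiling a tiny graph with the constructors defined earlier. kad_compile()
 * topologically sorts the nodes and allocates x[]/g[] for internal nodes; with a single root,
 * that root ends up at a[n_node-1].
 *
 *   float wx[4] = {0.0f, 0.0f, 0.0f, 0.0f}, wg[4], tx[1] = {1.0f};
 *   kad_node_t *x = kad_feed(2, 1, 4);               // bound to caller data before evaluation
 *   kad_node_t *w = kad_var(wx, wg, 2, 1, 4);        // trainable parameter
 *   kad_node_t *t = kad_const(tx, 2, 1, 1);          // regression target
 *   kad_node_t *cost = kad_mse(kad_cmul(x, w), t);   // scalar cost node
 *   int n_node;
 *   kad_node_t **a = kad_compile(&n_node, 1, cost);
 */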
  439. /************************************
  440. * Miscellaneous on compiled graphs *
  441. ************************************/
  442. void kad_delete(int n, kad_node_t **a)
  443. {
  444. int i;
  445. for (i = 0; i < n; ++i) {
  446. kad_node_t *p = a[i];
  447. if (p->n_child) {
  448. free(p->x); free(p->g);
  449. }
  450. free(p->child); free(p->ptr); free(p->gtmp); free(p);
  451. }
  452. free(a);
  453. }
  454. int kad_size_var(int n, kad_node_t *const* v)
  455. {
  456. int c, i;
  457. for (i = c = 0; i < n; ++i)
  458. if (kad_is_var(v[i]))
  459. c += kad_len(v[i]);
  460. return c;
  461. }
  462. int kad_size_const(int n, kad_node_t *const* v)
  463. {
  464. int c, i;
  465. for (i = c = 0; i < n; ++i)
  466. if (kad_is_const(v[i]))
  467. c += kad_len(v[i]);
  468. return c;
  469. }
  470. /*******************************
  471. * Compute values and gradients *
  472. *******************************/
  473. static void kad_propagate_marks(int n, kad_node_t **a)
  474. {
  475. int i, j;
  476. for (i = n - 1; i >= 0; --i) {
  477. kad_node_t *p = a[i];
  478. if (p->tmp > 0) {
  479. if (kad_is_switch(p)) {
  480. int32_t *aux = (int32_t*)p->ptr;
  481. if (p->child[*aux]->tmp == 0)
  482. p->child[*aux]->tmp = 1;
  483. } else {
  484. for (j = 0; j < p->n_child; ++j)
  485. if (p->child[j]->tmp == 0)
  486. p->child[j]->tmp = 1;
  487. }
  488. }
  489. }
  490. }
  491. void kad_eval_marked(int n, kad_node_t **a)
  492. {
  493. int i;
  494. kad_propagate_marks(n, a);
  495. for (i = 0; i < n; ++i)
  496. if (a[i]->n_child && a[i]->tmp > 0)
  497. kad_op_list[a[i]->op](a[i], KAD_FORWARD);
  498. for (i = 0; i < n; ++i) a[i]->tmp = 0;
  499. }
  500. const float *kad_eval_at(int n, kad_node_t **a, int from)
  501. {
  502. int i;
  503. if (from < 0 || from >= n) from = n - 1;
  504. for (i = 0; i < n; ++i) a[i]->tmp = (i == from);
  505. kad_eval_marked(n, a);
  506. return a[from]->x;
  507. }
  508. void kad_grad(int n, kad_node_t **a, int from)
  509. {
  510. int i;
  511. if (from < 0 || from >= n) from = n - 1;
  512. assert(a[from]->n_d == 0);
  513. for (i = 0; i < n; ++i) a[i]->tmp = (i == from);
  514. kad_propagate_marks(n, a);
  515. for (i = 0; i <= from; ++i) /* set all gradients to zero */
  516. if (a[i]->g && a[i]->tmp > 0)
  517. memset(a[i]->g, 0, kad_len(a[i]) * sizeof(float));
  518. for (i = from, a[i]->g[0] = 1.0f; i >= 0; --i) /* backprop */
  519. if (a[i]->n_child && a[i]->tmp > 0)
  520. kad_op_list[a[i]->op](a[i], KAD_BACKWARD);
  521. for (i = 0; i <= from; ++i) a[i]->tmp = 0;
  522. }
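/* Continuing the sketch above (illustrative): forward and backward passes on a compiled graph
 * a[] of n_node nodes whose last node is a scalar cost. input_buffer is a made-up name for
 * caller-owned feed data.
 *
 *   x->x = input_buffer;                                          // bind the feed node
 *   const float *cost_val = kad_eval_at(n_node, a, n_node - 1);   // forward pass
 *   kad_grad(n_node, a, n_node - 1);          // backward pass; wg[] now holds d(cost)/d(w)
 *   kad_delete(n_node, a);                    // free the compiled graph (not the caller buffers)
 */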
  523. /***********************
  524. * Load and save graph *
  525. ***********************/
  526. static void kad_save1(FILE *fp, const kad_node_t *p)
  527. {
  528. fwrite(&p->ext_label, 4, 1, fp);
  529. fwrite(&p->ext_flag, 4, 1, fp);
  530. fwrite(&p->flag, 1, 1, fp);
  531. fwrite(&p->n_child, 4, 1, fp);
  532. if (p->n_child) {
  533. int32_t j, pre = p->pre? p->pre->tmp : -1;
  534. fwrite(&p->op, 2, 1, fp);
  535. for (j = 0; j < p->n_child; ++j)
  536. fwrite(&p->child[j]->tmp, 4, 1, fp);
  537. fwrite(&pre, 4, 1, fp);
  538. fwrite(&p->ptr_size, 4, 1, fp);
  539. if (p->ptr_size > 0 && p->ptr)
  540. fwrite(p->ptr, p->ptr_size, 1, fp);
  541. } else {
  542. fwrite(&p->n_d, 1, 1, fp);
  543. if (p->n_d) fwrite(p->d, 4, p->n_d, fp);
  544. }
  545. }
  546. static kad_node_t *kad_load1(FILE *fp, kad_node_t **node)
  547. {
  548. kad_node_t *p;
  549. p = (kad_node_t*)calloc(1, sizeof(kad_node_t));
  550. (void) !fread(&p->ext_label, 4, 1, fp);
  551. (void) !fread(&p->ext_flag, 4, 1, fp);
  552. (void) !fread(&p->flag, 1, 1, fp);
  553. (void) !fread(&p->n_child, 4, 1, fp);
  554. if (p->n_child) {
  555. int32_t j, k;
  556. p->child = (kad_node_t**)calloc(p->n_child, sizeof(kad_node_t*));
  557. (void) !fread(&p->op, 2, 1, fp);
  558. for (j = 0; j < p->n_child; ++j) {
  559. (void) !fread(&k, 4, 1, fp);
  560. p->child[j] = node? node[k] : 0;
  561. }
  562. (void) !fread(&k, 4, 1, fp);
  563. if (k >= 0) p->pre = node[k];
  564. (void) !fread(&p->ptr_size, 4, 1, fp);
  565. if (p->ptr_size > 0) {
  566. p->ptr = malloc(p->ptr_size);
  567. (void) !fread(p->ptr, p->ptr_size, 1, fp);
  568. }
  569. } else {
  570. (void) !fread(&p->n_d, 1, 1, fp);
  571. if (p->n_d) (void) !fread(p->d, 4, p->n_d, fp);
  572. }
  573. return p;
  574. }
  575. int kad_save(FILE *fp, int n_node, kad_node_t **node)
  576. {
  577. int32_t i, k = n_node;
  578. fwrite(&k, 4, 1, fp);
  579. for (i = 0; i < n_node; ++i) node[i]->tmp = i;
  580. for (i = 0; i < n_node; ++i) kad_save1(fp, node[i]);
  581. for (i = 0; i < n_node; ++i) node[i]->tmp = 0;
  582. return 0;
  583. }
  584. kad_node_t **kad_load(FILE *fp, int *_n_node)
  585. {
  586. int32_t i, n_node;
  587. kad_node_t **node;
  588. (void) !fread(&n_node, 4, 1, fp);
  589. node = (kad_node_t**)malloc(n_node * sizeof(kad_node_t*));
  590. for (i = 0; i < n_node; ++i) {
  591. kad_node_t *p;
  592. p = node[i] = kad_load1(fp, node);
  593. if (p->n_child) {
  594. kad_op_list[p->op](p, KAD_ALLOC);
  595. kad_op_list[p->op](p, KAD_SYNC_DIM);
  596. }
  597. }
  598. *_n_node = n_node;
  599. kad_mark_back(n_node, node);
  600. return node;
  601. }
  602. /***************
  603. * Graph clone *
  604. ***************/
  605. static inline kad_node_t *kad_dup1(const kad_node_t *p)
  606. {
  607. kad_node_t *q;
  608. q = (kad_node_t*)malloc(sizeof(kad_node_t));
  609. memcpy(q, p, sizeof(kad_node_t));
  610. q->pre = 0, q->tmp = 0, q->gtmp = 0;
  611. if (p->ptr && p->ptr_size > 0) {
  612. if (kad_use_rng(p) && !(p->flag & KAD_SHARE_RNG) && p->ptr_size == sizeof(kad_rng_t)) {
  613. q->ptr = kad_rng(); /* each time step uses a different RNG */
  614. } else {
  615. q->ptr = malloc(p->ptr_size);
  616. memcpy(q->ptr, p->ptr, p->ptr_size);
  617. }
  618. }
  619. if (q->n_child) {
  620. q->x = q->g = 0;
  621. q->child = (kad_node_t**)calloc(q->n_child, sizeof(kad_node_t*));
  622. }
  623. return q;
  624. }
  625. kad_node_t **kad_clone(int n, kad_node_t **v, int batch_size)
  626. {
  627. int i, j;
  628. kad_node_t **u;
  629. u = (kad_node_t**)calloc(n, sizeof(kad_node_t*));
  630. for (i = 0; i < n; ++i) v[i]->tmp = i;
  631. for (i = 0; i < n; ++i) {
  632. kad_node_t *p = v[i], *q;
  633. q = u[i] = kad_dup1(p);
  634. if (p->pre) q->pre = u[p->pre->tmp];
  635. if (p->n_child) {
  636. for (j = 0; j < p->n_child; ++j)
  637. q->child[j] = u[p->child[j]->tmp];
  638. } else if (!kad_is_feed(p)) {
  639. q->x = (float*)malloc(kad_len(p) * sizeof(float));
  640. memcpy(q->x, p->x, kad_len(p) * sizeof(float));
  641. q->g = 0;
  642. }
  643. }
  644. for (i = 0; i < n; ++i) v[i]->tmp = 0;
  645. kad_sync_dim(n, u, batch_size); /* this will allocate x[] and g[] at internal nodes */
  646. return u;
  647. }
  648. /**************
  649. * Unroll RNN *
  650. **************/
  651. typedef struct {
  652. int32_t n, m;
  653. kad_node_t **v;
  654. } nodes_t;
  655. static inline void push_nodes(nodes_t *w, kad_node_t *p)
  656. {
  657. if (w->n == w->m) {
  658. w->m = w->m? w->m<<1 : 16;
  659. w->v = (kad_node_t**)realloc(w->v, w->m * sizeof(kad_node_t*));
  660. }
  661. w->v[w->n++] = p;
  662. }
  663. static void kad_unroll_helper(int n_v, kad_node_t **v, int i_pivot, kad_node_t **t, int len, nodes_t *w)
  664. {
  665. int i, j, l;
  666. uint8_t *flag;
  667. kad_node_t **aux;
  668. assert(kad_is_pivot(v[i_pivot]) && t[i_pivot] == 0);
  669. t[i_pivot] = kad_dup1(v[i_pivot]);
  670. t[i_pivot]->n_child = len;
  671. t[i_pivot]->child = (kad_node_t**)realloc(t[i_pivot]->child, len * sizeof(kad_node_t*));
  672. flag = (uint8_t*)calloc(n_v, 1);
  673. for (i = i_pivot, flag[i] = 16; i >= 0; --i) {
  674. if (i < i_pivot && kad_is_pivot(v[i])) continue; /* don't trespass other pivots */
  675. if (flag[i]&16) /* flag 16: nodes to unroll */
  676. for (j = 0; j < v[i]->n_child; ++j)
  677. flag[v[i]->child[j]->tmp] = 16;
  678. }
  679. for (i = 0; i < i_pivot; ++i) {
  680. if (!(flag[i]&16)) continue;
  681. if (kad_is_var(v[i]) || kad_is_const(v[i]) || kad_is_pivot(v[i])) flag[i] |= 1; /* external nodes that should not be duplicated */
  682. if (v[i]->pre) flag[v[i]->pre->tmp] |= 2;
  683. }
  684. flag[v[i_pivot]->child[0]->tmp] |= 4;
  685. aux = (kad_node_t**)calloc(n_v, sizeof(kad_node_t*));
  686. for (l = 0; l < len; ++l) {
  687. for (i = 0; i < i_pivot; ++i) {
  688. if (!(flag[i]&16) || ((flag[i]&3) && t[i])) continue;
  689. t[i] = kad_dup1(v[i]);
  690. if (v[i]->n_child)
  691. for (j = 0; j < v[i]->n_child; ++j)
  692. t[i]->child[j] = t[v[i]->child[j]->tmp];
  693. if (flag[i]&4) t[i_pivot]->child[l] = t[i];
  694. if (l == 0 && (flag[i]&2)) aux[i] = t[i];
  695. if (v[i]->pre) {
  696. t[v[i]->pre->tmp] = t[i];
  697. if (l == len - 1) t[i]->pre = aux[v[i]->pre->tmp]; /* this forms a cycle! */
  698. }
  699. push_nodes(w, t[i]);
  700. }
  701. }
  702. push_nodes(w, t[i_pivot]);
  703. free(aux); free(flag);
  704. }
  705. int kad_n_pivots(int n_v, kad_node_t **v)
  706. {
  707. int i, n_pivots = 0;
  708. for (i = 0; i < n_v; ++i)
  709. if (kad_is_pivot(v[i])) ++n_pivots;
  710. return n_pivots;
  711. }
  712. kad_node_t **kad_unroll(int n_v, kad_node_t **v, int *new_n, int *len)
  713. {
  714. int i, j, n_pivots = 0;
  715. kad_node_t **t;
  716. nodes_t w = {0,0,0};
  717. t = (kad_node_t**)calloc(n_v, sizeof(kad_node_t*));
  718. n_pivots = kad_n_pivots(n_v, v);
  719. for (i = 0; i < n_v; ++i) v[i]->tmp = i;
  720. if (n_pivots) {
  721. int k, *i_pivots;
  722. i_pivots = (int*)calloc(n_pivots, sizeof(int));
  723. for (i = k = 0; i < n_v; ++i) /* collect pivots */
  724. if (kad_is_pivot(v[i])) i_pivots[k++] = i;
  725. for (i = 0; i < n_pivots; ++i) /* unroll each pivot, from the lowest to the highest */
  726. kad_unroll_helper(n_v, v, i_pivots[i], t, len[i], &w);
  727. free(i_pivots);
  728. }
  729. for (i = 0; i < n_v; ++i) { /* copy over the rest of nodes */
  730. if (t[i]) continue;
  731. t[i] = kad_dup1(v[i]);
  732. if (v[i]->n_child)
  733. for (j = 0; j < v[i]->n_child; ++j)
  734. t[i]->child[j] = t[v[i]->child[j]->tmp];
  735. push_nodes(&w, t[i]);
  736. }
  737. free(t);
  738. for (i = 0; i < n_v; ++i) v[i]->tmp = 0;
  739. for (i = 0; i < w.n; ++i) /* stack may change the output dimension */
  740. if (w.v[i]->n_child > 0)
  741. kad_op_list[w.v[i]->op](w.v[i], KAD_SYNC_DIM);
  742. kad_allocate_internal(w.n, w.v);
  743. *new_n = w.n;
  744. return w.v;
  745. }
  746. /********************************
  747. * Vector and matrix operations *
  748. ********************************/
  749. #ifdef __SSE__
  750. #include <xmmintrin.h>
  751. static inline float kad_sdot(int n, const float *x, const float *y) /* BLAS sdot using SSE */
  752. {
  753. int i, n8 = n>>3<<3;
  754. __m128 vs1, vs2;
  755. float s, t[4];
  756. vs1 = _mm_setzero_ps();
  757. vs2 = _mm_setzero_ps();
  758. for (i = 0; i < n8; i += 8) {
  759. __m128 vx1, vx2, vy1, vy2;
  760. vx1 = _mm_loadu_ps(&x[i]);
  761. vx2 = _mm_loadu_ps(&x[i+4]);
  762. vy1 = _mm_loadu_ps(&y[i]);
  763. vy2 = _mm_loadu_ps(&y[i+4]);
  764. vs1 = _mm_add_ps(vs1, _mm_mul_ps(vx1, vy1));
  765. vs2 = _mm_add_ps(vs2, _mm_mul_ps(vx2, vy2));
  766. }
  767. for (s = 0.; i < n; ++i) s += x[i] * y[i];
  768. _mm_storeu_ps(t, vs1);
  769. s += t[0] + t[1] + t[2] + t[3];
  770. _mm_storeu_ps(t, vs2);
  771. s += t[0] + t[1] + t[2] + t[3];
  772. return s;
  773. }
  774. static inline void kad_saxpy_inlined(int n, float a, const float *x, float *y) /* BLAS saxpy using SSE */
  775. {
  776. int i, n8 = n>>3<<3;
  777. __m128 va;
  778. va = _mm_set1_ps(a);
  779. for (i = 0; i < n8; i += 8) {
  780. __m128 vx1, vx2, vy1, vy2, vt1, vt2;
  781. vx1 = _mm_loadu_ps(&x[i]);
  782. vx2 = _mm_loadu_ps(&x[i+4]);
  783. vy1 = _mm_loadu_ps(&y[i]);
  784. vy2 = _mm_loadu_ps(&y[i+4]);
  785. vt1 = _mm_add_ps(_mm_mul_ps(va, vx1), vy1);
  786. vt2 = _mm_add_ps(_mm_mul_ps(va, vx2), vy2);
  787. _mm_storeu_ps(&y[i], vt1);
  788. _mm_storeu_ps(&y[i+4], vt2);
  789. }
  790. for (; i < n; ++i) y[i] += a * x[i];
  791. }
  792. #else
  793. static inline float kad_sdot(int n, const float *x, const float *y) /* BLAS sdot */
  794. {
  795. int i;
  796. float s = 0.;
  797. for (i = 0; i < n; ++i) s += x[i] * y[i];
  798. return s;
  799. }
  800. static inline void kad_saxpy_inlined(int n, float a, const float *x, float *y) /* BLAS saxpy */
  801. {
  802. int i;
  803. for (i = 0; i < n; ++i) y[i] += a * x[i];
  804. }
  805. #endif
  806. void kad_vec_mul_sum(int n, float *a, const float *b, const float *c)
  807. {
  808. int i;
  809. for (i = 0; i < n; ++i) a[i] += b[i] * c[i];
  810. }
  811. void kad_saxpy(int n, float a, const float *x, float *y) { kad_saxpy_inlined(n, a, x, y); }
  812. #ifdef HAVE_CBLAS
  813. #ifndef __APPLE__
  814. /* As gfortran mangles names */
  815. #define ssyev ssyev_
  816. #endif
  817. extern void ssyev(const char* jobz, const char* uplo, int* n, float* a, int* lda, float* w, float* work, int* lwork, int* info);
  818. #ifdef HAVE_CBLAS_H
  819. #include "cblas.h"
  820. #else
  821. /* Poor man's approach */
  822. enum CBLAS_ORDER {CblasRowMajor=101, CblasColMajor=102 };
  823. enum CBLAS_TRANSPOSE {CblasNoTrans=111, CblasTrans=112 };
  824. extern void cblas_sgemm(const enum CBLAS_ORDER Order,
  825. const enum CBLAS_TRANSPOSE TA,
  826. const enum CBLAS_TRANSPOSE TB,
  827. const int M, const int N, const int K,
  828. const float alpha, const float *A, const int lda,
  829. const float *B, const int ldb, const float beta,
  830. float *C, const int ldc);
  831. #endif
  832. void kad_sgemm_simple(int trans_A, int trans_B, int M, int N, int K, const float *A, const float *B, float *C)
  833. {
  834. cblas_sgemm(CblasRowMajor, trans_A? CblasTrans : CblasNoTrans, trans_B? CblasTrans : CblasNoTrans, M, N, K, 1.0f, A, trans_A? M : K, B, trans_B? K : N, 1.0f, C, N);
  835. }
  836. #else
  837. void kad_sgemm_simple(int trans_A, int trans_B, int M, int N, int K, const float *A, const float *B, float *C) /* simplified BLAS sgemm */
  838. {
  839. static const int x = 16;
  840. int i, j, k;
  841. if (!trans_A && trans_B) {
  842. for (i = 0; i < M; i += x)
  843. for (j = 0; j < N; j += x) {
  844. int ii, ie = M < i + x? M : i + x;
  845. int jj, je = N < j + x? N : j + x;
  846. for (ii = i; ii < ie; ++ii) { /* loop tiling */
  847. const float *aii = A + ii * K, *bjj;
  848. float *cii = C + ii * N;
  849. for (jj = j, bjj = B + j * K; jj < je; ++jj, bjj += K)
  850. cii[jj] += kad_sdot(K, aii, bjj);
  851. }
  852. }
  853. } else if (!trans_A && !trans_B) {
  854. for (i = 0; i < M; ++i)
  855. for (k = 0; k < K; ++k)
  856. kad_saxpy_inlined(N, A[i*K+k], &B[k*N], &C[i*N]);
  857. } else if (trans_A && !trans_B) {
  858. for (k = 0; k < K; ++k)
  859. for (i = 0; i < M; ++i)
  860. kad_saxpy_inlined(N, A[k*M+i], &B[k*N], &C[i*N]);
  861. } else abort(); /* not implemented for (trans_A && trans_B) */
  862. }
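/* Note on the fallback above: all matrices are row-major. The (A, trans(B)) case is blocked into
 * 16x16 tiles so the rows of A and B touched inside a tile stay in cache, with the inner product
 * done by kad_sdot(); the other two supported cases are expressed as saxpy updates of whole
 * output rows. C is accumulated into (beta = 1), matching the CBLAS branch. */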
  863. #endif
  864. bool kad_ssyev_simple(int N, float *A, float *eigenvals)
  865. {
  866. #ifndef HAVE_CBLAS
  867. return false;
  868. #else
  869. int n = N, lda = N, info, lwork;
  870. float wkopt;
  871. float *work;
  872. /* Query and allocate the optimal workspace */
  873. lwork = -1;
  874. ssyev ("Vectors", "Upper", &n, A, &lda, eigenvals, &wkopt, &lwork, &info);
  875. lwork = wkopt;
  876. work = (float*) g_malloc(lwork * sizeof(float));
  877. ssyev ("Vectors", "Upper", &n, A, &lda, eigenvals, work, &lwork, &info);
  878. /* Check for convergence */
  879. if (info > 0) {
  880. g_free (work);
  881. return false;
  882. }
  883. g_free (work);
  884. return true;
  885. #endif
  886. }
  887. /***************************
  888. * Random number generator *
  889. ***************************/
  890. static kad_rng_t kad_rng_dat = { {0x50f5647d2380309dULL, 0x91ffa96fc4c62cceULL}, 0.0, 0, 0 };
  891. static inline uint64_t kad_splitmix64(uint64_t x)
  892. {
  893. uint64_t z = (x += 0x9E3779B97F4A7C15ULL);
  894. z = (z ^ (z >> 30)) * 0xBF58476D1CE4E5B9ULL;
  895. z = (z ^ (z >> 27)) * 0x94D049BB133111EBULL;
  896. return z ^ (z >> 31);
  897. }
  898. static inline uint64_t kad_xoroshiro128plus_next(kad_rng_t *r)
  899. {
  900. const uint64_t s0 = r->s[0];
  901. uint64_t s1 = r->s[1];
  902. const uint64_t result = s0 + s1;
  903. s1 ^= s0;
  904. r->s[0] = (s0 << 55 | s0 >> 9) ^ s1 ^ (s1 << 14);
  905. r->s[1] = s0 << 36 | s0 >> 28;
  906. return result;
  907. }
  908. static inline void kad_xoroshiro128plus_jump(kad_rng_t *r)
  909. {
  910. static const uint64_t JUMP[] = { 0xbeac0467eba5facbULL, 0xd86b048b86aa9922ULL };
  911. uint64_t s0 = 0, s1 = 0;
  912. int i, b;
  913. for (i = 0; i < 2; ++i)
  914. for (b = 0; b < 64; b++) {
  915. if (JUMP[i] & 1ULL << b)
  916. s0 ^= r->s[0], s1 ^= r->s[1];
  917. kad_xoroshiro128plus_next(r);
  918. }
  919. r->s[0] = s0, r->s[1] = s1;
  920. }
  921. void kad_srand(void *d, uint64_t seed)
  922. {
  923. kad_rng_t *r = d? (kad_rng_t*)d : &kad_rng_dat;
  924. r->n_gset = 0.0, r->n_iset = 0;
  925. r->s[0] = kad_splitmix64(seed);
  926. r->s[1] = kad_splitmix64(r->s[0]);
  927. }
  928. void *kad_rng(void)
  929. {
  930. kad_rng_t *r;
  931. r = (kad_rng_t*)calloc(1, sizeof(kad_rng_t));
  932. kad_xoroshiro128plus_jump(&kad_rng_dat);
  933. r->s[0] = kad_rng_dat.s[0], r->s[1] = kad_rng_dat.s[1];
  934. return r;
  935. }
  936. uint64_t kad_rand(void *d) { return kad_xoroshiro128plus_next(d? (kad_rng_t*)d : &kad_rng_dat); }
  937. double kad_drand(void *d)
  938. {
  939. union { uint64_t i; double d; } u;
  940. u.i = 0x3FFULL << 52 | kad_xoroshiro128plus_next(d? (kad_rng_t*)d : &kad_rng_dat) >> 12;
  941. return u.d - 1.0;
  942. }
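/* The bit trick above: 0x3FF << 52 sets the IEEE-754 double exponent so the value lies in
 * [1, 2), while the top 52 random bits fill the mantissa; subtracting 1.0 therefore yields a
 * uniform deviate in [0, 1). */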
  943. double kad_drand_normal(void *d)
  944. {
  945. kad_rng_t *r = d? (kad_rng_t*)d : &kad_rng_dat;
  946. if (r->n_iset == 0) {
  947. double fac, rsq, v1, v2;
  948. do {
  949. v1 = 2.0 * kad_drand(d) - 1.0;
  950. v2 = 2.0 * kad_drand(d) - 1.0;
  951. rsq = v1 * v1 + v2 * v2;
  952. } while (rsq >= 1.0 || rsq == 0.0);
  953. fac = sqrt(-2.0 * log(rsq) / rsq);
  954. r->n_gset = v1 * fac;
  955. r->n_iset = 1;
  956. return v2 * fac;
  957. } else {
  958. r->n_iset = 0;
  959. return r->n_gset;
  960. }
  961. }
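/* kad_drand_normal() is the Marsaglia polar method: it rejects points outside the unit disk,
 * converts an accepted pair (v1, v2) into two independent N(0,1) deviates, returns one and
 * caches the other in n_gset/n_iset for the next call. */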
  962. /*************
  963. * Operators *
  964. *************/
  965. static inline void kad_copy_dim1(kad_node_t *dst, const kad_node_t *src) /* set the dimension/shape of dst to src */
  966. {
  967. dst->n_d = src->n_d;
  968. if (src->n_d) memcpy(dst->d, src->d, src->n_d * sizeof(int));
  969. }
  970. /********** Arithmetic operations **********/
  971. int kad_op_add(kad_node_t *p, int action)
  972. {
  973. int i, n0, n1;
  974. kad_node_t *q[2];
  975. q[0] = p->child[0], n0 = kad_len(q[0]);
  976. q[1] = p->child[1], n1 = kad_len(q[1]);
  977. if (action == KAD_SYNC_DIM) {
  978. if (n0 % n1 != 0) return -1;
  979. kad_copy_dim1(p, q[0]);
  980. } else if (action == KAD_FORWARD) {
  981. assert(n0 >= n1);
  982. memcpy(p->x, q[0]->x, n0 * sizeof(float));
  983. for (i = 0; i < n0; i += n1)
  984. kad_saxpy(n1, 1.0f, q[1]->x, p->x + i);
  985. } else if (action == KAD_BACKWARD) {
  986. if (kad_is_back(q[0])) kad_saxpy(n0, 1.0f, p->g, q[0]->g);
  987. if (kad_is_back(q[1]))
  988. for (i = 0; i < n0; i += n1)
  989. kad_saxpy(n1, 1.0f, p->g + i, q[1]->g);
  990. }
  991. return 0;
  992. }
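/* Broadcasting convention used by kad_op_add/sub/mul: the length of the second child must divide
 * the length of the first, and the second child is applied to every contiguous block of that
 * length. Example (illustrative): adding a length-4 bias to a 3x4 matrix repeats the bias for
 * each of the 3 rows. */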
  993. int kad_op_sub(kad_node_t *p, int action)
  994. {
  995. int i, n0, n1;
  996. kad_node_t *q[2];
  997. q[0] = p->child[0], n0 = kad_len(q[0]);
  998. q[1] = p->child[1], n1 = kad_len(q[1]);
  999. if (action == KAD_SYNC_DIM) {
  1000. if (n0 % n1 != 0) return -1;
  1001. kad_copy_dim1(p, q[0]);
  1002. } else if (action == KAD_FORWARD) {
  1003. assert(n0 >= n1);
  1004. memcpy(p->x, q[0]->x, n0 * sizeof(float));
  1005. for (i = 0; i < n0; i += n1)
  1006. kad_saxpy(n1, -1.0f, q[1]->x, p->x + i);
  1007. } else if (action == KAD_BACKWARD) {
  1008. if (kad_is_back(q[0])) kad_saxpy(n0, 1.0f, p->g, q[0]->g);
  1009. if (kad_is_back(q[1]))
  1010. for (i = 0; i < n0; i += n1)
  1011. kad_saxpy(n1, -1.0f, p->g + i, q[1]->g);
  1012. }
  1013. return 0;
  1014. }
  1015. int kad_op_mul(kad_node_t *p, int action)
  1016. {
  1017. int i, n0, n1;
  1018. kad_node_t *q[2];
  1019. q[0] = p->child[0], n0 = kad_len(q[0]);
  1020. q[1] = p->child[1], n1 = kad_len(q[1]);
  1021. if (action == KAD_SYNC_DIM) {
  1022. if (n0 % n1 != 0) return -1;
  1023. kad_copy_dim1(p, q[0]);
  1024. } else if (action == KAD_FORWARD) {
  1025. assert(n0 >= n1);
  1026. memset(p->x, 0, n0 * sizeof(float));
  1027. if (q[0]->x != 0 && q[1]->x != 0)
  1028. for (i = 0; i < n0; i += n1) /* TODO: optimize when n1==1 */
  1029. kad_vec_mul_sum(n1, p->x + i, q[0]->x + i, q[1]->x);
  1030. } else if (action == KAD_BACKWARD) {
  1031. if (kad_is_back(q[0]) && q[1]->x)
  1032. for (i = 0; i < n0; i += n1)
  1033. kad_vec_mul_sum(n1, q[0]->g + i, p->g + i, q[1]->x);
  1034. if (kad_is_back(q[1]) && q[0]->x)
  1035. for (i = 0; i < n0; i += n1)
  1036. kad_vec_mul_sum(n1, q[1]->g, p->g + i, q[0]->x + i);
  1037. }
  1038. return 0;
  1039. }
  1040. int kad_op_cmul(kad_node_t *p, int action)
  1041. {
  1042. int i, n_a_row, n_b_row, n_col, n_a_col = 1, n_b_col = 1;
  1043. kad_node_t *q[2];
  1044. q[0] = p->child[0], q[1] = p->child[1];
  1045. n_col = q[0]->d[q[0]->n_d - 1] > q[1]->d[q[1]->n_d - 1]? q[0]->d[q[0]->n_d - 1] : q[1]->d[q[1]->n_d - 1];
  1046. for (i = q[0]->n_d - 1; i >= 0; --i) if (n_a_col < n_col) n_a_col *= q[0]->d[i];
  1047. for (i = q[1]->n_d - 1; i >= 0; --i) if (n_b_col < n_col) n_b_col *= q[1]->d[i];
  1048. n_a_row = kad_len(q[0]) / n_a_col, n_b_row = kad_len(q[1]) / n_b_col;
  1049. if (action == KAD_SYNC_DIM) {
  1050. if (n_a_col != n_b_col) return -1;
  1051. p->n_d = 2, p->d[0] = n_a_row, p->d[1] = n_b_row;
  1052. } else if (action == KAD_FORWARD) {
  1053. memset(p->x, 0, n_a_row * n_b_row * sizeof(float));
  1054. if (q[0]->x && q[1]->x)
  1055. kad_sgemm_simple(0, 1, n_a_row, n_b_row, n_col, q[0]->x, q[1]->x, p->x); /* Y = X * trans(W) */
  1056. } else if (action == KAD_BACKWARD) {
  1057. if (kad_is_back(q[0]) && q[1]->x)
  1058. kad_sgemm_simple(0, 0, n_a_row, n_col, n_b_row, p->g, q[1]->x, q[0]->g); /* G_x <- G_y * W */
  1059. if (kad_is_back(q[1]) && q[0]->x)
  1060. kad_sgemm_simple(1, 0, n_b_row, n_col, n_a_row, p->g, q[0]->x, q[1]->g); /* G_w <- trans(G_y) * X */
  1061. }
  1062. return 0;
  1063. }
  1064. int kad_op_matmul(kad_node_t *p, int action) /* TODO: matmul and cmul have different broadcasting rules */
  1065. {
  1066. int n_a_row, n_b_row, n_a_col, n_b_col;
  1067. kad_node_t *q[2];
  1068. q[0] = p->child[0];
  1069. q[1] = p->child[1];
  1070. n_a_row = q[0]->n_d == 1? 1 : q[0]->d[0];
  1071. n_b_row = q[1]->n_d == 1? 1 : q[1]->d[0];
  1072. n_a_col = kad_len(q[0]) / n_a_row;
  1073. n_b_col = kad_len(q[1]) / n_b_row;
  1074. if (action == KAD_SYNC_DIM) {
  1075. if (n_a_col != n_b_row) return -1;
  1076. p->n_d = 2, p->d[0] = n_a_row, p->d[1] = n_b_col;
  1077. } else if (action == KAD_FORWARD) {
  1078. memset(p->x, 0, n_a_row * n_b_col * sizeof(float));
  1079. if (q[0]->x && q[1]->x)
  1080. kad_sgemm_simple(0, 0, n_a_row, n_b_col, n_a_col, q[0]->x, q[1]->x, p->x); /* Y = X * W */
  1081. } else if (action == KAD_BACKWARD) {
  1082. if (kad_is_back(q[0]) && q[1]->x)
  1083. kad_sgemm_simple(0, 1, n_a_row, n_a_col, n_b_col, p->g, q[1]->x, q[0]->g); /* G_x <- G_y * trans(W) */
  1084. if (kad_is_back(q[1]) && q[0]->x)
  1085. kad_sgemm_simple(1, 0, n_b_row, n_b_col, n_a_row, q[0]->x, p->g, q[1]->g); /* G_w <- trans(X) * G_y */
  1086. }
  1087. return 0;
  1088. }
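/* Shape summary for the two matrix products (row-major throughout):
 *   kad_cmul(X, W):   X is (r_x, c),   W is (r_w, c)   -> output (r_x, r_w) = X * trans(W)
 *   kad_matmul(A, B): A is (r_a, c_a), B is (c_a, c_b) -> output (r_a, c_b) = A * B
 * kad_cmul() treats each row of W as one output unit, which suits weight matrices stored one
 * row per output. */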
  1089. int kad_op_square(kad_node_t *p, int action)
  1090. {
  1091. int i, n;
  1092. kad_node_t *q = p->child[0];
  1093. n = kad_len(q);
  1094. if (action == KAD_SYNC_DIM) {
  1095. kad_copy_dim1(p, q);
  1096. } else if (action == KAD_FORWARD) {
  1097. for (i = 0; i < n; ++i)
  1098. p->x[i] = q->x[i] * q->x[i];
  1099. } else if (action == KAD_BACKWARD && kad_is_back(q)) {
  1100. for (i = 0; i < n; ++i)
  1101. q->g[i] += p->g[i] * (q->x[i] + q->x[i]);
  1102. }
  1103. return 0;
  1104. }
  1105. int kad_op_1minus(kad_node_t *p, int action)
  1106. {
  1107. int i, n;
  1108. kad_node_t *q = p->child[0];
  1109. n = kad_len(q);
  1110. if (action == KAD_SYNC_DIM) {
  1111. kad_copy_dim1(p, q);
  1112. } else if (action == KAD_FORWARD) {
  1113. for (i = 0; i < n; ++i) p->x[i] = 1.0f - q->x[i];
  1114. } else if (action == KAD_BACKWARD && kad_is_back(q)) {
  1115. kad_saxpy(n, -1.0f, p->g, q->g);
  1116. }
  1117. return 0;
  1118. }
  1119. int kad_op_exp(kad_node_t *p, int action)
  1120. {
  1121. int i, n;
  1122. kad_node_t *q = p->child[0];
  1123. n = kad_len(q);
  1124. if (action == KAD_SYNC_DIM) {
  1125. kad_copy_dim1(p, q);
  1126. } else if (action == KAD_FORWARD) {
  1127. for (i = 0; i < n; ++i) p->x[i] = expf(q->x[i]);
  1128. } else if (action == KAD_BACKWARD && kad_is_back(q)) {
  1129. for (i = 0; i < n; ++i)
  1130. q->g[i] += p->g[i] * p->x[i];
  1131. }
  1132. return 0;
  1133. }
  1134. int kad_op_log(kad_node_t *p, int action)
  1135. {
  1136. int i, n;
  1137. kad_node_t *q = p->child[0];
  1138. n = kad_len(q);
  1139. if (action == KAD_SYNC_DIM) {
  1140. kad_copy_dim1(p, q);
  1141. } else if (action == KAD_FORWARD) {
  1142. for (i = 0; i < n; ++i) p->x[i] = logf(q->x[i]);
  1143. } else if (action == KAD_BACKWARD && kad_is_back(q)) {
  1144. for (i = 0; i < n; ++i)
  1145. q->g[i] += p->g[i] / q->x[i];
  1146. }
  1147. return 0;
  1148. }
  1149. int kad_op_reduce_sum(kad_node_t *p, int action)
  1150. {
  1151. kad_node_t *q = p->child[0];
  1152. int i, j, k, axis, d0, d1;
  1153. assert(p->ptr);
  1154. axis = *(int32_t*)p->ptr;
  1155. if (axis < 0 || axis >= q->n_d) return -1;
  1156. for (i = 0, d0 = 1; i < axis; ++i) d0 *= q->d[i];
  1157. for (i = axis + 1, d1 = 1; i < q->n_d; ++i) d1 *= q->d[i];
  1158. if (action == KAD_SYNC_DIM) {
  1159. p->n_d = q->n_d - 1;
  1160. for (i = j = 0; i < q->n_d; ++i)
  1161. if (i != axis) p->d[j++] = q->d[i];
  1162. } else if (action == KAD_FORWARD) {
  1163. memset(p->x, 0, kad_len(p) * sizeof(float));
  1164. for (i = 0; i < d0; ++i)
  1165. for (j = 0; j < q->d[axis]; ++j)
  1166. for (k = 0; k < d1; ++k)
  1167. p->x[i * d1 + k] += q->x[(i * q->d[axis] + j) * d1 + k];
  1168. } else if (action == KAD_BACKWARD && kad_is_back(q)) {
  1169. for (i = 0; i < d0; ++i)
  1170. for (j = 0; j < q->d[axis]; ++j)
  1171. for (k = 0; k < d1; ++k)
  1172. q->g[(i * q->d[axis] + j) * d1 + k] += p->g[i * d1 + k];
  1173. }
  1174. return 0;
  1175. }
  1176. int kad_op_reduce_mean(kad_node_t *p, int action)
  1177. {
  1178. kad_node_t *q = p->child[0];
  1179. int i, j, k, axis, d0, d1;
  1180. assert(p->ptr);
  1181. axis = *(int32_t*)p->ptr;
  1182. if (axis < 0 || axis >= q->n_d) return -1;
  1183. for (i = 0, d0 = 1; i < axis; ++i) d0 *= q->d[i];
  1184. for (i = axis + 1, d1 = 1; i < q->n_d; ++i) d1 *= q->d[i];
  1185. if (action == KAD_SYNC_DIM) {
  1186. p->n_d = q->n_d - 1;
  1187. for (i = j = 0; i < q->n_d; ++i)
  1188. if (i != axis) p->d[j++] = q->d[i];
  1189. } else if (action == KAD_FORWARD) {
  1190. float t = 1.0f / q->d[axis];
  1191. memset(p->x, 0, kad_len(p) * sizeof(float));
  1192. for (i = 0; i < d0; ++i)
  1193. for (j = 0; j < q->d[axis]; ++j)
  1194. for (k = 0; k < d1; ++k)
  1195. p->x[i * d1 + k] += t * q->x[(i * q->d[axis] + j) * d1 + k];
  1196. } else if (action == KAD_BACKWARD && kad_is_back(q)) {
  1197. float t = 1.0f / q->d[axis];
  1198. for (i = 0; i < d0; ++i)
  1199. for (j = 0; j < q->d[axis]; ++j)
  1200. for (k = 0; k < d1; ++k)
  1201. q->g[(i * q->d[axis] + j) * d1 + k] += t * p->g[i * d1 + k];
  1202. }
  1203. return 0;
  1204. }
  1205. /********** Miscellaneous **********/
  1206. int kad_op_dropout(kad_node_t *p, int action)
  1207. {
  1208. int i, n;
  1209. kad_node_t *q = p->child[0];
  1210. assert(p->child[1]->n_d == 0);
  1211. n = kad_len(q);
  1212. if (action == KAD_SYNC_DIM) {
  1213. kad_copy_dim1(p, q);
  1214. } else if (action == KAD_ALLOC) {
  1215. if (kad_is_back(p->child[0]))
  1216. p->gtmp = realloc(p->gtmp, n);
  1217. } else if (action == KAD_FORWARD) {
  1218. float r = kad_is_const(q) || kad_is_var(q)? 0.0f : *p->child[1]->x, z = 1.0f / (1.0f - r);
  1219. uint8_t *flag = (uint8_t*)p->gtmp;
  1220. for (i = 0; i < n; ++i) {
  1221. int kept = (kad_drand(p->ptr) >= r);
  1222. p->x[i] = kept? q->x[i] * z : 0.0f;
  1223. if (flag) flag[i] = kept;
  1224. }
  1225. } else if (action == KAD_BACKWARD && kad_is_back(p->child[0])) {
  1226. float r = kad_is_const(q) || kad_is_var(q)? 0.0f : *p->child[1]->x, z = 1.0f / (1.0f - r);
  1227. uint8_t *flag = (uint8_t*)p->gtmp;
  1228. for (i = 0; i < n; ++i)
  1229. if (flag[i]) q->g[i] += z * p->g[i];
  1230. }
  1231. return 0;
  1232. }
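/* kad_op_dropout() implements "inverted" dropout: with drop probability r taken from the scalar
 * second child, surviving activations are scaled by 1/(1-r) at training time, so no rescaling is
 * needed at inference; the keep/drop mask is stored in p->gtmp for the backward pass. If the
 * input is a variable or constant (rather than a feed), r is forced to 0. */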
  1233. int kad_op_sample_normal(kad_node_t *p, int action) /* not tested */
  1234. {
  1235. int i, n;
  1236. kad_node_t *q = p->child[0];
  1237. n = kad_len(q);
  1238. if (action == KAD_SYNC_DIM) {
  1239. kad_copy_dim1(p, q);
  1240. } else if (action == KAD_ALLOC) {
  1241. if (kad_is_back(p->child[0]))
  1242. p->gtmp = realloc(p->gtmp, n * sizeof(float));
  1243. } else if (action == KAD_FORWARD) {
  1244. float *r = (float*)p->gtmp;
  1245. for (i = 0; i < n; ++i) {
  1246. float z;
  1247. z = (float)kad_drand_normal(p->ptr);
  1248. p->x[i] = q->x[i] * z;
  1249. if (r) r[i] = z;
  1250. }
  1251. } else if (action == KAD_BACKWARD && kad_is_back(p->child[0])) {
  1252. float *r = (float*)p->gtmp;
  1253. for (i = 0; i < n; ++i)
  1254. q->g[i] += p->g[i] * r[i];
  1255. }
  1256. return 0;
  1257. }
int kad_op_slice(kad_node_t *p, int action)
{
	kad_node_t *q = p->child[0];
	int32_t *aux, *range;
	int i, axis, d0, d1;
	assert(p->ptr);
	aux = (int32_t*)p->ptr, axis = aux[0], range = aux + 1;
	if (axis < 0 || axis >= q->n_d) return -1;
	for (i = 0, d0 = 1; i < axis; ++i) d0 *= q->d[i];
	for (i = axis + 1, d1 = 1; i < q->n_d; ++i) d1 *= q->d[i];
	if (action == KAD_SYNC_DIM) {
		if (range[0] >= range[1] || range[0] < 0 || range[1] > q->d[axis]) return -1;
		kad_copy_dim1(p, q);
		p->d[axis] = range[1] - range[0];
	} else if (action == KAD_FORWARD) {
		for (i = 0; i < d0; ++i)
			memcpy(&p->x[i * p->d[axis] * d1], &q->x[(i * q->d[axis] + range[0]) * d1], (range[1] - range[0]) * d1 * sizeof(float));
	} else if (action == KAD_BACKWARD && kad_is_back(q)) {
		for (i = 0; i < d0; ++i)
			kad_saxpy((range[1] - range[0]) * d1, 1.0f, &p->g[i * p->d[axis] * d1], &q->g[(i * q->d[axis] + range[0]) * d1]);
	}
	return 0;
}
int kad_op_concat(kad_node_t *p, int action)
{
	kad_node_t *q = p->child[0];
	int32_t *aux;
	int i, j, k, axis, d0, d1;
	assert(p->ptr);
	aux = (int32_t*)p->ptr, axis = aux[0];
	for (i = 0, d0 = 1; i < axis; ++i) d0 *= q->d[i];
	for (i = axis + 1, d1 = 1; i < q->n_d; ++i) d1 *= q->d[i];
	if (action == KAD_SYNC_DIM) {
		for (i = 1; i < p->n_child; ++i) {
			if (p->child[i]->n_d != q->n_d) return -1;
			for (j = 0; j < q->n_d; ++j)
				if (j != axis && q->d[j] != p->child[i]->d[j]) return -1;
		}
		kad_copy_dim1(p, q);
		for (i = 1; i < p->n_child; ++i)
			p->d[axis] += p->child[i]->d[axis];
	} else if (action == KAD_FORWARD) {
		for (i = 0; i < d0; ++i)
			for (j = k = 0; j < p->n_child; ++j) {
				q = p->child[j];
				memcpy(&p->x[(i * p->d[axis] + k) * d1], &q->x[i * q->d[axis] * d1], q->d[axis] * d1 * sizeof(float));
				k += q->d[axis];
			}
	} else if (action == KAD_BACKWARD) {
		for (i = 0; i < d0; ++i)
			for (j = k = 0; j < p->n_child; ++j) {
				q = p->child[j];
				if (!kad_is_back(q)) continue;
				kad_saxpy(q->d[axis] * d1, 1.0f, &p->g[(i * p->d[axis] + k) * d1], &q->g[i * q->d[axis] * d1]);
				k += q->d[axis];
			}
	}
	return 0;
}
int kad_op_reshape(kad_node_t *p, int action)
{
	kad_node_t *q = p->child[0];
	if (action == KAD_SYNC_DIM) {
		if (p->ptr) {
			int32_t *aux = (int32_t*)p->ptr;
			int i, len = 1, n_missing = 0;
			p->n_d = p->ptr_size / 4;
			for (i = 0; i < p->n_d; ++i) p->d[i] = aux[i];
			for (i = 0; i < p->n_d; ++i)
				if (p->d[i] <= 0) ++n_missing;
				else len *= p->d[i];
			if (n_missing == 0 && len != kad_len(q)) return -1;
			if (n_missing > 1) { /* attempt to infer missing dimensions except the last one */
				for (i = 0; i < p->n_d; ++i)
					if (p->d[i] <= 0 && i < q->n_d) {
						p->d[i] = q->d[i], len *= p->d[i];
						if (--n_missing == 1) break;
					}
				if (n_missing > 1) return -1;
			}
			if (n_missing == 1) { /* infer the last missing dimension */
				if (kad_len(q) % len != 0) return -1;
				for (i = 0; i < p->n_d; ++i)
					if (p->d[i] <= 0) p->d[i] = kad_len(q) / len;
			}
		} else kad_copy_dim1(p, q);
	} else if (action == KAD_FORWARD) {
		memcpy(p->x, q->x, kad_len(p) * sizeof(float));
	} else if (action == KAD_BACKWARD && kad_is_back(q)) {
		kad_saxpy(kad_len(p), 1.0f, p->g, q->g);
	}
	return 0;
}
int kad_op_reverse(kad_node_t *p, int action)
{
	kad_node_t *q = p->child[0];
	int axis, i, j, n, d0, d1;
	axis = p->ptr? *(int32_t*)p->ptr : 0;
	if (axis < 0) axis += q->n_d;
	assert(axis >= 0 && axis < q->n_d);
	for (i = 0, d0 = 1; i < axis; ++i) d0 *= q->d[i];
	n = q->d[axis];
	for (i = axis + 1, d1 = 1; i < q->n_d; ++i) d1 *= q->d[i];
	if (action == KAD_SYNC_DIM) {
		kad_copy_dim1(p, q);
	} else if (action == KAD_FORWARD) {
		for (i = 0; i < d0; ++i)
			for (j = 0; j < n; ++j)
				memcpy(&p->x[(i * n + n - 1 - j) * d1], &q->x[(i * n + j) * d1], d1 * sizeof(float));
	} else if (action == KAD_BACKWARD && kad_is_back(q)) {
		for (i = 0; i < d0; ++i)
			for (j = 0; j < n; ++j)
				kad_saxpy(d1, 1.0f, &p->g[(i * n + n - 1 - j) * d1], &q->g[(i * n + j) * d1]);
	}
	return 0;
}
/********** Cost functions **********/
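/* Mean squared error: cost = (1/n) * sum_i (y1[i] - y0[i])^2, averaged over
 * every element of the truth tensor; the gradient w.r.t. the prediction is
 * 2 * (y1[i] - y0[i]) / n, scaled by the incoming gradient p->g[0]. */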
int kad_op_mse(kad_node_t *p, int action)
{
	kad_node_t *y1 = p->child[0]; /* test */
	kad_node_t *y0 = p->child[1]; /* truth */
	int i, n;
	n = kad_len(y0);
	if (action == KAD_SYNC_DIM) {
		if (n != kad_len(y1)) return -1;
		p->n_d = 0;
	} else if (action == KAD_FORWARD) {
		double cost = 0.0;
		for (i = 0; i < n; ++i)
			cost += (y1->x[i] - y0->x[i]) * (y1->x[i] - y0->x[i]);
		p->x[0] = (float)(cost / n);
	} else if (action == KAD_BACKWARD && kad_is_back(y1)) {
		float t = 2.0f * p->g[0] / n;
		for (i = 0; i < n; ++i)
			y1->g[i] += t * (y1->x[i] - y0->x[i]);
	}
	return 0;
}
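/* Binary cross-entropy for labels in [0,1], written in its KL-divergence form
 * so that the cost is zero when y1 equals y0:
 *   cost = (1/n) * sum_i [ y0*log(y0/y1) + (1-y0)*log((1-y0)/(1-y1)) ]
 * The gradient w.r.t. y1 is (-y0/y1 + (1-y0)/(1-y1)) / n; `tiny` guards
 * against division by zero and log(0). */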
int kad_op_ce_bin(kad_node_t *p, int action)
{
	static const float tiny = 1e-9f;
	kad_node_t *y1 = p->child[0]; /* test */
	kad_node_t *y0 = p->child[1]; /* truth */
	int i, n;
	n = kad_len(y0);
	if (action == KAD_SYNC_DIM) {
		if (n != kad_len(y1)) return -1;
		p->n_d = 0;
	} else if (action == KAD_FORWARD) {
		double cost = 0.0;
		for (i = 0; i < n; ++i) {
			if (y0->x[i] > 0.0f)
				cost += y0->x[i] * log(y0->x[i] / (y1->x[i] > tiny? y1->x[i] : tiny));
			if (1.0f - y0->x[i] > 0.0f)
				cost += (1.0f - y0->x[i]) * log((1.0f - y0->x[i]) / (1.0f - y1->x[i] > tiny? 1.0f - y1->x[i] : tiny));
		}
		p->x[0] = (float)(cost / n);
	} else if (action == KAD_BACKWARD && kad_is_back(y1)) {
		float t = p->g[0] / n;
		for (i = 0; i < n; ++i) {
			if (y0->x[i] > 0.0f)
				y1->g[i] -= t * y0->x[i] / (y1->x[i] > tiny? y1->x[i] : tiny);
			if (1.0f - y0->x[i] > 0.0f)
				y1->g[i] += t * (1.0f - y0->x[i]) / (1.0f - y1->x[i] > tiny? 1.0f - y1->x[i] : tiny);
		}
	}
	return 0;
}
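/* Same cost as kad_op_ce_bin() but for labels and predictions encoded in
 * (-1,1) rather than (0,1): the code substitutes y -> (1+y)/2 on the fly,
 * hence the 0.5 factors and the 1+y / 1-y terms. */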
int kad_op_ce_bin_neg(kad_node_t *p, int action)
{
	static const float tiny = 1e-9f;
	kad_node_t *y1 = p->child[0]; /* test */
	kad_node_t *y0 = p->child[1]; /* truth */
	int i, n;
	n = kad_len(y0);
	if (action == KAD_SYNC_DIM) {
		if (n != kad_len(y1)) return -1;
		p->n_d = 0;
	} else if (action == KAD_FORWARD) {
		double cost = 0.0;
		for (i = 0; i < n; ++i) {
			if (1.0f + y0->x[i] > 0.0f)
				cost += .5f * (1.0f + y0->x[i]) * log((1.0f + y0->x[i]) / (1.0f + y1->x[i] > tiny? 1.0f + y1->x[i] : tiny));
			if (1.0f - y0->x[i] > 0.0f)
				cost += .5f * (1.0f - y0->x[i]) * log((1.0f - y0->x[i]) / (1.0f - y1->x[i] > tiny? 1.0f - y1->x[i] : tiny));
		}
		p->x[0] = (float)(cost / n);
	} else if (action == KAD_BACKWARD && kad_is_back(y1)) {
		float t = p->g[0] / n;
		for (i = 0; i < n; ++i) {
			if (1.0f + y0->x[i] > 0.0f)
				y1->g[i] -= .5f * t * (1.0f + y0->x[i]) / (1.0f + y1->x[i] > tiny? 1.0f + y1->x[i] : tiny);
			if (1.0f - y0->x[i] > 0.0f)
				y1->g[i] += .5f * t * (1.0f - y0->x[i]) / (1.0f - y1->x[i] > tiny? 1.0f - y1->x[i] : tiny);
		}
	}
	return 0;
}
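/* Multi-class cross-entropy, again in KL form, computed row by row over the
 * last dimension and averaged over the d0 leading rows. An optional third
 * child holds one weight per class, which simply scales each class's
 * contribution to both the cost and the gradient. */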
int kad_op_ce_multi(kad_node_t *p, int action)
{
	static const float tiny = 1e-9f;
	kad_node_t *y1 = p->child[0]; /* test */
	kad_node_t *y0 = p->child[1]; /* truth */
	kad_node_t *c = 0;
	int i, j, n1, d0;
	n1 = y0->d[y0->n_d - 1];
	d0 = kad_len(y0) / n1;
	if (p->n_child == 3) {
		c = p->child[2];
		assert(c->n_d == 1 && c->d[0] == n1);
	}
	if (action == KAD_SYNC_DIM) {
		if (kad_len(y0) != kad_len(y1) || y0->d[y0->n_d - 1] != y1->d[y1->n_d - 1]) return -1;
		p->n_d = 0;
	} else if (action == KAD_FORWARD) {
		double cost = 0.0;
		if (c == 0) {
			for (j = 0; j < d0; ++j) {
				float *x1 = &y1->x[j * n1], *x0 = &y0->x[j * n1];
				for (i = 0; i < n1; ++i)
					if (x0[i] > 0.0f)
						cost += x0[i] * log(x0[i] / (x1[i] > tiny? x1[i] : tiny));
			}
		} else {
			for (j = 0; j < d0; ++j) {
				float *x1 = &y1->x[j * n1], *x0 = &y0->x[j * n1];
				for (i = 0; i < n1; ++i)
					if (x0[i] > 0.0f)
						cost += c->x[i] * x0[i] * log(x0[i] / (x1[i] > tiny? x1[i] : tiny));
			}
		}
		p->x[0] = (float)(cost / d0);
	} else if (action == KAD_BACKWARD && kad_is_back(y1)) {
		float t = p->g[0] / d0;
		if (c == 0) {
			for (j = 0; j < d0; ++j) {
				float *g = &y1->g[j * n1], *x1 = &y1->x[j * n1], *x0 = &y0->x[j * n1];
				for (i = 0; i < n1; ++i)
					g[i] -= t * x0[i] / (x1[i] > tiny? x1[i] : tiny);
			}
		} else {
			for (j = 0; j < d0; ++j) {
				float *g = &y1->g[j * n1], *x1 = &y1->x[j * n1], *x0 = &y0->x[j * n1];
				for (i = 0; i < n1; ++i)
					g[i] -= t * c->x[i] * x0[i] / (x1[i] > tiny? x1[i] : tiny);
			}
		}
	}
	return 0;
}
/********** Normalization **********/
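/* Layer-normalization-style standardization: each row of length n (the last
 * dimension) is shifted to zero mean and scaled to unit standard deviation.
 * The per-row 1/std is cached in p->gtmp; the backward pass uses
 *   dq[i] = (1/std) * (dp[i] - mean(dp) - y[i] * mean(y .* dp))
 * where y is the normalized output. */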
int kad_op_stdnorm(kad_node_t *p, int action)
{
	int i, j, n, m;
	kad_node_t *q = p->child[0];
	assert(q->n_d > 0);
	n = q->d[q->n_d - 1];
	m = kad_len(q) / n;
	if (action == KAD_SYNC_DIM) {
		kad_copy_dim1(p, q);
	} else if (action == KAD_ALLOC) {
		p->gtmp = realloc(p->gtmp, m * sizeof(float));
	} else if (action == KAD_FORWARD) {
		float *si = (float*)p->gtmp;
		for (j = 0; j < m; ++j) {
			float *px = &p->x[j * n], *qx = &q->x[j * n];
			float avg, std_inv;
			double s;
			for (i = 0, s = 0.0; i < n; ++i) s += qx[i];
			avg = (float)(s / n);
			for (i = 0; i < n; ++i) px[i] = qx[i] - avg;
			for (i = 0, s = 0.0; i < n; ++i) s += px[i] * px[i];
			std_inv = s == 0.0? 1.0f : (float)(1.0 / sqrt(s / n));
			for (i = 0; i < n; ++i) px[i] *= std_inv;
			si[j] = std_inv;
		}
	} else if (action == KAD_BACKWARD && kad_is_back(q)) {
		float *si = (float*)p->gtmp;
		for (j = 0; j < m; ++j) {
			float *pg = &p->g[j * n], *qg = &q->g[j * n], *px = &p->x[j * n], std_inv = si[j];
			double s, t;
			for (i = 0, s = t = 0.0; i < n; ++i)
				s += pg[i], t += px[i] * pg[i];
			s /= n, t /= n;
			for (i = 0; i < n; ++i)
				qg[i] += std_inv * (pg[i] - s - px[i] * t);
		}
	}
	return 0;
}
/********** Activation functions **********/
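/* sigm(x) = 1/(1+exp(-x)); the backward pass uses dy/dx = y*(1-y), computed
 * from the cached output rather than the input. */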
int kad_op_sigm(kad_node_t *p, int action)
{
	int i, n;
	kad_node_t *q = p->child[0];
	n = kad_len(q);
	if (action == KAD_SYNC_DIM) {
		kad_copy_dim1(p, q);
	} else if (action == KAD_FORWARD) {
		for (i = 0; i < n; ++i)
			p->x[i] = 1.0f / (1.0f + expf(-q->x[i]));
	} else if (action == KAD_BACKWARD && kad_is_back(q)) {
		for (i = 0; i < n; ++i)
			q->g[i] += p->g[i] * (p->x[i] * (1.0f - p->x[i]));
	}
	return 0;
}
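/* tanh(x) computed as (1-e)/(1+e) with e = exp(-2x); inputs below -20 are
 * clamped to -1 to avoid overflow in expf(). Backward uses dy/dx = 1 - y^2. */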
int kad_op_tanh(kad_node_t *p, int action)
{
	int i, n;
	kad_node_t *q = p->child[0];
	n = kad_len(q);
	if (action == KAD_SYNC_DIM) {
		kad_copy_dim1(p, q);
	} else if (action == KAD_FORWARD) {
		for (i = 0; i < n; ++i) {
			if (q->x[i] < -20.0f) p->x[i] = -1.0f;
			else {
				float y;
				y = expf(-2.0f * q->x[i]);
				p->x[i] = (1.0f - y) / (1.0f + y);
			}
		}
	} else if (action == KAD_BACKWARD && kad_is_back(q)) {
		for (i = 0; i < n; ++i)
			q->g[i] += p->g[i] * (1.0f - p->x[i] * p->x[i]);
	}
	return 0;
}
int kad_op_relu(kad_node_t *p, int action)
{
	int i, n;
	kad_node_t *q = p->child[0];
	n = kad_len(q);
	if (action == KAD_SYNC_DIM) {
		kad_copy_dim1(p, q);
	} else if (action == KAD_FORWARD) {
		for (i = 0; i < n; ++i)
			p->x[i] = q->x[i] > 0.0f? q->x[i] : 0.0f;
	} else if (action == KAD_BACKWARD && kad_is_back(q)) {
		for (i = 0; i < n; ++i)
			if (q->x[i] > 0.0f)
				q->g[i] += p->g[i];
	}
	return 0;
}
int kad_op_sin(kad_node_t *p, int action)
{
	int i, n;
	kad_node_t *q = p->child[0];
	n = kad_len(q);
	if (action == KAD_SYNC_DIM) {
		kad_copy_dim1(p, q);
	} else if (action == KAD_FORWARD) {
		for (i = 0; i < n; ++i) p->x[i] = sinf(q->x[i]);
	} else if (action == KAD_BACKWARD && kad_is_back(q)) {
		for (i = 0; i < n; ++i)
			q->g[i] += p->g[i] * cosf(q->x[i]);
	}
	return 0;
}
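/* Row-wise softmax over the last dimension, with the usual max subtraction
 * for numerical stability. The backward pass applies the softmax Jacobian:
 *   dq[i] = y[i] * (g[i] - sum_j g[j]*y[j]) */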
int kad_op_softmax(kad_node_t *p, int action)
{
	int i, j, n1, d0;
	kad_node_t *q = p->child[0];
	n1 = q->d[q->n_d - 1];
	d0 = kad_len(q) / n1;
	if (action == KAD_SYNC_DIM) {
		kad_copy_dim1(p, q);
	} else if (action == KAD_FORWARD) {
		for (j = 0; j < d0; ++j) {
			float s, max, *x = &q->x[j * n1], *y = &p->x[j * n1];
			for (i = 0, max = -FLT_MAX; i < n1; ++i)
				max = max > x[i]? max : x[i];
			for (i = 0, s = 0.0f; i < n1; ++i) {
				y[i] = expf(x[i] - max);
				s += y[i];
			}
			for (i = 0, s = 1.0f / s; i < n1; ++i) y[i] *= s;
		}
	} else if (action == KAD_BACKWARD && kad_is_back(q)) {
		for (j = 0; j < d0; ++j) {
			float s, *g = &p->g[j * n1], *y = &p->x[j * n1], *h = &q->g[j * n1];
			for (i = 0, s = 0.0f; i < n1; ++i)
				s += g[i] * y[i];
			for (i = 0; i < n1; ++i)
				h[i] += y[i] * (g[i] - s);
		}
	}
	return 0;
}
/********** Multi-node pooling **********/
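/* Element-wise average of all children (which must have identical lengths);
 * each differentiable child receives 1/n_child of the output gradient. */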
int kad_op_avg(kad_node_t *p, int action)
{
	int i, n;
	float tmp;
	kad_node_t *q;
	assert(p->n_child > 0);
	tmp = 1.0f / p->n_child;
	q = p->child[0];
	n = kad_len(q);
	if (action == KAD_SYNC_DIM) {
		for (i = 1; i < p->n_child; ++i)
			if (kad_len(p->child[i]) != n) return -1;
		kad_copy_dim1(p, q);
	} else if (action == KAD_FORWARD) {
		memcpy(p->x, q->x, n * sizeof(float));
		for (i = 1; i < p->n_child; ++i)
			kad_saxpy(n, 1.0f, p->child[i]->x, p->x);
		for (i = 0; i < n; ++i) p->x[i] *= tmp;
	} else if (action == KAD_BACKWARD) {
		for (i = 0; i < p->n_child; ++i)
			if (kad_is_back(p->child[i]))
				kad_saxpy(n, tmp, p->g, p->child[i]->g);
	}
	return 0;
}
int kad_op_max(kad_node_t *p, int action)
{
	int i, n;
	kad_node_t *q = p->child[0];
	n = kad_len(q);
	if (action == KAD_SYNC_DIM) {
		int *max_j;
		for (i = 1; i < p->n_child; ++i)
			if (kad_len(p->child[i]) != n) return -1;
		kad_copy_dim1(p, q);
		max_j = (int*)calloc(n, sizeof(int));
		p->gtmp = max_j;
	} else if (action == KAD_FORWARD) {
		int j, *max_j = (int*)p->gtmp;
		memset(max_j, 0, n * sizeof(int));
		memcpy(p->x, q->x, n * sizeof(float));
		for (j = 1; j < p->n_child; ++j)
			for (i = 0, q = p->child[j]; i < n; ++i)
				if (q->x[i] > p->x[i]) p->x[i] = q->x[i], max_j[i] = j;
	} else if (action == KAD_BACKWARD) {
		int *max_j = (int*)p->gtmp;
		for (i = 0; i < n; ++i)
			p->child[max_j[i]]->g[i] += p->g[i];
	}
	return 0;
}
int kad_op_stack(kad_node_t *p, int action) /* TODO: allow axis, as in TensorFlow */
{
	int i, n, axis = 0;
	kad_node_t *q;
	assert(p->n_child > 0);
	q = p->child[0];
	n = kad_len(q);
	if (action == KAD_SYNC_DIM) {
		for (i = 1; i < p->n_child; ++i)
			if (kad_len(p->child[i]) != n) return -1;
		p->n_d = q->n_d + 1;
		for (i = 0; i < axis; ++i) p->d[i] = q->d[i];
		p->d[axis] = p->n_child;
		for (; i < q->n_d; ++i) p->d[i+1] = q->d[i];
	} else if (action == KAD_FORWARD) { /* TODO: doesn't work when axis != 0 */
		for (i = 0; i < p->n_child; ++i)
			memcpy(&p->x[i * n], p->child[i]->x, n * sizeof(float));
	} else if (action == KAD_BACKWARD) {
		for (i = 0; i < p->n_child; ++i)
			if (kad_is_back(p->child[i]))
				kad_saxpy(n, 1.0f, &p->g[i * n], p->child[i]->g);
	}
	return 0;
}
int kad_op_select(kad_node_t *p, int action)
{
	kad_node_t *q;
	int i, n, which;
	which = *(int32_t*)p->ptr;
	if (which < 0) which += p->n_child;
	assert(which >= 0 && which < p->n_child);
	q = p->child[which];
	n = kad_len(q);
	if (action == KAD_SYNC_DIM) {
		for (i = 0; i < p->n_child; ++i)
			if (p->child[i]->n_d != q->n_d || kad_len(p->child[i]) != n)
				break;
		if (i < p->n_child) return -1;
		kad_copy_dim1(p, q);
	} else if (action == KAD_FORWARD) {
		memcpy(p->x, q->x, n * sizeof(float));
	} else if (action == KAD_BACKWARD && kad_is_back(q)) {
		kad_saxpy(n, 1.0f, p->g, q->g);
	}
	return 0;
}
/********** 2D convolution **********/
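/* conv_rot180() reverses each flattened kernel row in place, i.e. rotates
 * every 2D kernel by 180 degrees (or reverses a 1D kernel). The convolution
 * operators call it before and after each pass, so the stored weights keep
 * their original orientation. */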
static void conv_rot180(int d0, int d1, float *x) /* rotate/reverse a weight matrix */
{
	int i, j;
	for (i = 0; i < d0; ++i) {
		float tmp, *xi = &x[i * d1];
		for (j = 0; j < d1>>1; ++j)
			tmp = xi[j], xi[j] = xi[d1-1-j], xi[d1-1-j] = tmp;
	}
}
static void conv2d_move_1to3(int d[4], const float *x, float *y) /* convert the NCHW shape to the NHWC shape */
{
	int i, j, k, l;
	for (i = 0; i < d[0]; ++i)
		for (j = 0; j < d[1]; ++j)
			for (k = 0; k < d[2]; ++k) {
				int ik = (i * d[2] + k) * d[3], ijk = ((i * d[1] + j) * d[2] + k) * d[3];
				for (l = 0; l < d[3]; ++l)
					y[(ik + l) * d[1] + j] = x[ijk + l];
			}
}
static void conv2d_add_3to1(int d[4], const float *y, float *x) /* convert the NHWC shape back to NCHW and add to another NCHW-shaped array */
{
	int i, j, k, l;
	for (i = 0; i < d[0]; ++i)
		for (j = 0; j < d[1]; ++j)
			for (k = 0; k < d[2]; ++k) {
				int ik = (i * d[2] + k) * d[3], ijk = ((i * d[1] + j) * d[2] + k) * d[3];
				for (l = 0; l < d[3]; ++l)
					x[ijk + l] += y[(ik + l) * d[1] + j];
			}
}
#define conv_out_size(in_size, aux) (((in_size) - (aux)->kernel_size + (aux)->pad[0] + (aux)->pad[1]) / (aux)->stride + 1)
#define process_row_for(_xx, _ww, _yy, _wn, _pn, _stride, _pad, _t) do { \
	int j, l; \
	if (_stride > 1) { \
		for (l = 0; l < _wn; ++l) { \
			const float *xl = &_xx[l - _pad]; \
			for (j = 0; j < _pn; ++j, xl += _stride) _t[j] = *xl; \
			kad_saxpy(_pn, _ww[l], _t, _yy); \
		} \
	} else for (l = 0; l < _wn; ++l) kad_saxpy(_pn, _ww[l], &_xx[l - _pad], _yy); \
} while (0)
#define process_row_back_x(_xx, _ww, _yy, _wn, _pn, _stride, _pad, _t) do { \
	int j, l; \
	if (_stride > 1) { \
		for (l = 0; l < _wn; ++l) { \
			float *xl = &_xx[l - _pad]; \
			memset(_t, 0, _pn * sizeof(float)); \
			kad_saxpy(_pn, _ww[l], _yy, _t); \
			for (j = 0; j < _pn; ++j, xl += _stride) *xl += _t[j]; \
		} \
	} else for (l = 0; l < _wn; ++l) kad_saxpy(_pn, _ww[l], _yy, &_xx[l - _pad]); \
} while (0)
#define process_row_back_w(_xx, _ww, _yy, _wn, _pn, _stride, _pad, _t) do { \
	int j, l; \
	if (_stride > 1) { \
		for (l = 0; l < _wn; ++l) { \
			const float *xl = &_xx[l - _pad]; \
			for (j = 0; j < _pn; ++j, xl += _stride) _t[j] = *xl; \
			_ww[l] += kad_sdot(_pn, _yy, _t); \
		} \
	} else for (l = 0; l < _wn; ++l) _ww[l] += kad_sdot(_pn, _yy, &_xx[l - _pad]); \
} while (0)
/* Forward and backward passes are implemented with two different algorithms.
 * The first is faster for small kernels with few input channels; otherwise the
 * second algorithm is faster. Both algorithms should produce identical
 * results, up to the precision of "float".
 */
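/* Concretely: the first algorithm works directly on the NCHW layout and calls
 * kad_saxpy()/kad_sdot() once per kernel element over an output row; the
 * second transposes input and weights to NHWC (conv2d_move_1to3) so that each
 * output element becomes a single dot product over w->d[3]*w->d[1] contiguous
 * values, at the cost of the transposition buffers q1/w1. The switch happens
 * when w->d[3]*w->d[1] reaches 16 (conv2d) or w->d[2]*w->d[1] reaches 32
 * (conv1d). */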
int kad_op_conv2d(kad_node_t *p, int action) /* in the number-channel-height-width (NCHW) shape */
{
#define conv2d_loop1(_x, _w, _y, _tmp, _row_func) do { /* for the NCHW shape */ \
	int n, c1, c0, i, k, ii; \
	for (n = 0; n < q->d[0]; ++n) /* mini-batch */ \
		for (c1 = 0; c1 < w->d[0]; ++c1) /* output channel */ \
			for (c0 = 0; c0 < w->d[1]; ++c0) /* input channel */ \
				for (k = 0; k < w->d[2]; ++k) { /* kernel row */ \
					float *_ww = &(_w)[((c1 * w->d[1] + c0) * w->d[2] + k) * w->d[3]]; \
					for (i = 0, ii = k - aux[0].pad[0]; i < p->d[2] && ii >= 0 && ii < q->d[2]; ++i, ii += aux[0].stride) { /* output row */ \
						float *_xx = &(_x)[((n * q->d[1] + c0) * q->d[2] + ii) * q->d[3]]; \
						float *_yy = &(_y)[((n * p->d[1] + c1) * p->d[2] + i) * p->d[3]]; \
						if (x_padded) { \
							memcpy(x_padded + aux[1].pad[0], _xx, q->d[3] * sizeof(float)); \
							_xx = x_padded + aux[1].pad[0]; \
						} \
						_row_func(_xx, _ww, _yy, w->d[3], p->d[3], aux[1].stride, aux[1].pad[0], (_tmp)); \
					} /* ~i */ \
				} /* ~k, c0, c1, n */ \
} while (0)
#define conv2d_loop2(_x, _w, _y, _code) do { /* for the NHWC shape */ \
	int n, c1, i, j, k, ii, j_skip = aux[1].stride * q->d[1], m = w->d[3] * w->d[1]; \
	for (n = 0; n < q->d[0]; ++n) /* mini-batch */ \
		for (c1 = 0; c1 < w->d[0]; ++c1) /* output channel */ \
			for (k = 0; k < w->d[2]; ++k) { /* kernel row */ \
				float *_ww = &(_w)[(c1 * w->d[2] + k) * m]; \
				for (i = 0, ii = k - aux[0].pad[0]; i < p->d[2] && ii >= 0 && ii < q->d[2]; ++i, ii += aux[0].stride) { /* output and input row */ \
					float *_xx = &(_x)[(n * q->d[2] + ii) * q->d[3] * q->d[1]]; \
					float *_yy = &(_y)[((n * p->d[1] + c1) * p->d[2] + i) * p->d[3]]; \
					if (x_padded) { \
						memcpy(x_padded + aux[1].pad[0] * q->d[1], _xx, q->d[3] * q->d[1] * sizeof(float)); \
						_xx = x_padded; \
					} \
					for (j = 0; j < p->d[3]; ++j, _xx += j_skip, ++_yy) _code; /* output and input column */ \
				} /* ~i */ \
			} /* ~k, c1, n */ \
} while (0)
	conv_conf_t *aux = (conv_conf_t*)p->ptr;
	kad_node_t *q = p->child[0], *w = p->child[1];
	float *t = 0, *q1 = 0, *w1 = 0, *x_padded = 0;
	int algo_switch = 0;
	if (action == KAD_FORWARD || action == KAD_BACKWARD) { /* allocate working space */
		if (w->d[3] * w->d[1] < 16) {
			t = (float*)malloc(p->d[3] * sizeof(float));
			x_padded = aux[1].pad[0] + aux[1].pad[1] > 0? (float*)calloc(q->d[3] + aux[1].pad[0] + aux[1].pad[1], sizeof(float)) : 0;
		} else {
			q1 = (float*)malloc(kad_len(q) * sizeof(float));
			w1 = (float*)malloc(kad_len(w) * sizeof(float));
			x_padded = aux[1].pad[0] + aux[1].pad[1] > 0? (float*)calloc((q->d[3] + aux[1].pad[0] + aux[1].pad[1]) * q->d[1], sizeof(float)) : 0;
			algo_switch = 1;
		}
	}
	if (action == KAD_SYNC_DIM) {
		if (q->n_d != 4 || w->n_d != 4) return -1;
		if (q->d[1] != w->d[1]) return -1; /* unmatched input channels */
		p->n_d = 4;
		p->d[0] = q->d[0], p->d[1] = w->d[0], p->d[2] = conv_out_size(q->d[2], &aux[0]), p->d[3] = conv_out_size(q->d[3], &aux[1]);
	} else if (action == KAD_FORWARD) {
		conv_rot180(w->d[0] * w->d[1], w->d[2] * w->d[3], w->x);
		memset(p->x, 0, kad_len(p) * sizeof(float));
		if (!algo_switch) { /* this is the first algorithm */
			conv2d_loop1(q->x, w->x, p->x, t, process_row_for);
		} else { /* this is the second algorithm */
			conv2d_move_1to3(q->d, q->x, q1);
			conv2d_move_1to3(w->d, w->x, w1);
			conv2d_loop2(q1, w1, p->x, (*_yy += kad_sdot(m, _ww, _xx)));
		}
		conv_rot180(w->d[0] * w->d[1], w->d[2] * w->d[3], w->x);
	} else if (action == KAD_BACKWARD) {
		if (kad_is_back(p->child[0])) { /* backprop to the input array */
			conv_rot180(w->d[0] * w->d[1], w->d[2] * w->d[3], w->x);
			if (!algo_switch) {
				conv2d_loop1(q->g, w->x, p->g, t, process_row_back_x);
			} else {
				memset(q1, 0, kad_len(q) * sizeof(float));
				conv2d_move_1to3(w->d, w->x, w1);
				conv2d_loop2(q1, w1, p->g, kad_saxpy(m, *_yy, _ww, _xx));
				conv2d_add_3to1(q->d, q1, q->g);
			}
			conv_rot180(w->d[0] * w->d[1], w->d[2] * w->d[3], w->x);
		}
		if (kad_is_back(p->child[1])) { /* backprop to the weight matrix */
			conv_rot180(w->d[0] * w->d[1], w->d[2] * w->d[3], w->g);
			if (!algo_switch) {
				conv2d_loop1(q->x, w->g, p->g, t, process_row_back_w);
			} else {
				conv2d_move_1to3(q->d, q->x, q1);
				memset(w1, 0, kad_len(w) * sizeof(float));
				conv2d_loop2(q1, w1, p->g, kad_saxpy(m, *_yy, _xx, _ww));
				conv2d_add_3to1(w->d, w1, w->g);
			}
			conv_rot180(w->d[0] * w->d[1], w->d[2] * w->d[3], w->g);
		}
	}
	free(t); free(q1); free(w1); free(x_padded);
	return 0;
}
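/* 2D max pooling. p->gtmp holds, for every output element, the flat index of
 * the input element that produced the maximum; the backward pass routes each
 * output gradient back to exactly that input position. */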
int kad_op_max2d(kad_node_t *p, int action)
{
	conv_conf_t *aux = (conv_conf_t*)p->ptr;
	kad_node_t *q = p->child[0];
	if (action == KAD_SYNC_DIM) {
		if (q->n_d != 4) return -1;
		p->n_d = 4;
		p->d[0] = q->d[0], p->d[1] = q->d[1], p->d[2] = conv_out_size(q->d[2], &aux[0]), p->d[3] = conv_out_size(q->d[3], &aux[1]);
	} else if (action == KAD_ALLOC) {
		p->gtmp = realloc(p->gtmp, kad_len(p) * sizeof(int));
	} else if (action == KAD_FORWARD) {
		int rest = 1, len, t, i;
		int *f = (int*)p->gtmp;
		len = kad_len(p);
		for (i = 0; i < len; ++i) p->x[i] = -FLT_MAX;
		for (i = 0; i < p->n_d - 2; ++i) rest *= p->d[i];
		for (t = 0; t < rest; ++t) {
			int i, j, k, l, p_row = p->d[p->n_d - 2], p_col = p->d[p->n_d - 1];
			for (i = 0; i < p_row; ++i) {
				int u = (t * p_row + i) * p_col;
				for (k = 0; k < aux[0].kernel_size; ++k) {
					int v, v0, v_end, ii = i * aux[0].stride + k - aux[0].pad[0];
					if (ii < 0 || ii >= q->d[p->n_d - 2]) continue;
					v0 = (t * q->d[p->n_d - 2] + ii) * q->d[p->n_d - 1];
					v_end = v0 + q->d[p->n_d - 1];
					for (l = 0; l < aux[1].kernel_size; ++l)
						for (j = 0, v = v0 + (l > aux[1].pad[0]? l - aux[1].pad[0] : 0); j < p_col && v < v_end; ++j, v += aux[1].stride)
							if (p->x[u + j] < q->x[v])
								p->x[u + j] = q->x[v], f[u + j] = v;
				} /* ~k */
			} /* ~i */
		}
	} else if (action == KAD_BACKWARD) {
		int i, len, *f = (int*)p->gtmp;
		len = kad_len(p);
		for (i = 0; i < len; ++i) q->g[f[i]] += p->g[i];
	}
	return 0;
}
/********** 1D convolution **********/
static void conv1d_move_1to2(int d[3], const float *x, float *y)
{
	int i, j, k;
	for (k = 0; k < d[0]; ++k)
		for (j = 0; j < d[1]; ++j)
			for (i = 0; i < d[2]; ++i)
				y[(k * d[2] + i) * d[1] + j] = x[(k * d[1] + j) * d[2] + i];
}
static void conv1d_add_2to1(int d[3], const float *y, float *x)
{
	int i, j, k;
	for (k = 0; k < d[0]; ++k)
		for (j = 0; j < d[1]; ++j)
			for (i = 0; i < d[2]; ++i)
				x[(k * d[1] + j) * d[2] + i] += y[(k * d[2] + i) * d[1] + j];
}
int kad_op_conv1d(kad_node_t *p, int action) /* in the number-channel-width (NCW) shape */
{
#define conv1d_loop1(_x, _w, _y, _tmp, _row_func) do { /* for the NCW shape */ \
	int n, c1, c0; \
	for (n = 0; n < q->d[0]; ++n) /* mini-batch */ \
		for (c1 = 0; c1 < w->d[0]; ++c1) /* output channel */ \
			for (c0 = 0; c0 < w->d[1]; ++c0) { /* input channel */ \
				float *_ww = &(_w)[(c1 * w->d[1] + c0) * w->d[2]]; \
				float *_xx = &(_x)[(n * q->d[1] + c0) * q->d[2]]; \
				float *_yy = &(_y)[(n * p->d[1] + c1) * p->d[2]]; \
				if (x_padded) { \
					memcpy(x_padded + aux->pad[0], _xx, q->d[2] * sizeof(float)); \
					_xx = x_padded + aux->pad[0]; \
				} \
				_row_func(_xx, _ww, _yy, w->d[2], p->d[2], aux->stride, aux->pad[0], (_tmp)); \
			} /* ~c0, c1, n */ \
} while (0)
#define conv1d_loop2(_x, _w, _y, _code) do { /* for the NWC shape */ \
	int n, c1, j, j_skip = aux->stride * q->d[1], m = w->d[2] * w->d[1]; \
	for (n = 0; n < q->d[0]; ++n) /* mini-batch */ \
		for (c1 = 0; c1 < w->d[0]; ++c1) { /* output channel */ \
			float *_ww = &(_w)[c1 * m]; \
			float *_xx = &(_x)[n * q->d[1] * q->d[2]]; \
			float *_yy = &(_y)[(n * p->d[1] + c1) * p->d[2]]; \
			if (x_padded) { \
				memcpy(x_padded + aux->pad[0] * q->d[1], _xx, q->d[2] * q->d[1] * sizeof(float)); \
				_xx = x_padded; \
			} \
			for (j = 0; j < p->d[2]; ++j, _xx += j_skip, ++_yy) _code; \
		} /* ~c1, n */ \
} while (0)
	conv_conf_t *aux = (conv_conf_t*)p->ptr;
	kad_node_t *q = p->child[0], *w = p->child[1];
	float *t = 0, *q1 = 0, *w1 = 0, *x_padded = 0;
	int algo_switch = 0;
	if (action == KAD_FORWARD || action == KAD_BACKWARD) { /* allocate working space */
		if (w->d[2] * w->d[1] < 32) {
			t = (float*)malloc(p->d[2] * sizeof(float));
			x_padded = aux->pad[0] + aux->pad[1] > 0? (float*)calloc(q->d[2] + aux->pad[0] + aux->pad[1], sizeof(float)) : 0;
		} else {
			q1 = (float*)malloc(kad_len(q) * sizeof(float));
			w1 = (float*)malloc(kad_len(w) * sizeof(float));
			x_padded = aux->pad[0] + aux->pad[1] > 0? (float*)calloc((q->d[2] + aux->pad[0] + aux->pad[1]) * q->d[1], sizeof(float)) : 0;
			algo_switch = 1;
		}
	}
	if (action == KAD_SYNC_DIM) {
		if (q->n_d != 3 || w->n_d != 3) return -1;
		if (q->d[1] != w->d[1]) return -1; /* unmatched input channels */
		p->n_d = 3;
		p->d[0] = q->d[0], p->d[1] = w->d[0], p->d[2] = conv_out_size(q->d[2], aux);
	} else if (action == KAD_FORWARD) {
		conv_rot180(w->d[0] * w->d[1], w->d[2], w->x);
		memset(p->x, 0, kad_len(p) * sizeof(float));
		if (!algo_switch) { /* this is the first algorithm */
			conv1d_loop1(q->x, w->x, p->x, t, process_row_for);
		} else { /* this is the second algorithm */
			conv1d_move_1to2(q->d, q->x, q1);
			conv1d_move_1to2(w->d, w->x, w1);
			conv1d_loop2(q1, w1, p->x, (*_yy += kad_sdot(m, _ww, _xx)));
		}
		conv_rot180(w->d[0] * w->d[1], w->d[2], w->x);
	} else if (action == KAD_BACKWARD) {
		if (kad_is_back(p->child[0])) { /* backprop to the input array */
			conv_rot180(w->d[0] * w->d[1], w->d[2], w->x);
			if (!algo_switch) {
				conv1d_loop1(q->g, w->x, p->g, t, process_row_back_x);
			} else {
				memset(q1, 0, kad_len(q) * sizeof(float));
				conv1d_move_1to2(w->d, w->x, w1);
				conv1d_loop2(q1, w1, p->g, kad_saxpy(m, *_yy, _ww, _xx));
				conv1d_add_2to1(q->d, q1, q->g);
			}
			conv_rot180(w->d[0] * w->d[1], w->d[2], w->x);
		}
		if (kad_is_back(p->child[1])) { /* backprop to the weight matrix */
			conv_rot180(w->d[0] * w->d[1], w->d[2], w->g);
			if (!algo_switch) {
				conv1d_loop1(q->x, w->g, p->g, t, process_row_back_w);
			} else {
				conv1d_move_1to2(q->d, q->x, q1);
				memset(w1, 0, kad_len(w) * sizeof(float));
				conv1d_loop2(q1, w1, p->g, kad_saxpy(m, *_yy, _xx, _ww));
				conv1d_add_2to1(w->d, w1, w->g);
			}
			conv_rot180(w->d[0] * w->d[1], w->d[2], w->g);
		}
	}
	free(t); free(q1); free(w1); free(x_padded);
	return 0;
}
int kad_op_max1d(kad_node_t *p, int action)
{
	conv_conf_t *aux = (conv_conf_t*)p->ptr;
	kad_node_t *q = p->child[0];
	if (action == KAD_SYNC_DIM) {
		if (q->n_d != 3) return -1;
		p->n_d = 3;
		p->d[0] = q->d[0], p->d[1] = q->d[1], p->d[2] = conv_out_size(q->d[2], aux);
	} else if (action == KAD_ALLOC) {
		p->gtmp = realloc(p->gtmp, kad_len(p) * sizeof(int));
	} else if (action == KAD_FORWARD) {
		int rest = 1, len, t, i;
		int *f = (int*)p->gtmp;
		len = kad_len(p);
		for (i = 0; i < len; ++i) p->x[i] = -FLT_MAX;
		for (i = 0; i < p->n_d - 1; ++i) rest *= p->d[i];
		for (t = 0; t < rest; ++t) {
			int j, l, p_width = p->d[p->n_d - 1];
			int u = t * p_width, v, v0 = t * q->d[p->n_d - 1], v_end = v0 + q->d[p->n_d - 1];
			for (l = 0; l < aux->kernel_size; ++l)
				for (j = 0, v = v0 + (l > aux->pad[0]? l - aux->pad[0] : 0); j < p_width && v < v_end; ++j, v += aux->stride)
					if (p->x[u + j] < q->x[v])
						p->x[u + j] = q->x[v], f[u + j] = v;
		}
	} else if (action == KAD_BACKWARD) {
		int i, len, *f = (int*)p->gtmp;
		len = kad_len(p);
		for (i = 0; i < len; ++i) q->g[f[i]] += p->g[i];
	}
	return 0;
}
int kad_op_avg1d(kad_node_t *p, int action)
{
	conv_conf_t *aux = (conv_conf_t*)p->ptr;
	kad_node_t *q = p->child[0];
	if (action == KAD_SYNC_DIM) {
		if (q->n_d != 3) return -1;
		p->n_d = 3;
		p->d[0] = q->d[0], p->d[1] = q->d[1], p->d[2] = conv_out_size(q->d[2], aux);
	} else if (action == KAD_ALLOC) {
		p->gtmp = realloc(p->gtmp, kad_len(p) * sizeof(int));
	} else if (action == KAD_FORWARD) {
		int rest = 1, len, t, i;
		int *f = (int*)p->gtmp;
		len = kad_len(p);
		for (i = 0; i < len; ++i) p->x[i] = 0.0f, f[i] = 0;
		for (i = 0; i < p->n_d - 1; ++i) rest *= p->d[i];
		for (t = 0; t < rest; ++t) {
			int j, l, p_width = p->d[p->n_d - 1];
			int u = t * p_width, v, v0 = t * q->d[p->n_d - 1], v_end = v0 + q->d[p->n_d - 1];
			for (l = 0; l < aux->kernel_size; ++l)
				for (j = 0, v = v0 + (l > aux->pad[0]? l - aux->pad[0] : 0); j < p_width && v < v_end; ++j, v += aux->stride)
					p->x[u + j] += q->x[v], ++f[u + j];
		}
		for (i = 0; i < len; ++i) p->x[i] /= f[i];
	} else if (action == KAD_BACKWARD) {
		int rest = 1, t, i;
		int *f = (int*)p->gtmp;
		for (i = 0; i < p->n_d - 1; ++i) rest *= p->d[i];
		for (t = 0; t < rest; ++t) {
			int j, l, p_width = p->d[p->n_d - 1];
			int u = t * p_width, v, v0 = t * q->d[p->n_d - 1], v_end = v0 + q->d[p->n_d - 1];
			for (l = 0; l < aux->kernel_size; ++l)
				for (j = 0, v = v0 + (l > aux->pad[0]? l - aux->pad[0] : 0); j < p_width && v < v_end; ++j, v += aux->stride)
					q->g[v] += p->g[u + j] / f[u + j];
		}
	}
	return 0;
}
/********** List of operators **********/
kad_op_f kad_op_list[KAD_MAX_OP] = {
	0,
	kad_op_add,           /* 1: element-wise addition */
	kad_op_mul,           /* 2: element-wise multiplication */
	kad_op_cmul,          /* 3: column multiplication */
	kad_op_ce_bin_neg,    /* 4: binary cross-entropy for (-1,1) */
	kad_op_square,        /* 5: square */
	kad_op_sigm,          /* 6: sigmoid */
	kad_op_tanh,          /* 7: tanh */
	kad_op_relu,          /* 8: ReLU */
	kad_op_matmul,        /* 9: matrix multiplication */
	kad_op_avg,           /* 10: general average pooling (not for ConvNet) */
	kad_op_1minus,        /* 11: 1-x */
	kad_op_select,        /* 12: choose between one of the children */
	kad_op_ce_multi,      /* 13: multi-class cross-entropy */
	kad_op_softmax,       /* 14: softmax */
	kad_op_dropout,       /* 15: dropout */
	kad_op_conv2d,        /* 16: 2D convolution */
	kad_op_max2d,         /* 17: 2D max pooling (for 2D ConvNet) */
	kad_op_conv1d,        /* 18: 1D convolution */
	kad_op_max1d,         /* 19: 1D max pooling (for 1D ConvNet) */
	kad_op_slice,         /* 20: slice data at a dimension */
	kad_op_max,           /* 21: general max pooling */
	kad_op_ce_bin,        /* 22: binary cross-entropy for (0,1) */
	kad_op_sub,           /* 23: element-wise subtraction */
	kad_op_sample_normal, /* 24: sample from a normal distribution */
	kad_op_reduce_sum,    /* 25 */
	kad_op_reduce_mean,   /* 26 */
	kad_op_log,           /* 27: log() */
	kad_op_avg1d,         /* 28: 1D average pooling (for 1D ConvNet) */
	kad_op_mse,           /* 29: mean square error */
	kad_op_reshape,       /* 30 */
	kad_op_concat,        /* 31 */
	kad_op_stdnorm,       /* 32: layer normalization */
	kad_op_exp,           /* 33: exp() */
	kad_op_sin,           /* 34: sin() */
	kad_op_stack,         /* 35: tf.stack, but on the first axis only */
	kad_op_reverse        /* 36: tf.reverse, but on one axis only */
};
char *kad_op_name[KAD_MAX_OP] = {
	0, "add", "mul", "cmul", "ce_bin_neg", "square", "sigm", "tanh", "relu", "matmul", "avg", "1minus", "select", "ce_multi", "softmax",
	"dropout", "conv2d", "max2d", "conv1d", "max1d", "slice", "max", "ce_bin", "sub", "sample_normal", "reduce_sum", "reduce_mean", "log",
	"avg1d", "mse", "reshape", "concat", "stdnorm", "exp", "sin", "stack", "reverse"
};
/**************************
 *** Debugging routines ***
 **************************/
void kad_trap_fe(void)
{
#ifdef __SSE__
	_MM_SET_EXCEPTION_MASK(_MM_GET_EXCEPTION_MASK() & ~(_MM_MASK_INVALID | _MM_MASK_DIV_ZERO));
#endif
}
void kad_print_graph(FILE *fp, int n, kad_node_t **v)
{
	int i, j;
	for (i = 0; i < n; ++i) v[i]->tmp = i;
	for (i = 0; i < n; ++i) {
		kad_node_t *p = v[i];
		fprintf(fp, "%d\t%x:%x\t%d\t", i, p->flag, p->ext_flag, p->ext_label);
		if (p->pre) fprintf(fp, "%d\t", p->pre->tmp);
		else fprintf(fp, ".\t");
		fputs("[", fp);
		for (j = 0; j < p->n_d; ++j) {
			if (j) fputc(',', fp);
			fprintf(fp, "%d", p->d[j]);
		}
		fprintf(fp, "]\t");
		if (p->n_child) {
			fprintf(fp, "%s(", kad_op_name[p->op]);
			for (j = 0; j < p->n_child; ++j) {
				if (j) fputc(',', fp);
				fprintf(fp, "$%d", p->child[j]->tmp);
			}
			fprintf(fp, ")");
		} else fprintf(fp, "%s", kad_is_feed(p)? "feed" : kad_is_var(p)? "var" : kad_is_const(p)? "const" : "N/A");
		fputc('\n', fp);
	}
	for (i = 0; i < n; ++i) v[i]->tmp = 0;
}
static void kad_add_delta(int n, kad_node_t **a, float c, float *delta)
{
	int i, k;
	for (i = k = 0; i < n; ++i)
		if (kad_is_var(a[i])) {
			kad_saxpy(kad_len(a[i]), c, &delta[k], a[i]->x);
			k += kad_len(a[i]);
		}
}
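/* Numerical gradient check: perturbs all variable nodes by a small random
 * delta and compares the analytic directional derivative dot(g0, delta)
 * against the central difference (f(x+delta) - f(x-delta)) / 2. A typical
 * call, assuming a graph compiled elsewhere with kad_compile() whose last
 * node is the scalar cost, might look like:
 *
 *   int n_node;
 *   kad_node_t **a = kad_compile(&n_node, 1, cost);
 *   kad_check_grad(n_node, a, n_node - 1);
 */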
void kad_check_grad(int n, kad_node_t **a, int from)
{
	const float eps = 1e-5f, rel = 1e-7f / eps;
	int i, k, n_var;
	float *g0, *delta, f0, f_minus, f_plus, s0, s1, rel_err, p_m_err;
	n_var = kad_size_var(n, a);
	g0 = (float*)calloc(n_var, sizeof(float));
	f0 = *kad_eval_at(n, a, from);
	kad_grad(n, a, from);
	for (i = k = 0; i < n; ++i)
		if (kad_is_var(a[i])) {
			memcpy(&g0[k], a[i]->g, kad_len(a[i]) * sizeof(float));
			k += kad_len(a[i]);
		}
	delta = (float*)calloc(n_var, sizeof(float));
	for (k = 0; k < n_var; ++k) delta[k] = (float)kad_drand(0) * eps;
	kad_add_delta(n, a, 1.0f, delta);
	f_plus = *kad_eval_at(n, a, from);
	kad_add_delta(n, a, -2.0f, delta);
	f_minus = *kad_eval_at(n, a, from);
	kad_add_delta(n, a, 1.0f, delta);
	s0 = kad_sdot(n_var, g0, delta);
	s1 = .5f * (f_plus - f_minus);
	fprintf(stderr, "Gradient check -- %g <=> %g @ %g -- ", s0/eps, s1/eps, f0);
	if (fabs(s1) >= rel * eps) {
		rel_err = fabsf(fabsf(s0) - fabsf(s1)) / (fabsf(s0) + fabsf(s1));
		p_m_err = fabsf(f_plus + f_minus - 2.0f * f0) / fabsf(f_plus - f_minus);
		fprintf(stderr, "rel_err:%g p_m_err:%g -- ", rel_err, p_m_err);
		if (rel_err >= rel && rel_err > p_m_err) fprintf(stderr, "failed\n");
		else fprintf(stderr, "passed\n");
	} else fprintf(stderr, "skipped\n");
	free(delta); free(g0);
}