// test-vec1.c
// Matrix x vector multiplication benchmarks: scalar, AVX fp32, and F16C fp16 kernels.
#include <assert.h>
#include <inttypes.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#include <sys/time.h>

#include <immintrin.h>
const int N = 1 << 14; // number of matrix rows (16384)
const int M = 768;     // number of matrix columns == vector length
  11. //
  12. // naive implementation
  13. //
  14. void mul_mat_vec_f32_0(
  15. const float * restrict src0,
  16. const float * restrict src1,
  17. float * dst,
  18. int nrows,
  19. int ncols) {
  20. for (int i = 0; i < nrows; i++) {
  21. float sum = 0.0f;
  22. for (int j = 0; j < ncols; j++) {
  23. sum += src0[i*ncols + j]*src1[j];
  24. }
  25. dst[i] = sum;
  26. }
  27. }
//
// SIMD with 8 32-bit floats
//

// Horizontal sum of the 8 floats in an AVX register.
float reduce_vector8_0(__m256 v) {
    __m128 v1 = _mm256_extractf128_ps(v, 0);  // lower 4 floats
    __m128 v2 = _mm256_extractf128_ps(v, 1);  // upper 4 floats
    __m128 v3 = _mm_add_ps(v1, v2);           // 4 partial sums
    __m128 v4 = _mm_shuffle_ps(v3, v3, 0x4e); // swap the two 64-bit halves
    __m128 v5 = _mm_add_ps(v3, v4);           // 2 partial sums (duplicated across lanes)
    __m128 v6 = _mm_shuffle_ps(v5, v5, 0x11); // bring element 1 into lane 0
    __m128 v7 = _mm_add_ps(v5, v6);           // lane 0 now holds the total sum
    return _mm_cvtss_f32(v7);
}
  41. // vectorized implementation using AVX
  42. void mul_mat_vec_f32_1(
  43. const float * restrict src0,
  44. const float * restrict src1,
  45. float * dst,
  46. int nrows,
  47. int ncols) {
  48. const int ncols8 = ncols & ~7;
  49. for (int i = 0; i < nrows; i++) {
  50. __m256 sum = _mm256_setzero_ps();
  51. for (int j = 0; j < ncols8; j += 8) {
  52. __m256 a = _mm256_loadu_ps(src0 + i*ncols + j);
  53. __m256 b = _mm256_loadu_ps(src1 + j);
  54. __m256 c = _mm256_mul_ps(a, b);
  55. sum = _mm256_add_ps(sum, c);
  56. }
  57. dst[i] = reduce_vector8_0(sum);
  58. for (int j = ncols8; j < ncols; j++) {
  59. dst[i] += src0[i*ncols + j]*src1[j];
  60. }
  61. }
  62. }
// AVX implementation with 4-way unrolling: 32 floats per iteration, using
// four independent accumulators to hide FP add/FMA latency.
void mul_mat_vec_f32_2(
    const float * restrict src0,
    const float * restrict src1,
    float * dst,
    int nrows,
    int ncols) {
    const int ncols32 = ncols & ~31; // largest multiple of 32 <= ncols
    for (int i = 0; i < nrows; i++) {
        __m256 sum0 = _mm256_setzero_ps();
        __m256 sum1 = _mm256_setzero_ps();
        __m256 sum2 = _mm256_setzero_ps();
        __m256 sum3 = _mm256_setzero_ps();
        const float * restrict src0_row = src0 + i*ncols;
        for (int j = 0; j < ncols32; j += 32) {
            __m256 a0 = _mm256_loadu_ps(src0_row + j + 0);
            __m256 a1 = _mm256_loadu_ps(src0_row + j + 8);
            __m256 a2 = _mm256_loadu_ps(src0_row + j + 16);
            __m256 a3 = _mm256_loadu_ps(src0_row + j + 24);
            __m256 b0 = _mm256_loadu_ps(src1 + j + 0);
            __m256 b1 = _mm256_loadu_ps(src1 + j + 8);
            __m256 b2 = _mm256_loadu_ps(src1 + j + 16);
            __m256 b3 = _mm256_loadu_ps(src1 + j + 24);
#if defined(__FMA__)
            sum0 = _mm256_fmadd_ps(a0, b0, sum0);
            sum1 = _mm256_fmadd_ps(a1, b1, sum1);
            sum2 = _mm256_fmadd_ps(a2, b2, sum2);
            sum3 = _mm256_fmadd_ps(a3, b3, sum3);
#else
            sum0 = _mm256_add_ps(_mm256_mul_ps(a0, b0), sum0);
            sum1 = _mm256_add_ps(_mm256_mul_ps(a1, b1), sum1);
            sum2 = _mm256_add_ps(_mm256_mul_ps(a2, b2), sum2);
            sum3 = _mm256_add_ps(_mm256_mul_ps(a3, b3), sum3);
#endif
        }
        // combine the four accumulators, then reduce horizontally
        dst[i] = reduce_vector8_0(_mm256_add_ps(_mm256_add_ps(sum0, sum1), _mm256_add_ps(sum2, sum3)));
        // scalar tail for ncols not divisible by 32
        for (int j = ncols32; j < ncols; j++) {
            dst[i] += src0[i*ncols + j]*src1[j];
        }
    }
}
//
// SIMD with 8 16-bit floats
//

// Bit-cast a 32-bit pattern to float using the best mechanism available
// for the detected compiler/target; falls back to union type-punning
// (well-defined in C).
static inline float fp32_from_bits(uint32_t w) {
#if defined(__OPENCL_VERSION__)
    return as_float(w);
#elif defined(__CUDA_ARCH__)
    return __uint_as_float((unsigned int) w);
#elif defined(__INTEL_COMPILER)
    return _castu32_f32(w);
#elif defined(_MSC_VER) && (defined(_M_ARM) || defined(_M_ARM64))
    return _CopyFloatFromInt32((__int32) w);
#else
    union {
        uint32_t as_bits;
        float as_value;
    } fp32 = { w };
    return fp32.as_value;
#endif
}
// Bit-cast a float to its 32-bit representation; mirror of fp32_from_bits.
static inline uint32_t fp32_to_bits(float f) {
#if defined(__OPENCL_VERSION__)
    return as_uint(f);
#elif defined(__CUDA_ARCH__)
    return (uint32_t) __float_as_uint(f);
#elif defined(__INTEL_COMPILER)
    return _castf32_u32(f);
#elif defined(_MSC_VER) && (defined(_M_ARM) || defined(_M_ARM64))
    return (uint32_t) _CopyInt32FromFloat(f);
#else
    union {
        float as_value;
        uint32_t as_bits;
    } fp32 = { f };
    return fp32.as_bits;
#endif
}
/*
 * Convert a 16-bit floating-point number in IEEE half-precision format, in bit representation, to
 * a 32-bit floating-point number in IEEE single-precision format.
 *
 * @note The implementation relies on IEEE-like (no assumption about rounding mode and no operations on denormals)
 * floating-point operations and bitcasts between integer and floating-point variables.
 */
static inline float fp16_ieee_to_fp32_value(uint16_t h) {
    /*
     * Extend the half-precision floating-point number to 32 bits and shift to the upper part of the 32-bit word:
     *      +---+-----+------------+-------------------+
     *      | S |EEEEE|MM MMMM MMMM|0000 0000 0000 0000|
     *      +---+-----+------------+-------------------+
     * Bits  31  26-30    16-25            0-15
     *
     * S - sign bit, E - bits of the biased exponent, M - bits of the mantissa, 0 - zero bits.
     */
    const uint32_t w = (uint32_t) h << 16;
    /*
     * Extract the sign of the input number into the high bit of the 32-bit word:
     *
     *      +---+----------------------------------+
     *      | S |0000000 00000000 00000000 00000000|
     *      +---+----------------------------------+
     * Bits  31                 0-31
     */
    const uint32_t sign = w & UINT32_C(0x80000000);
    /*
     * Extract mantissa and biased exponent of the input number into the high bits of the 32-bit word:
     *
     *      +-----+------------+---------------------+
     *      |EEEEE|MM MMMM MMMM|0 0000 0000 0000 0000|
     *      +-----+------------+---------------------+
     * Bits  27-31    17-26           0-16
     */
    const uint32_t two_w = w + w;
    /*
     * Shift mantissa and exponent into bits 23-28 and bits 13-22 so they become mantissa and exponent
     * of a single-precision floating-point number:
     *
     *       S|Exponent |          Mantissa
     *      +-+---+-----+------------+----------------+
     *      |0|000|EEEEE|MM MMMM MMMM|0 0000 0000 0000|
     *      +-+---+-----+------------+----------------+
     * Bits   | 23-31   |           0-22
     *
     * Next, there are some adjustments to the exponent:
     * - The exponent needs to be corrected by the difference in exponent bias between single-precision and half-precision
     *   formats (0x7F - 0xF = 0x70)
     * - Inf and NaN values in the inputs should become Inf and NaN values after conversion to the single-precision number.
     *   Therefore, if the biased exponent of the half-precision input was 0x1F (max possible value), the biased exponent
     *   of the single-precision output must be 0xFF (max possible value). We do this correction in two steps:
     *   - First, we adjust the exponent by (0xFF - 0x1F) = 0xE0 (see exp_offset below) rather than by 0x70 suggested
     *     by the difference in the exponent bias (see above).
     *   - Then we multiply the single-precision result of exponent adjustment by 2**(-112) to reverse the effect of
     *     exponent adjustment by 0xE0 less the necessary exponent adjustment by 0x70 due to difference in exponent bias.
     *     The floating-point multiplication hardware would ensure that Inf and NaN would retain their value on at least
     *     partially IEEE754-compliant implementations.
     *
     * Note that the above operations do not handle denormal inputs (where biased exponent == 0). However, they also do not
     * operate on denormal inputs, and do not produce denormal results.
     */
    const uint32_t exp_offset = UINT32_C(0xE0) << 23;
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float exp_scale = 0x1.0p-112f;
#else
    const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
#endif
    const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;
    /*
     * Convert denormalized half-precision inputs into single-precision results (always normalized).
     * Zero inputs are also handled here.
     *
     * In a denormalized number the biased exponent is zero, and mantissa has non-zero bits.
     * First, we shift mantissa into bits 0-9 of the 32-bit word.
     *
     *                  zeros           |  mantissa
     *      +---------------------------+------------+
     *      |0000 0000 0000 0000 0000 00|MM MMMM MMMM|
     *      +---------------------------+------------+
     * Bits             10-31                0-9
     *
     * Now, remember that denormalized half-precision numbers are represented as:
     *    FP16 = mantissa * 2**(-24).
     * The trick is to construct a normalized single-precision number with the same mantissa as the half-precision input
     * and with an exponent which would scale the corresponding mantissa bits to 2**(-24).
     * A normalized single-precision floating-point number is represented as:
     *    FP32 = (1 + mantissa * 2**(-23)) * 2**(exponent - 127)
     * Therefore, when the biased exponent is 126, a unit change in the mantissa of the input denormalized half-precision
     * number causes a change of the constructed single-precision number by 2**(-24), i.e. the same amount.
     *
     * The last step is to adjust the bias of the constructed single-precision number. When the input half-precision number
     * is zero, the constructed single-precision number has the value of
     *    FP32 = 1 * 2**(126 - 127) = 2**(-1) = 0.5
     * Therefore, we need to subtract 0.5 from the constructed single-precision number to get the numerical equivalent of
     * the input half-precision number.
     */
    const uint32_t magic_mask = UINT32_C(126) << 23;
    const float magic_bias = 0.5f;
    const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;
    /*
     * - Choose either results of conversion of input as a normalized number, or as a denormalized number, depending on the
     *   input exponent. The variable two_w contains input exponent in bits 27-31, therefore if it is smaller than 2**27, the
     *   input is either a denormal number, or zero.
     * - Combine the result of conversion of exponent and mantissa with the sign of the input number.
     */
    const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
    const uint32_t result = sign |
        (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
    return fp32_from_bits(result);
}
/*
 * Convert a 32-bit floating-point number in IEEE single-precision format to a 16-bit floating-point number in
 * IEEE half-precision format, in bit representation.
 *
 * @note The implementation relies on IEEE-like (no assumption about rounding mode and no operations on denormals)
 * floating-point operations and bitcasts between integer and floating-point variables.
 */
static inline uint16_t fp16_ieee_from_fp32_value(float f) {
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
    const float scale_to_inf = 0x1.0p+112f;
    const float scale_to_zero = 0x1.0p-110f;
#else
    const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
    const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
#endif
    // scale |f| up then back down so the hardware rounds it to fp16 precision
    float base = (fabsf(f) * scale_to_inf) * scale_to_zero;
    const uint32_t w = fp32_to_bits(f);
    const uint32_t shl1_w = w + w;                 // shift left by 1: drops the sign bit
    const uint32_t sign = w & UINT32_C(0x80000000);
    uint32_t bias = shl1_w & UINT32_C(0xFF000000); // exponent bits (shifted left by 1)
    if (bias < UINT32_C(0x71000000)) {
        // clamp the exponent used below — presumably for inputs that map to
        // subnormal fp16 values; NOTE(review): confirm against the fp16 library
        bias = UINT32_C(0x71000000);
    }
    base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
    const uint32_t bits = fp32_to_bits(base);
    const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);  // fp16 exponent field
    const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);     // fp16 mantissa (+ carry)
    const uint32_t nonsign = exp_bits + mantissa_bits;
    // inputs with all exponent bits set after the shift (Inf/NaN/overflow) map to fp16 NaN 0x7E00
    return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
}
  281. void mul_mat_vec_f16_0(
  282. const uint16_t * src0,
  283. const uint16_t * src1,
  284. float * dst,
  285. int nrows,
  286. int ncols) {
  287. const int ncols8 = ncols & ~7;
  288. for (int i = 0; i < nrows; i++) {
  289. __m256 sum = _mm256_setzero_ps();
  290. const uint16_t * src0_row = src0 + i * ncols;
  291. for (int j = 0; j < ncols8; j += 8) {
  292. __m256 a = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(src0_row + j)));
  293. __m256 b = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(src1 + j)));
  294. #if defined(__FMA__)
  295. sum = _mm256_fmadd_ps(a, b, sum);
  296. #else
  297. sum = _mm256_add_ps(_mm256_mul_ps(a, b), sum);
  298. #endif
  299. }
  300. dst[i] = reduce_vector8_0(sum);
  301. for (int j = ncols8; j < ncols; j++) {
  302. dst[i] += fp16_ieee_to_fp32_value(src0_row[j]) * fp16_ieee_to_fp32_value(src1[j]);
  303. }
  304. }
  305. }
// F16C implementation, 2-way unrolled: 16 half-precision values per
// iteration with two independent accumulators to hide FMA latency.
void mul_mat_vec_f16_1(
    const uint16_t * src0,
    const uint16_t * src1,
    float * dst,
    int nrows,
    int ncols) {
    const int ncols16 = ncols & ~15; // largest multiple of 16 <= ncols
    for (int i = 0; i < nrows; i++) {
        __m256 sum0 = _mm256_setzero_ps();
        __m256 sum1 = _mm256_setzero_ps();
        const uint16_t * src0_row = src0 + i * ncols;
        for (int j = 0; j < ncols16; j += 16) {
            // convert 8 fp16 values at a time to fp32
            __m256 a0 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(src0_row + j + 0)));
            __m256 a1 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(src0_row + j + 8)));
            __m256 b0 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(src1 + j)));
            __m256 b1 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(src1 + j + 8)));
#if defined(__FMA__)
            sum0 = _mm256_fmadd_ps(a0, b0, sum0);
            sum1 = _mm256_fmadd_ps(a1, b1, sum1);
#else
            sum0 = _mm256_add_ps(_mm256_mul_ps(a0, b0), sum0);
            sum1 = _mm256_add_ps(_mm256_mul_ps(a1, b1), sum1);
#endif
        }
        dst[i] = reduce_vector8_0(sum0) + reduce_vector8_0(sum1);
        // scalar tail for ncols not divisible by 16
        for (int j = ncols16; j < ncols; j++) {
            dst[i] += fp16_ieee_to_fp32_value(src0_row[j]) * fp16_ieee_to_fp32_value(src1[j]);
        }
    }
}
// F16C implementation, 4-way unrolled: 32 half-precision values per
// iteration with four independent accumulators to hide FMA latency.
void mul_mat_vec_f16_2(
    const uint16_t * src0,
    const uint16_t * src1,
    float * dst,
    int nrows,
    int ncols) {
    const int ncols32 = ncols & ~31; // largest multiple of 32 <= ncols
    for (int i = 0; i < nrows; i++) {
        __m256 sum0 = _mm256_setzero_ps();
        __m256 sum1 = _mm256_setzero_ps();
        __m256 sum2 = _mm256_setzero_ps();
        __m256 sum3 = _mm256_setzero_ps();
        const uint16_t * src0_row = src0 + i * ncols;
        for (int j = 0; j < ncols32; j += 32) {
            // convert 8 fp16 values at a time to fp32
            __m256 a0 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(src0_row + j + 0)));
            __m256 a1 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(src0_row + j + 8)));
            __m256 a2 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(src0_row + j + 16)));
            __m256 a3 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(src0_row + j + 24)));
            __m256 b0 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(src1 + j)));
            __m256 b1 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(src1 + j + 8)));
            __m256 b2 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(src1 + j + 16)));
            __m256 b3 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(src1 + j + 24)));
#if defined(__FMA__)
            sum0 = _mm256_fmadd_ps(a0, b0, sum0);
            sum1 = _mm256_fmadd_ps(a1, b1, sum1);
            sum2 = _mm256_fmadd_ps(a2, b2, sum2);
            sum3 = _mm256_fmadd_ps(a3, b3, sum3);
#else
            sum0 = _mm256_add_ps(_mm256_mul_ps(a0, b0), sum0);
            sum1 = _mm256_add_ps(_mm256_mul_ps(a1, b1), sum1);
            sum2 = _mm256_add_ps(_mm256_mul_ps(a2, b2), sum2);
            sum3 = _mm256_add_ps(_mm256_mul_ps(a3, b3), sum3);
#endif
        }
        dst[i] = reduce_vector8_0(sum0) + reduce_vector8_0(sum1) + reduce_vector8_0(sum2) + reduce_vector8_0(sum3);
        // scalar tail for ncols not divisible by 32
        for (int j = ncols32; j < ncols; j++) {
            dst[i] += fp16_ieee_to_fp32_value(src0_row[j]) * fp16_ieee_to_fp32_value(src1[j]);
        }
    }
}
  376. void mul_mat_vec_f16_3(
  377. const uint16_t * src0,
  378. const float * src1,
  379. float * dst,
  380. int nrows,
  381. int ncols) {
  382. const int ncols32 = ncols & ~31;
  383. for (int i = 0; i < nrows; i++) {
  384. __m256 sum0 = _mm256_setzero_ps();
  385. __m256 sum1 = _mm256_setzero_ps();
  386. __m256 sum2 = _mm256_setzero_ps();
  387. __m256 sum3 = _mm256_setzero_ps();
  388. const uint16_t * src0_row = src0 + i * ncols;
  389. for (int j = 0; j < ncols32; j += 32) {
  390. __m256 a0 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(src0_row + j + 0)));
  391. __m256 a1 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(src0_row + j + 8)));
  392. __m256 a2 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(src0_row + j + 16)));
  393. __m256 a3 = _mm256_cvtph_ps(_mm_loadu_si128((__m128i*)(src0_row + j + 24)));
  394. __m256 b0 = _mm256_loadu_ps(src1 + j);
  395. __m256 b1 = _mm256_loadu_ps(src1 + j + 8);
  396. __m256 b2 = _mm256_loadu_ps(src1 + j + 16);
  397. __m256 b3 = _mm256_loadu_ps(src1 + j + 24);
  398. #if defined(__FMA__)
  399. sum0 = _mm256_fmadd_ps(a0, b0, sum0);
  400. sum1 = _mm256_fmadd_ps(a1, b1, sum1);
  401. sum2 = _mm256_fmadd_ps(a2, b2, sum2);
  402. sum3 = _mm256_fmadd_ps(a3, b3, sum3);
  403. #else
  404. sum0 = _mm256_add_ps(_mm256_mul_ps(a0, b0), sum0);
  405. sum1 = _mm256_add_ps(_mm256_mul_ps(a1, b1), sum1);
  406. sum2 = _mm256_add_ps(_mm256_mul_ps(a2, b2), sum2);
  407. sum3 = _mm256_add_ps(_mm256_mul_ps(a3, b3), sum3);
  408. #endif
  409. }
  410. dst[i] = reduce_vector8_0(sum0) + reduce_vector8_0(sum1) + reduce_vector8_0(sum2) + reduce_vector8_0(sum3);
  411. for (int j = ncols32; j < ncols; j++) {
  412. dst[i] += fp16_ieee_to_fp32_value(src0_row[j]) * fp16_ieee_to_fp32_value(src1[j]);
  413. }
  414. }
  415. }
  416. uint64_t get_time_us(void) {
  417. struct timeval tv;
  418. gettimeofday(&tv, NULL);
  419. return tv.tv_sec * 1000000 + tv.tv_usec;
  420. }
  421. int main(int argc, const char ** argv) {
  422. float * src0 = malloc(sizeof(float)*N*M);
  423. float * src1 = malloc(sizeof(float)*M);
  424. float * dst = malloc(sizeof(float)*N);
  425. //float * src0 = (float *)(aligned_alloc(64, sizeof(float)*N*M));
  426. //float * src1 = (float *)(aligned_alloc(64, sizeof(float)*M));
  427. //float * dst = (float *)(aligned_alloc(64, sizeof(float)*N));
  428. for (int i = 0; i < N*M; i++) {
  429. src0[i] = rand() / (float)RAND_MAX;
  430. }
  431. for (int i = 0; i < M; i++) {
  432. src1[i] = rand() / (float)RAND_MAX;
  433. }
  434. // convert src0 and src1 to __fp16
  435. uint16_t * src0_fp16 = (uint16_t *)(malloc(sizeof(uint16_t)*N*M));
  436. uint16_t * src1_fp16 = (uint16_t *)(malloc(sizeof(uint16_t)*M));
  437. //uint16_t * src0_fp16 = (uint16_t *)(aligned_alloc(64, sizeof(uint16_t)*N*M));
  438. //uint16_t * src1_fp16 = (uint16_t *)(aligned_alloc(64, sizeof(uint16_t)*M));
  439. {
  440. const uint64_t t_start = get_time_us();
  441. for (int i = 0; i < N*M; i++) {
  442. src0_fp16[i] = fp16_ieee_from_fp32_value(src0[i]);
  443. //printf("%f %f\n", src0[i], fp16_ieee_to_fp32_value(src0_fp16[i]));
  444. //assert(!isnan(fp16_ieee_to_fp32_value(src0_fp16[i])));
  445. }
  446. for (int i = 0; i < M; i++) {
  447. src1_fp16[i] = fp16_ieee_from_fp32_value(src1[i]);
  448. }
  449. const uint64_t t_end = get_time_us();
  450. printf("convert time: %f ms\n", (t_end - t_start) / 1000.0);
  451. }
  452. for (int i = 0; i < 16; ++i) {
  453. printf("%f %f\n", src0[i], fp16_ieee_to_fp32_value(src0_fp16[i]));
  454. }
  455. int method = 0;
  456. if (argc > 1) {
  457. method = atoi(argv[1]);
  458. }
  459. const int nIter = 1000;
  460. const clock_t start = clock();
  461. const uint64_t start_us = get_time_us();
  462. double iM = 1.0/M;
  463. double sum = 0.0f;
  464. for (int i = 0; i < nIter; i++) {
  465. if (method == 0) {
  466. mul_mat_vec_f32_0(src0, src1, dst, N, M);
  467. }
  468. if (method == 1) {
  469. mul_mat_vec_f32_1(src0, src1, dst, N, M);
  470. }
  471. if (method == 2) {
  472. mul_mat_vec_f32_2(src0, src1, dst, N, M);
  473. }
  474. if (method == 3) {
  475. mul_mat_vec_f16_0(src0_fp16, src1_fp16, dst, N, M);
  476. }
  477. if (method == 4) {
  478. mul_mat_vec_f16_1(src0_fp16, src1_fp16, dst, N, M);
  479. }
  480. if (method == 5) {
  481. mul_mat_vec_f16_2(src0_fp16, src1_fp16, dst, N, M);
  482. }
  483. if (method == 6) {
  484. mul_mat_vec_f16_3(src0_fp16, src1, dst, N, M);
  485. }
  486. }
  487. for (int i = 0; i < N; i++) {
  488. sum += dst[i]*iM;
  489. }
  490. {
  491. const clock_t end = clock();
  492. const uint64_t end_us = get_time_us();
  493. printf("%s: elapsed ticks: %ld\n", __func__, end - start);
  494. printf("%s: elapsed us: %ld\n", __func__, end_us - start_us);
  495. }
  496. printf("%f\n", sum);
  497. free(src0);
  498. free(src1);
  499. free(dst);
  500. free(src0_fp16);
  501. free(src1_fp16);
  502. return 0;
  503. }