// test2.c — ggml LBFGS optimizer tests (linear regression, quadratic bowl, Booth's function)
#define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnings on Windows

#include "ggml/ggml.h"

#include <limits.h>
#include <math.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
  9. bool is_close(float a, float b, float epsilon) {
  10. return fabs(a - b) < epsilon;
  11. }
  12. int main(int argc, const char ** argv) {
  13. struct ggml_init_params params = {
  14. .mem_size = 128*1024*1024,
  15. .mem_buffer = NULL,
  16. .no_alloc = false,
  17. };
  18. //struct ggml_opt_params opt_params = ggml_opt_default_params(GGML_OPT_ADAM);
  19. //opt_params.adam.alpha = 0.01f;
  20. struct ggml_opt_params opt_params = ggml_opt_default_params(GGML_OPT_LBFGS);
  21. // original threads: 8
  22. int nthreads = 8;
  23. const char *env = getenv("GGML_NTHREADS");
  24. if (env != NULL) {
  25. nthreads = atoi(env);
  26. }
  27. if (argc > 1) {
  28. nthreads = atoi(argv[1]);
  29. }
  30. opt_params.n_threads = nthreads;
  31. printf("test2: n_threads:%d\n", opt_params.n_threads);
  32. const float xi[] = { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f , 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, };
  33. float yi[] = { 15.0f, 25.0f, 35.0f, 45.0f, 55.0f, 65.0f, 75.0f, 85.0f, 95.0f, 105.0f, };
  34. const int n = sizeof(xi)/sizeof(xi[0]);
  35. struct ggml_context * ctx0 = ggml_init(params);
  36. struct ggml_tensor * x = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, n);
  37. struct ggml_tensor * y = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, n);
  38. for (int i = 0; i < n; i++) {
  39. ((float *) x->data)[i] = xi[i];
  40. ((float *) y->data)[i] = yi[i];
  41. }
  42. {
  43. struct ggml_tensor * t0 = ggml_new_f32(ctx0, 0.0f);
  44. struct ggml_tensor * t1 = ggml_new_f32(ctx0, 0.0f);
  45. // initialize auto-diff parameters:
  46. ggml_set_param(ctx0, t0);
  47. ggml_set_param(ctx0, t1);
  48. // f = sum_i[(t0 + t1*x_i - y_i)^2]/(2n)
  49. struct ggml_tensor * f =
  50. ggml_div(ctx0,
  51. ggml_sum(ctx0,
  52. ggml_sqr(ctx0,
  53. ggml_sub(ctx0,
  54. ggml_add(ctx0,
  55. ggml_mul(ctx0, x, ggml_repeat(ctx0, t1, x)),
  56. ggml_repeat(ctx0, t0, x)),
  57. y)
  58. )
  59. ),
  60. ggml_new_f32(ctx0, 2.0f*n));
  61. enum ggml_opt_result res = ggml_opt(NULL, opt_params, f);
  62. printf("t0 = %f\n", ggml_get_f32_1d(t0, 0));
  63. printf("t1 = %f\n", ggml_get_f32_1d(t1, 0));
  64. GGML_ASSERT(res == GGML_OPT_OK);
  65. GGML_ASSERT(is_close(ggml_get_f32_1d(t0, 0), 5.0f, 1e-3f));
  66. GGML_ASSERT(is_close(ggml_get_f32_1d(t1, 0), 10.0f, 1e-3f));
  67. }
  68. {
  69. struct ggml_tensor * t0 = ggml_new_f32(ctx0, -1.0f);
  70. struct ggml_tensor * t1 = ggml_new_f32(ctx0, 9.0f);
  71. ggml_set_param(ctx0, t0);
  72. ggml_set_param(ctx0, t1);
  73. // f = 0.5*sum_i[abs(t0 + t1*x_i - y_i)]/n
  74. struct ggml_tensor * f =
  75. ggml_mul(ctx0,
  76. ggml_new_f32(ctx0, 1.0/(2*n)),
  77. ggml_sum(ctx0,
  78. ggml_abs(ctx0,
  79. ggml_sub(ctx0,
  80. ggml_add(ctx0,
  81. ggml_mul(ctx0, x, ggml_repeat(ctx0, t1, x)),
  82. ggml_repeat(ctx0, t0, x)),
  83. y)
  84. )
  85. )
  86. );
  87. enum ggml_opt_result res = ggml_opt(NULL, opt_params, f);
  88. GGML_ASSERT(res == GGML_OPT_OK);
  89. GGML_ASSERT(is_close(ggml_get_f32_1d(t0, 0), 5.0f, 1e-2f));
  90. GGML_ASSERT(is_close(ggml_get_f32_1d(t1, 0), 10.0f, 1e-2f));
  91. }
  92. {
  93. struct ggml_tensor * t0 = ggml_new_f32(ctx0, 5.0f);
  94. struct ggml_tensor * t1 = ggml_new_f32(ctx0, -4.0f);
  95. ggml_set_param(ctx0, t0);
  96. ggml_set_param(ctx0, t1);
  97. // f = t0^2 + t1^2
  98. struct ggml_tensor * f =
  99. ggml_add(ctx0,
  100. ggml_sqr(ctx0, t0),
  101. ggml_sqr(ctx0, t1)
  102. );
  103. enum ggml_opt_result res = ggml_opt(NULL, opt_params, f);
  104. GGML_ASSERT(res == GGML_OPT_OK);
  105. GGML_ASSERT(is_close(ggml_get_f32_1d(f, 0), 0.0f, 1e-3f));
  106. GGML_ASSERT(is_close(ggml_get_f32_1d(t0, 0), 0.0f, 1e-3f));
  107. GGML_ASSERT(is_close(ggml_get_f32_1d(t1, 0), 0.0f, 1e-3f));
  108. }
  109. /////////////////////////////////////////
  110. {
  111. struct ggml_tensor * t0 = ggml_new_f32(ctx0, -7.0f);
  112. struct ggml_tensor * t1 = ggml_new_f32(ctx0, 8.0f);
  113. ggml_set_param(ctx0, t0);
  114. ggml_set_param(ctx0, t1);
  115. // f = (t0 + 2*t1 - 7)^2 + (2*t0 + t1 - 5)^2
  116. struct ggml_tensor * f =
  117. ggml_add(ctx0,
  118. ggml_sqr(ctx0,
  119. ggml_sub(ctx0,
  120. ggml_add(ctx0,
  121. t0,
  122. ggml_mul(ctx0, t1, ggml_new_f32(ctx0, 2.0f))),
  123. ggml_new_f32(ctx0, 7.0f)
  124. )
  125. ),
  126. ggml_sqr(ctx0,
  127. ggml_sub(ctx0,
  128. ggml_add(ctx0,
  129. ggml_mul(ctx0, t0, ggml_new_f32(ctx0, 2.0f)),
  130. t1),
  131. ggml_new_f32(ctx0, 5.0f)
  132. )
  133. )
  134. );
  135. enum ggml_opt_result res = ggml_opt(NULL, opt_params, f);
  136. GGML_ASSERT(res == GGML_OPT_OK);
  137. GGML_ASSERT(is_close(ggml_get_f32_1d(f, 0), 0.0f, 1e-3f));
  138. GGML_ASSERT(is_close(ggml_get_f32_1d(t0, 0), 1.0f, 1e-3f));
  139. GGML_ASSERT(is_close(ggml_get_f32_1d(t1, 0), 3.0f, 1e-3f));
  140. }
  141. ggml_free(ctx0);
  142. return 0;
  143. }