#include "ggml/ggml.h"

#include <stdio.h>
#include <stdlib.h>
- int main(int argc, const char ** argv) {
- const int n_threads = 2;
- struct ggml_init_params params = {
- .mem_size = 128*1024*1024,
- .mem_buffer = NULL,
- .no_alloc = false,
- };
- struct ggml_context * ctx0 = ggml_init(params);
- {
- struct ggml_tensor * x = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
- ggml_set_param(ctx0, x);
- struct ggml_tensor * a = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
- struct ggml_tensor * b = ggml_mul(ctx0, x, x);
- struct ggml_tensor * f = ggml_mul(ctx0, b, a);
- // a*x^2
- // 2*a*x
- ggml_print_objects(ctx0);
- struct ggml_cgraph gf = ggml_build_forward(f);
- struct ggml_cgraph gb = ggml_build_backward(ctx0, &gf, false);
- ggml_set_f32(x, 2.0f);
- ggml_set_f32(a, 3.0f);
- ggml_graph_reset(&gf);
- ggml_set_f32(f->grad, 1.0f);
- ggml_graph_compute_with_ctx(ctx0, &gb, n_threads);
- printf("f = %f\n", ggml_get_f32_1d(f, 0));
- printf("df/dx = %f\n", ggml_get_f32_1d(x->grad, 0));
- GGML_ASSERT(ggml_get_f32_1d(f, 0) == 12.0f);
- GGML_ASSERT(ggml_get_f32_1d(x->grad, 0) == 12.0f);
- ggml_set_f32(x, 3.0f);
- ggml_graph_reset(&gf);
- ggml_set_f32(f->grad, 1.0f);
- ggml_graph_compute_with_ctx(ctx0, &gb, n_threads);
- printf("f = %f\n", ggml_get_f32_1d(f, 0));
- printf("df/dx = %f\n", ggml_get_f32_1d(x->grad, 0));
- GGML_ASSERT(ggml_get_f32_1d(f, 0) == 27.0f);
- GGML_ASSERT(ggml_get_f32_1d(x->grad, 0) == 18.0f);
- ggml_graph_dump_dot(&gf, NULL, "test1-1-forward.dot");
- ggml_graph_dump_dot(&gb, &gf, "test1-1-backward.dot");
- }
- ///////////////////////////////////////////////////////////////
- {
- struct ggml_tensor * x1 = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
- struct ggml_tensor * x2 = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
- struct ggml_tensor * x3 = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
- ggml_set_f32(x1, 3.0f);
- ggml_set_f32(x2, 1.0f);
- ggml_set_f32(x3, 0.0f);
- ggml_set_param(ctx0, x1);
- ggml_set_param(ctx0, x2);
- struct ggml_tensor * y = ggml_add(ctx0, ggml_mul(ctx0, x1, x1), ggml_mul(ctx0, x1, x2));
- struct ggml_cgraph gf = ggml_build_forward(y);
- struct ggml_cgraph gb = ggml_build_backward(ctx0, &gf, false);
- ggml_graph_reset(&gf);
- ggml_set_f32(y->grad, 1.0f);
- ggml_graph_compute_with_ctx(ctx0, &gb, n_threads);
- printf("y = %f\n", ggml_get_f32_1d(y, 0));
- printf("df/dx1 = %f\n", ggml_get_f32_1d(x1->grad, 0));
- printf("df/dx2 = %f\n", ggml_get_f32_1d(x2->grad, 0));
- GGML_ASSERT(ggml_get_f32_1d(y, 0) == 12.0f);
- GGML_ASSERT(ggml_get_f32_1d(x1->grad, 0) == 7.0f);
- GGML_ASSERT(ggml_get_f32_1d(x2->grad, 0) == 3.0f);
- struct ggml_tensor * g1 = x1->grad;
- struct ggml_tensor * g2 = x2->grad;
- struct ggml_cgraph gbb = ggml_build_backward(ctx0, &gb, true);
- ggml_graph_reset(&gb);
- ggml_set_f32(g1->grad, 1.0f);
- ggml_set_f32(g2->grad, 1.0f);
- ggml_graph_compute_with_ctx(ctx0, &gbb, n_threads);
- printf("H * [1, 1] = [ %f %f ]\n", ggml_get_f32_1d(x1->grad, 0), ggml_get_f32_1d(x2->grad, 0));
- GGML_ASSERT(ggml_get_f32_1d(x1->grad, 0) == 3.0f);
- GGML_ASSERT(ggml_get_f32_1d(x2->grad, 0) == 1.0f);
- ggml_graph_dump_dot(&gf, NULL, "test1-2-forward.dot");
- ggml_graph_dump_dot(&gb, &gf, "test1-2-backward.dot");
- }
- ///////////////////////////////////////////////////////////////
- {
- struct ggml_tensor * x1 = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
- struct ggml_tensor * x2 = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
- ggml_set_param(ctx0, x1);
- ggml_set_param(ctx0, x2);
- struct ggml_tensor * y = ggml_mul(ctx0, ggml_add(ctx0, ggml_mul(ctx0, x1, x1), ggml_mul(ctx0, x1, x2)), x1);
- struct ggml_cgraph gf = ggml_build_forward(y);
- struct ggml_cgraph gb = ggml_build_backward(ctx0, &gf, false);
- ggml_set_f32(x1, 3.0f);
- ggml_set_f32(x2, 4.0f);
- ggml_graph_reset(&gf);
- ggml_set_f32(y->grad, 1.0f);
- ggml_graph_compute_with_ctx(ctx0, &gb, n_threads);
- printf("y = %f\n", ggml_get_f32_1d(y, 0));
- printf("df/dx1 = %f\n", ggml_get_f32_1d(x1->grad, 0));
- printf("df/dx2 = %f\n", ggml_get_f32_1d(x2->grad, 0));
- GGML_ASSERT(ggml_get_f32_1d(y, 0) == 63.0f);
- GGML_ASSERT(ggml_get_f32_1d(x1->grad, 0) == 51.0f);
- GGML_ASSERT(ggml_get_f32_1d(x2->grad, 0) == 9.0f);
- ggml_graph_dump_dot(&gf, NULL, "test1-3-forward.dot");
- ggml_graph_dump_dot(&gb, &gf, "test1-3-backward.dot");
- }
- ///////////////////////////////////////////////////////////////
- {
- struct ggml_tensor * x1 = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
- struct ggml_tensor * x2 = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
- struct ggml_tensor * x3 = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
- ggml_set_param(ctx0, x1);
- ggml_set_param(ctx0, x2);
- ggml_set_param(ctx0, x3);
- struct ggml_tensor * y = ggml_mul(ctx0, ggml_mul(ctx0, ggml_mul(ctx0, x1, x1), ggml_mul(ctx0, x2, x2)), x3);
- struct ggml_cgraph gf = ggml_build_forward(y);
- struct ggml_cgraph gb = ggml_build_backward(ctx0, &gf, false);
- ggml_set_f32(x1, 1.0f);
- ggml_set_f32(x2, 2.0f);
- ggml_set_f32(x3, 3.0f);
- ggml_graph_reset(&gf);
- ggml_set_f32(y->grad, 1.0f);
- ggml_graph_compute_with_ctx(ctx0, &gb, n_threads);
- printf("y = %f\n", ggml_get_f32_1d(y, 0));
- printf("df/dx1 = %f\n", ggml_get_f32_1d(x1->grad, 0));
- printf("df/dx2 = %f\n", ggml_get_f32_1d(x2->grad, 0));
- printf("df/dx3 = %f\n", ggml_get_f32_1d(x3->grad, 0));
- GGML_ASSERT(ggml_get_f32_1d(y, 0) == 12.0f);
- GGML_ASSERT(ggml_get_f32_1d(x1->grad, 0) == 24.0f);
- GGML_ASSERT(ggml_get_f32_1d(x2->grad, 0) == 12.0f);
- GGML_ASSERT(ggml_get_f32_1d(x3->grad, 0) == 4.0f);
- struct ggml_tensor * g1 = x1->grad;
- struct ggml_tensor * g2 = x2->grad;
- struct ggml_tensor * g3 = x3->grad;
- struct ggml_cgraph gbb = ggml_build_backward(ctx0, &gb, true);
- ggml_graph_reset(&gb);
- ggml_set_f32(g1->grad, 1.0f);
- ggml_set_f32(g2->grad, 1.0f);
- ggml_set_f32(g3->grad, 1.0f);
- ggml_graph_compute_with_ctx(ctx0, &gbb, n_threads);
- printf("H * [1, 1, 1] = [ %f %f %f ]\n",
- ggml_get_f32_1d(x1->grad, 0),
- ggml_get_f32_1d(x2->grad, 0),
- ggml_get_f32_1d(x3->grad, 0));
- GGML_ASSERT(ggml_get_f32_1d(x1->grad, 0) == 56.0f);
- GGML_ASSERT(ggml_get_f32_1d(x2->grad, 0) == 34.0f);
- GGML_ASSERT(ggml_get_f32_1d(x3->grad, 0) == 12.0f);
- ggml_graph_dump_dot(&gf, NULL, "test1-4-forward.dot");
- ggml_graph_dump_dot(&gb, &gf, "test1-4-backward.dot");
- }
- ///////////////////////////////////////////////////////////////
- {
- struct ggml_tensor * x1 = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 3);
- struct ggml_tensor * x2 = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 3);
- ggml_set_param(ctx0, x1);
- ggml_set_param(ctx0, x2);
- struct ggml_tensor * y = ggml_sum(ctx0, ggml_mul(ctx0, x1, x2));
- struct ggml_cgraph gf = ggml_build_forward(y);
- struct ggml_cgraph gb = ggml_build_backward(ctx0, &gf, false);
- ggml_set_f32(x1, 3.0f);
- ggml_set_f32(x2, 5.0f);
- ggml_graph_reset(&gf);
- ggml_set_f32(y->grad, 1.0f);
- ggml_graph_compute_with_ctx(ctx0, &gb, n_threads);
- printf("y = %f\n", ggml_get_f32_1d(y, 0));
- printf("df/dx1 = %f %f %f\n",
- ggml_get_f32_1d(x1->grad, 0),
- ggml_get_f32_1d(x1->grad, 1),
- ggml_get_f32_1d(x1->grad, 2));
- printf("df/dx2 = %f %f %f\n",
- ggml_get_f32_1d(x2->grad, 0),
- ggml_get_f32_1d(x2->grad, 1),
- ggml_get_f32_1d(x2->grad, 2));
- GGML_ASSERT(ggml_get_f32_1d(y, 0) == 45.0f);
- GGML_ASSERT(ggml_get_f32_1d(x1->grad, 0) == 5.0f);
- GGML_ASSERT(ggml_get_f32_1d(x2->grad, 0) == 3.0f);
- GGML_ASSERT(ggml_get_f32_1d(x1->grad, 1) == 5.0f);
- GGML_ASSERT(ggml_get_f32_1d(x2->grad, 1) == 3.0f);
- GGML_ASSERT(ggml_get_f32_1d(x1->grad, 2) == 5.0f);
- GGML_ASSERT(ggml_get_f32_1d(x2->grad, 2) == 3.0f);
- ggml_graph_dump_dot(&gf, NULL, "test1-5-forward.dot");
- ggml_graph_dump_dot(&gb, &gf, "test1-5-backward.dot");
- }
- ///////////////////////////////////////////////////////////////
- {
- struct ggml_tensor * x1 = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 3);
- struct ggml_tensor * x2 = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 3);
- ggml_set_param(ctx0, x1);
- ggml_set_param(ctx0, x2);
- struct ggml_tensor * y =
- ggml_sum(ctx0,
- ggml_add(ctx0,
- ggml_mul(ctx0, x1, x2),
- ggml_mul(ctx0,
- ggml_repeat(ctx0, ggml_new_f32(ctx0, -2.0f), x1),
- ggml_mul(ctx0, x1, x1)
- )
- )
- );
- struct ggml_cgraph gf = ggml_build_forward(y);
- struct ggml_cgraph gb = ggml_build_backward(ctx0, &gf, false);
- ggml_set_f32(x1, 3.0f);
- ggml_set_f32(x2, 5.0f);
- ggml_graph_reset(&gf);
- ggml_set_f32(y->grad, 1.0f);
- ggml_graph_compute_with_ctx(ctx0, &gb, n_threads);
- printf("y = %f\n", ggml_get_f32_1d(y, 0));
- printf("df/dx1 = %f %f %f\n",
- ggml_get_f32_1d(x1->grad, 0),
- ggml_get_f32_1d(x1->grad, 1),
- ggml_get_f32_1d(x1->grad, 2));
- printf("df/dx2 = %f %f %f\n",
- ggml_get_f32_1d(x2->grad, 0),
- ggml_get_f32_1d(x2->grad, 1),
- ggml_get_f32_1d(x2->grad, 2));
- GGML_ASSERT(ggml_get_f32_1d(y, 0) == -9.0f);
- GGML_ASSERT(ggml_get_f32_1d(x1->grad, 0) == -7.0f);
- GGML_ASSERT(ggml_get_f32_1d(x1->grad, 1) == -7.0f);
- GGML_ASSERT(ggml_get_f32_1d(x1->grad, 2) == -7.0f);
- GGML_ASSERT(ggml_get_f32_1d(x2->grad, 0) == 3.0f);
- GGML_ASSERT(ggml_get_f32_1d(x2->grad, 1) == 3.0f);
- GGML_ASSERT(ggml_get_f32_1d(x2->grad, 2) == 3.0f);
- ggml_graph_dump_dot(&gf, NULL, "test1-6-forward.dot");
- ggml_graph_dump_dot(&gb, &gf, "test1-6-backward.dot");
- }
- ///////////////////////////////////////////////////////////////
- {
- struct ggml_tensor * x1 = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 3);
- struct ggml_tensor * x2 = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 3);
- ggml_set_param(ctx0, x1);
- ggml_set_param(ctx0, x2);
- struct ggml_tensor * y =
- ggml_sum(ctx0,
- ggml_sub(ctx0,
- ggml_mul(ctx0, x1, x2),
- ggml_mul(ctx0,
- ggml_mul(ctx0, x1, x1),
- ggml_repeat(ctx0, ggml_new_f32(ctx0, -2.0f), x1)
- )
- )
- );
- struct ggml_cgraph gf = ggml_build_forward(y);
- struct ggml_cgraph gb = ggml_build_backward(ctx0, &gf, false);
- ggml_set_f32(x1, 3.0f);
- ggml_set_f32(x2, 5.0f);
- ggml_graph_reset(&gf);
- ggml_set_f32(y->grad, 1.0f);
- ggml_graph_compute_with_ctx(ctx0, &gb, n_threads);
- printf("y = %f\n", ggml_get_f32_1d(y, 0));
- printf("df/dx1 = %f %f %f\n",
- ggml_get_f32_1d(x1->grad, 0),
- ggml_get_f32_1d(x1->grad, 1),
- ggml_get_f32_1d(x1->grad, 2));
- printf("df/dx2 = %f %f %f\n",
- ggml_get_f32_1d(x2->grad, 0),
- ggml_get_f32_1d(x2->grad, 1),
- ggml_get_f32_1d(x2->grad, 2));
- GGML_ASSERT(ggml_get_f32_1d(y, 0) == 99.0f);
- GGML_ASSERT(ggml_get_f32_1d(x1->grad, 0) == 17.0f);
- GGML_ASSERT(ggml_get_f32_1d(x1->grad, 1) == 17.0f);
- GGML_ASSERT(ggml_get_f32_1d(x1->grad, 2) == 17.0f);
- GGML_ASSERT(ggml_get_f32_1d(x2->grad, 0) == 3.0f);
- GGML_ASSERT(ggml_get_f32_1d(x2->grad, 1) == 3.0f);
- GGML_ASSERT(ggml_get_f32_1d(x2->grad, 2) == 3.0f);
- ggml_graph_dump_dot(&gf, NULL, "test1-7-forward.dot");
- ggml_graph_dump_dot(&gb, &gf, "test1-7-backward.dot");
- }
- ///////////////////////////////////////////////////////////////
- {
- struct ggml_tensor * x1 = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 3);
- struct ggml_tensor * x2 = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 3);
- ggml_set_param(ctx0, x1);
- ggml_set_param(ctx0, x2);
- struct ggml_tensor * y =
- ggml_abs(ctx0,
- ggml_sub(ctx0, x1, x2)
- );
- struct ggml_cgraph gf = ggml_build_forward(y);
- struct ggml_cgraph gb = ggml_build_backward(ctx0, &gf, false);
- ggml_set_f32(x1, 3.0f);
- ggml_set_f32(x2, 5.0f);
- ggml_graph_reset(&gf);
- ggml_set_f32(y->grad, 1.0f);
- ggml_graph_compute_with_ctx(ctx0, &gb, n_threads);
- printf("y = %f\n", ggml_get_f32_1d(y, 0));
- printf("df/dx1 = %f %f %f\n",
- ggml_get_f32_1d(x1->grad, 0),
- ggml_get_f32_1d(x1->grad, 1),
- ggml_get_f32_1d(x1->grad, 2));
- printf("df/dx2 = %f %f %f\n",
- ggml_get_f32_1d(x2->grad, 0),
- ggml_get_f32_1d(x2->grad, 1),
- ggml_get_f32_1d(x2->grad, 2));
- GGML_ASSERT(ggml_get_f32_1d(y, 0) == 2.0f);
- GGML_ASSERT(ggml_get_f32_1d(x1->grad, 0) == -1.0f);
- GGML_ASSERT(ggml_get_f32_1d(x1->grad, 1) == -1.0f);
- GGML_ASSERT(ggml_get_f32_1d(x1->grad, 2) == -1.0f);
- GGML_ASSERT(ggml_get_f32_1d(x2->grad, 0) == 1.0f);
- GGML_ASSERT(ggml_get_f32_1d(x2->grad, 1) == 1.0f);
- GGML_ASSERT(ggml_get_f32_1d(x2->grad, 2) == 1.0f);
- ggml_set_f32(x1, 7.0f);
- ggml_set_f32(x2, 5.0f);
- ggml_graph_reset(&gf);
- ggml_set_f32(y->grad, 1.0f);
- ggml_graph_compute_with_ctx(ctx0, &gb, n_threads);
- printf("y = %f\n", ggml_get_f32_1d(y, 0));
- printf("df/dx1 = %f %f %f\n",
- ggml_get_f32_1d(x1->grad, 0),
- ggml_get_f32_1d(x1->grad, 1),
- ggml_get_f32_1d(x1->grad, 2));
- printf("df/dx2 = %f %f %f\n",
- ggml_get_f32_1d(x2->grad, 0),
- ggml_get_f32_1d(x2->grad, 1),
- ggml_get_f32_1d(x2->grad, 2));
- GGML_ASSERT(ggml_get_f32_1d(y, 0) == 2.0f);
- GGML_ASSERT(ggml_get_f32_1d(x1->grad, 0) == 1.0f);
- GGML_ASSERT(ggml_get_f32_1d(x1->grad, 1) == 1.0f);
- GGML_ASSERT(ggml_get_f32_1d(x1->grad, 2) == 1.0f);
- GGML_ASSERT(ggml_get_f32_1d(x2->grad, 0) == -1.0f);
- GGML_ASSERT(ggml_get_f32_1d(x2->grad, 1) == -1.0f);
- GGML_ASSERT(ggml_get_f32_1d(x2->grad, 2) == -1.0f);
- ggml_graph_dump_dot(&gf, NULL, "test1-8-forward.dot");
- ggml_graph_dump_dot(&gb, &gf, "test1-8-backward.dot");
- }
- ggml_free(ctx0);
- return 0;
- }