#ifndef IPKVWSJOCMGGVRASCBLPYHFBCHRIVEXYBOMMDAKFAUDFYVYOOOISLRXJNUJKPJEVMLDPRDSNM
#define IPKVWSJOCMGGVRASCBLPYHFBCHRIVEXYBOMMDAKFAUDFYVYOOOISLRXJNUJKPJEVMLDPRDSNM
#include "./includes.hpp"
#include "./place_holder.hpp"
#include "./variable.hpp"
#include "./constant.hpp"
#include "./value.hpp"
#include "./session.hpp"
#include "./utils/range.hpp"
#include "./utils/debug.hpp"
#include "./config.hpp"
#include "./utils/context_cast.hpp"
#include "./utils/for_each.hpp"
#include "./utils/id.hpp"
#include "./utils/enable_shared.hpp"
#include "./utils/fmt.hpp"
#include "./utils/enable_serializer.hpp"
#include "./utils/overload.hpp"
#include "./utils/concepts.hpp"
namespace ceras
{
///
/// @brief The default identity output shape calculator for unary/binary operators. Should be overridden for special operators whose output shape differs from the input shape.
///
struct identity_output_shape_calculator
{
std::vector<unsigned long> operator()( std::vector<unsigned long> const& input_shape ) const noexcept
{
return input_shape;
}
std::vector<unsigned long> operator()( std::vector<unsigned long> const& lhs_input_shape, std::vector<unsigned long> const& rhs_input_shape ) const noexcept
{
return lhs_input_shape.size() > rhs_input_shape.size() ? lhs_input_shape : rhs_input_shape;
}
std::vector<unsigned long> operator()() const noexcept
{
return std::vector<unsigned long>{ {-1UL,} };
}
}; // struct identity_output_shape_calculator
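///
/// Example (a sketch): an operator that collapses its input to a scalar does not use the
/// identity calculator; it supplies its own, as `sum_reduce` further below does:
///
/// \code{.cpp}
/// auto scalar_shape_calculator = []( std::vector<unsigned long> const& ) noexcept
/// {
///     return std::vector<unsigned long>{ {1,} };
/// };
/// \endcode
///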
///
/// @brief A unary operator is composed of a) an input expression, b) a forward action, c) a backward action,
/// d) an output shape calculator and e) a serializer.
/// Note: these components may carry state; in that case, holding them behind a shared_ptr is better than copying them directly.
///
template< typename Operator, typename Forward_Action, typename Backward_Action, typename Output_Shape_Calculator, typename Serializer >
struct unary_operator :
enable_id<unary_operator<Operator, Forward_Action, Backward_Action, Output_Shape_Calculator, Serializer>, "Unary_Operator">,
enable_unary_serializer<unary_operator<Operator, Forward_Action, Backward_Action, Output_Shape_Calculator, Serializer> >
{
struct unary_operator_state
{
Operator op_;
Forward_Action forward_action_;
Backward_Action backward_action_;
Output_Shape_Calculator output_shape_calculator_;
Serializer serializer_;
};
std::shared_ptr<unary_operator_state> state_;
typedef decltype( std::declval<Forward_Action>()( std::declval<Operator>().forward() ) ) tensor_type;
tensor_type input_data_;
tensor_type output_data_;
unary_operator( Operator const& op, Forward_Action const& forward_action, Backward_Action const& backward_action, Output_Shape_Calculator const& output_shape_calculator, Serializer const& serializer ) noexcept : state_{ std::make_shared<unary_operator_state>( op, forward_action, backward_action, output_shape_calculator, serializer ) } {}
auto forward()
{
auto& sess = get_default_session<tensor_type>();
output_data_= sess.query_forward_cache( (*this).id() );
if ( output_data_.empty() )
{
input_data_ = op().forward();
output_data_ = forward_action()( input_data_ );
sess.update_forward_cache( (*this).id(), output_data_ );
}
return output_data_;
}
void backward( tensor_type const& grad )
{
auto const& current_gradient = backward_action()( input_data_, output_data_, grad );
op().backward( current_gradient );
}
///
/// @brief Calculate the output tensor shape.
///
std::vector<unsigned long> shape() const noexcept
{
return output_shape_calculator()( op().shape() );
}
Operator const& op() const { return state_->op_; }
Forward_Action const& forward_action() const { return state_->forward_action_; }
Backward_Action const& backward_action() const { return state_->backward_action_; }
Output_Shape_Calculator const& output_shape_calculator() const { return state_->output_shape_calculator_; }
Serializer const& serializer() const { return state_->serializer_; }
tensor_type output_data() { return output_data_; }
tensor_type input_data() { return input_data_; }
Operator& op() { return state_->op_; }
Forward_Action& forward_action() { return state_->forward_action_; }
Backward_Action& backward_action() { return state_->backward_action_; }
Output_Shape_Calculator& output_shape_calculator() { return state_->output_shape_calculator_; }
Serializer& serializer() { return state_->serializer_; }
};
///
/// @brief Construct a unary operator by passing the forward/backward actions and an output shape calculator.
///
template< typename Forward_Action, typename Backward_Action, typename Output_Shape_Calculator= identity_output_shape_calculator, typename Serializer = default_unary_expression_serializer >
auto constexpr make_unary_operator( Forward_Action const& unary_forward_action,
Backward_Action const& unary_backward_action,
std::string const& name = "Anonymous_Unary_Operator",
Output_Shape_Calculator const& output_shape_calculator = Output_Shape_Calculator{},
Serializer const& serializer = Serializer{}
) noexcept
{
static_assert( std::is_invocable_v<Output_Shape_Calculator, std::vector<unsigned long>>, "Unary operator shape calculator should be able to accept a vector of unsigned long." );
return [=]( auto const& op ) noexcept // capture by value: the returned factory may outlive the arguments
{
auto ans = unary_operator{ op, unary_forward_action, unary_backward_action, output_shape_calculator, serializer };
ans.name_ = name;
return ans;
};
}
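///
/// Example (a minimal sketch, not part of the library): an elementwise `square` operator built
/// with make_unary_operator. The name `square` is hypothetical; the forward action reuses the
/// tensor-level `elementwise_product` that this file's own operators rely on, and the backward
/// action applies d(x*x)/dx = 2x. The default identity shape calculator and serializer apply.
///
/// \code{.cpp}
/// template <Expression Ex>
/// auto square( Ex const& ex ) noexcept
/// {
///     return make_unary_operator(
///         []<Tensor Tsor>( Tsor const& input ) noexcept
///         {
///             return elementwise_product( input, input ); // forward: x*x, elementwise
///         },
///         []<Tensor Tsor>( Tsor const& input, Tsor const&, Tsor const& grad ) noexcept
///         {
///             typedef typename Tsor::value_type value_type;
///             Tsor ans = elementwise_product( grad, input ); // backward: grad * 2x
///             ans *= value_type{2};
///             return ans;
///         },
///         "square"
///     )( ex );
/// }
/// \endcode
///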
///
/// @brief A binary operator is composed of a) a left-hand-side input expression, b) a right-hand-side input expression, c) a forward action and d) a backward action, plus an output shape calculator and a serializer.
///
template< typename Lhs_Operator, typename Rhs_Operator, typename Forward_Action, typename Backward_Action, typename Output_Shape_Calculator, typename Serializer >
struct binary_operator :
enable_id<binary_operator<Lhs_Operator, Rhs_Operator, Forward_Action, Backward_Action, Output_Shape_Calculator, Serializer>, "Binary_Operator">,
enable_binary_serializer<binary_operator<Lhs_Operator, Rhs_Operator, Forward_Action, Backward_Action, Output_Shape_Calculator, Serializer>>
{
struct binary_operator_state
{
Lhs_Operator lhs_op_;
Rhs_Operator rhs_op_;
Forward_Action forward_action_;
Backward_Action backward_action_;
Output_Shape_Calculator output_shape_calculator_;
Serializer serializer_;
};
std::shared_ptr<binary_operator_state> state_;
typedef typename tensor_deduction<Lhs_Operator, Rhs_Operator>::tensor_type tensor_type; // defined in value.hpp
tensor_type lhs_input_data_;
tensor_type rhs_input_data_;
tensor_type output_data_;
binary_operator( Lhs_Operator const& lhs_op, Rhs_Operator const& rhs_op, Forward_Action const& forward_action, Backward_Action const& backward_action, Output_Shape_Calculator const& output_shape_calculator, Serializer const& serializer) noexcept :
state_{ std::make_shared<binary_operator_state>(lhs_op, rhs_op, forward_action, backward_action, output_shape_calculator, serializer) } {}
auto forward()
{
auto& sess = get_default_session<tensor_type>();
output_data_= sess.query_forward_cache( (*this).id() );
if ( !output_data_.empty() )
return output_data_;
static_assert( !(is_value_v<Lhs_Operator> && is_value_v<Rhs_Operator>), "Not valid for two values" );
if constexpr ( is_value_v<Lhs_Operator> )
{
rhs_input_data_ = rhs_op().forward();
lhs_input_data_ = lhs_op().forward( rhs_input_data_ );
}
else if constexpr ( is_value_v<Rhs_Operator> )
{
lhs_input_data_ = lhs_op().forward();
rhs_input_data_ = rhs_op().forward( lhs_input_data_ );
}
else
{
lhs_input_data_ = lhs_op().forward();
rhs_input_data_ = rhs_op().forward();
}
output_data_ = forward_action()( lhs_input_data_, rhs_input_data_ );
sess.update_forward_cache( (*this).id(), output_data_ );
return output_data_;
}
///
/// @brief Backward action, grad back-propagated.
///
void backward( tensor_type const& grad )
{
auto const& [current_gradient_lhs, current_gradient_rhs] = backward_action()( lhs_input_data_, rhs_input_data_, output_data_, grad );
lhs_op().backward( current_gradient_lhs );
rhs_op().backward( current_gradient_rhs );
}
///
/// @brief Calculate the output shape.
///
std::vector<unsigned long> shape() const noexcept
{
if constexpr ( is_value_v<Lhs_Operator> )
return rhs_op().shape();
else if constexpr ( is_value_v<Rhs_Operator> )
return lhs_op().shape();
else
return output_shape_calculator()( lhs_op().shape(), rhs_op().shape() );
}
Lhs_Operator const& lhs_op() const { return state_->lhs_op_; }
Rhs_Operator const& rhs_op() const { return state_->rhs_op_; }
Forward_Action const& forward_action() const { return state_->forward_action_; }
Backward_Action const& backward_action() const { return state_->backward_action_; }
Output_Shape_Calculator const& output_shape_calculator() const { return state_->output_shape_calculator_; }
Serializer const& serializer() const { return state_->serializer_; }
Lhs_Operator& lhs_op() { return state_->lhs_op_; }
Rhs_Operator& rhs_op() { return state_->rhs_op_; }
Forward_Action& forward_action() { return state_->forward_action_; }
Backward_Action& backward_action() { return state_->backward_action_; }
Output_Shape_Calculator& output_shape_calculator() { return state_->output_shape_calculator_; }
Serializer& serializer() { return state_->serializer_; }
tensor_type output_data() { return output_data_; }
tensor_type lhs_input_data() { return lhs_input_data_; }
tensor_type rhs_input_data() { return rhs_input_data_; }
}; // struct binary_operator
template< typename Forward_Action, typename Backward_Action, typename Output_Shape_Calculator= identity_output_shape_calculator, typename Serializer = default_binary_expression_serializer >
auto make_binary_operator( Forward_Action const& binary_forward_action,
Backward_Action const& binary_backward_action,
std::string const& name = "Anonymous_Binary_Operator",
Output_Shape_Calculator const& output_shape_calculator = Output_Shape_Calculator{},
Serializer const& serializer = Serializer{}) noexcept
{
static_assert( std::is_invocable_v<Output_Shape_Calculator, std::vector<unsigned long>, std::vector<unsigned long>>, "Binary operator shape calculator should be able to accept two vectors of unsigned long." );
return [=]( auto const& lhs_op, auto const& rhs_op ) noexcept // capture by value: the returned factory may outlive the arguments
{
auto ans = binary_operator{ lhs_op, rhs_op, binary_forward_action, binary_backward_action, output_shape_calculator, serializer };
ans.name_ = name;
return ans;
};
}
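///
/// Example (a minimal sketch, not part of the library): a simplified `plus`-like operator without
/// broadcasting. The forward action reuses the tensor-level `add` that the real `plus` below is
/// built on; the backward action routes the incoming gradient to both branches unchanged, which
/// is only valid when the two operands share a shape.
///
/// \code{.cpp}
/// template <Expression Lhs, Expression Rhs>
/// auto simple_plus( Lhs const& lhs_ex, Rhs const& rhs_ex ) noexcept
/// {
///     return make_binary_operator(
///         []<Tensor Tsor>( Tsor const& a, Tsor const& b ) noexcept
///         {
///             return add( a, b ); // forward: a + b
///         },
///         []<Tensor Tsor>( Tsor const&, Tsor const&, Tsor const&, Tsor const& grad ) noexcept
///         {
///             return std::make_tuple( grad, grad ); // backward: d(a+b)/da = d(a+b)/db = 1
///         },
///         "simple_plus"
///     )( lhs_ex, rhs_ex );
/// }
/// \endcode
///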
template< typename T >
struct is_unary_operator : std::false_type{};
template< typename Operator, typename Forward_Action, typename Backward_Action, typename Output_Shape_Calculator, typename Serializer >
struct is_unary_operator< unary_operator<Operator, Forward_Action, Backward_Action, Output_Shape_Calculator, Serializer> > : std::true_type {};
///
/// If T is an instance of unary_operator, this constant equals `true`; otherwise it equals `false`.
///
template< class T >
inline constexpr bool is_unary_operator_v = is_unary_operator<T>::value;
///
/// @concept Unary_Operator<>
/// @brief A type that represents a unary operator.
///
template< typename T >
concept Unary_Operator = is_unary_operator_v<T>;
template< typename T >
struct is_binary_operator : std::false_type{};
template< typename Lhs_Operator, typename Rhs_Operator, typename Forward_Action, typename Backward_Action, typename Output_Shape_Calculator, typename Serializer >
struct is_binary_operator< binary_operator<Lhs_Operator, Rhs_Operator, Forward_Action, Backward_Action, Output_Shape_Calculator, Serializer> > : std::true_type {};
///
/// If T is an instance of binary_operator, this constant equals `true`; otherwise it equals `false`.
///
template< class T >
inline constexpr bool is_binary_operator_v = is_binary_operator<T>::value;
///
/// @concept Binary_Operator<>
/// @brief A type that represents a binary operator.
///
template< typename T >
concept Binary_Operator = is_binary_operator_v<T>;
///
/// @concept Operator<>
/// @brief A type that represents an unary or a binary operator.
///
template< typename T >
concept Operator = Unary_Operator<T> || Binary_Operator<T>;
///
/// @concept Expression<>
/// @brief A type that represents a unary operator, a binary operator, a variable, a place_holder, a constant or a value
///
template< typename T >
concept Expression = Operator<T> || Variable<T> || Place_Holder<T> || Constant<T> || Value<T>;
template< Expression Ex >
std::tuple<std::string, std::vector<std::string>> const serialize( Ex const& ex )
{
return ex.serialize();
}
template< typename ... Args >
inline constexpr auto make_argumented_operator_serializer( Args const& ... args ) noexcept
{
return overload( [=]<Expression Self_Expression, Expression Input_Expression>( Self_Expression const& self_expression, Input_Expression const& input_expression ) noexcept
{
auto const& [input_expression_name, input_expression_code] = serialize( input_expression );
std::string const& self_expression_identity = fmt::format( "unary_expression_{}_{}", self_expression.name(), self_expression.id() );
std::vector<std::string> self_expression_code = input_expression_code;
constexpr unsigned long number_of_args = sizeof...( Args );
std::string arg_string_formater = std::string{ "{}" };
{
for ( unsigned long idx = 1; idx < number_of_args; ++idx )
arg_string_formater += std::string{", {}"};
}
std::string code_formater = std::string{"auto {} = {}( "} + arg_string_formater + std::string{" )( {} );"};
self_expression_code.emplace_back( fmt::format( code_formater, self_expression_identity, self_expression.name(), args..., input_expression_name ) );
return std::make_tuple( self_expression_identity, self_expression_code );
},
[=]<Expression Self_Expression, Expression LHS_Input_Expression, Expression RHS_Input_Expression>( Self_Expression const& self_expression, LHS_Input_Expression const& lhs_input_expression, RHS_Input_Expression const& rhs_input_expression ) noexcept
{
auto const& [lhs_input_expression_name, lhs_input_expression_code] = serialize( lhs_input_expression );
auto const& [rhs_input_expression_name, rhs_input_expression_code] = serialize( rhs_input_expression );
std::string const& self_expression_identity = fmt::format( "binary_expression_{}_{}", self_expression.name(), self_expression.id() );
std::vector<std::string> self_expression_code = lhs_input_expression_code;
std::copy( rhs_input_expression_code.begin(), rhs_input_expression_code.end(), std::back_inserter( self_expression_code ) );
constexpr unsigned long number_of_args = sizeof...( Args );
std::string arg_string_formater = std::string{ "{}" };
{
for ( unsigned long idx = 1; idx < number_of_args; ++idx )
arg_string_formater += std::string{", {}"};
}
std::string code_formater = std::string{"auto {} = {}( "} + arg_string_formater + std::string{" )( {}, {} );"};
self_expression_code.emplace_back( fmt::format( code_formater, self_expression_identity, self_expression.name(), args..., lhs_input_expression_name, rhs_input_expression_name ) );
return std::make_tuple( self_expression_identity, self_expression_code );
}
);
}
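///
/// For instance, the serializer produced by `make_argumented_operator_serializer( new_shape )`
/// in `broadcast` below emits one line of C++ per expression, roughly of the form below
/// (a sketch; the identifiers depend on the expression's name and id, and the exact rendering
/// of the shape argument depends on the fmt helper):
///
/// \code{.cpp}
/// auto unary_expression_broadcast_42 = broadcast( {128, 128, 64} )( input_identity );
/// \endcode
///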
///
/// Generates the computation graph in the [graph description language](https://www.graphviz.org/documentation/).
/// @param ex An expression.
/// @return A string describing the computation graph, in graph description language.
///
template< Expression Ex >
inline std::string computation_graph( Ex const& ex ) noexcept
{
auto generate_node_and_label = []<Expression Expr>( Expr const& expr ) noexcept
{
std::string const id = std::to_string( expr.id() );
std::string const name = expr.name();
std::string node = std::string{"n"} + id;
std::vector<long long> shape;
{
std::vector<unsigned long> _shape = expr.shape();
shape.resize( _shape.size() );
std::copy( _shape.begin(), _shape.end(), shape.begin() );
if ( _shape.size() > 0 && _shape[0] == -1UL )
shape[0] = -1;
}
std::string label = fmt::format( "{} <shape:{}> [id:{}]", name, shape, id);
return std::make_tuple( node, label );
};
auto generate_dot = [&generate_node_and_label]<Expression Expr>( Expr const& expr, auto const& _generate_dot ) noexcept
{
auto const& [node, label] = generate_node_and_label( expr );
std::string const& expr_dot = node + std::string{" [label=\""} + label + std::string{"\"] ;\n"};
if constexpr( is_unary_operator_v<Expr> )
{
auto const& [n_node, n_label] = generate_node_and_label( expr.op() );
std::string const& arrow_relation = n_node + std::string{" -> "} + node + std::string{" ;\n"};
std::string const& op_dot = _generate_dot( expr.op(), _generate_dot );
return expr_dot + arrow_relation + op_dot;
}
else if constexpr( is_binary_operator_v<Expr> )
{
// for LHS operator
auto const& [n_lhs_node, n_lhs_label] = generate_node_and_label( expr.lhs_op() );
std::string const& arrow_lhs_relation = n_lhs_node + std::string{" -> "} + node + std::string{" ;\n"};
std::string const& op_lhs_dot = _generate_dot( expr.lhs_op(), _generate_dot );
// for RHS operator
auto const& [n_rhs_node, n_rhs_label] = generate_node_and_label( expr.rhs_op() );
std::string const& arrow_rhs_relation = n_rhs_node + std::string{" -> "} + node + std::string{" ;\n"};
std::string const& op_rhs_dot = _generate_dot( expr.rhs_op(), _generate_dot );
return expr_dot + arrow_lhs_relation + arrow_rhs_relation + op_lhs_dot + op_rhs_dot;
}
else if constexpr ( is_variable_v<Expr> )
{
std::vector<unsigned long> const& shape = expr.shape();
bool const training_state = expr.trainable();
// shape
std::stringstream ss;
std::copy( shape.begin(), shape.end(), std::ostream_iterator<unsigned long>( ss, " " ) );
std::string const& str_shape = ss.str() + (training_state ? std::string{"), trainable"} : std::string{"), non-trainable"});
// trainable state
std::string const& new_label = label + std::string{"[("} + str_shape + std::string{"]"};
if (!training_state)
return node + std::string{" [shape=box,label=\""} + new_label + std::string{"\"] ;\n"};
return node + std::string{" [peripheries=3,style=filled,color=\".7 .3 1.0\",shape=box,label=\""} + new_label + std::string{"\"] ;\n"};
}
else
{
return expr_dot;
}
};
std::string const& head = "\n\ndigraph g {\n";
std::string const& tail = "}\n\n";
return head + generate_dot( ex, generate_dot ) + tail;
}
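///
/// Example usage (a sketch, assuming `<fstream>` is available): dump the graph of an expression
/// to a `.dot` file, then render it with graphviz, e.g. `dot -Tpng graph.dot -o graph.png`.
///
/// \code{.cpp}
/// auto x = variable{ tensor<float>{ {2, 3} } };
/// auto y = variable{ tensor<float>{ {2, 3} } };
/// auto z = x + y;
/// std::ofstream ofs{ "graph.dot" };
/// ofs << computation_graph( z );
/// \endcode
///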
///
/// @brief Broadcast an expression to produce a new shape.
///
/// \code{.cpp}
/// auto e = ...; // shape `(1, 64)`
/// auto f = broadcast( {128, 128, 64} )( e ); // shape `(128, 128, 64)`
/// \endcode
///
auto inline broadcast( std::vector<unsigned long> const& new_shape ) noexcept
{
std::shared_ptr<std::any> forward_cache = std::make_shared<std::any>();
std::shared_ptr<std::any> backward_cache = std::make_shared<std::any>();
return [new_shape, forward_cache, backward_cache]<Expression Ex>( Ex const& ex ) noexcept
{
return make_unary_operator
(
[new_shape, forward_cache]<Tensor Tsor>( Tsor const& input )noexcept
{
better_assert( input.size() , "broadcast::forward: empty input." );
std::vector<unsigned long> const& old_shape = input.shape();
if (new_shape == old_shape) return input;
// Note: ceras only considers simple cases such as `Wx+b`, in which `b` has either shape `(n,)` or shape `(1,n)`; the implementation is simplified accordingly.
unsigned long const new_size = std::accumulate( new_shape.begin(), new_shape.end(), 1UL, []( auto x, auto y ){ return x*y; } );
unsigned long const old_size = std::accumulate( old_shape.begin(), old_shape.end(), 1UL, []( auto x, auto y ){ return x*y; } );
unsigned long const factor = new_size / old_size;
Tsor& ans = context_cast<Tsor>( forward_cache );
ans.resize( new_shape );
for ( auto idx : range( factor ) )
for_each( input.begin(), input.end(), ans.begin()+idx*old_size, []( auto x, auto& y ){ y = x; } );
return ans;
},
[backward_cache]<Tensor Tsor>( Tsor const& input, Tsor const& output, Tsor const& grad) noexcept
{
better_assert( input.size() , "broadcast::backward: empty input." );
better_assert( output.size() , "broadcast::backward: empty output." );
better_assert( grad.size() , "broadcast::backward: empty grad." );
if ( input.shape() == output.shape() ) return grad;
better_assert( output.shape() == grad.shape(), fmt::format( "Error with broadcast: shape mismatch. Output shape is {}, but grad shape is {}", output.shape(), grad.shape() ) );
std::vector<unsigned long> const& old_shape = input.shape();
unsigned long const old_size = std::accumulate( old_shape.begin(), old_shape.end(), 1UL, []( auto x, auto y ){ return x*y; } );
std::vector<unsigned long> const& new_shape = grad.shape();
unsigned long const new_size = std::accumulate( new_shape.begin(), new_shape.end(), 1UL, []( auto x, auto y ){ return x*y; } );
unsigned long const factor = new_size / old_size;
Tsor& ans = context_cast<Tsor>( backward_cache );
ans.resize( input.shape() );
for_each( ans.begin(), ans.end(), []( auto& x ){ x = 0; } );
for ( auto idx : range( factor ) )
for_each( ans.begin(), ans.end(), grad.begin()+idx*old_size, []( auto& x, auto y ){ x += y; } );
return ans;
},
"broadcast",
[new_shape]( std::vector<unsigned long> const&) noexcept { return new_shape; },
make_argumented_operator_serializer( new_shape )
)(ex);
};
}
namespace
{
struct plus_context
{
auto make_forward() const noexcept
{
return []<Tensor Tsor>( Tsor const& lhs_tensor, Tsor const& rhs_tensor ) noexcept
{
better_assert( !has_nan( lhs_tensor ), "forward propagation for operator plus: lhs_tensor contains NaN!" );
better_assert( !has_nan( rhs_tensor ), "forward propagation for operator plus: rhs_tensor contains NaN!" );
return add( lhs_tensor, rhs_tensor );
};
}
auto make_backward() const noexcept
{
return []<Tensor Tsor>( Tsor const& lhs_input, Tsor const& rhs_input, Tsor const&, Tsor const& grad ) noexcept
{
better_assert( !has_nan( grad ), "backprop: upcoming gradient for operator + contains NaN!" );
auto const& grad_fun = [&grad]( auto const& input )
{
Tsor ans = grad.deep_copy();
while( input.ndim() < ans.ndim() )
ans = sum( ans, 0 );
auto const& shape = input.shape();
for ( auto axis : range( input.ndim() ) )
if ( shape[axis] == 1 )
ans = sum( ans, axis, true );
return ans;
};
return std::make_tuple( grad_fun( lhs_input), grad_fun( rhs_input ) );
};
}
}; // plus_context
}//anonymous namespace
template< Expression Lhs_Expression, Expression Rhs_Expression >
auto constexpr plus( Lhs_Expression const& lhs_ex, Rhs_Expression const& rhs_ex ) noexcept
{
auto const& shape_calculator = []( std::vector<unsigned long> const& l, std::vector<unsigned long> const& r ) noexcept
{
return broadcast_shape( l, r );
};
return make_binary_operator( plus_context{}.make_forward(), plus_context{}.make_backward(), "plus", shape_calculator )( lhs_ex, rhs_ex );
}
template< Expression Lhs_Expression, Expression Rhs_Expression >
auto constexpr operator + ( Lhs_Expression const& lhs_ex, Rhs_Expression const& rhs_ex ) noexcept
{
return plus( lhs_ex, rhs_ex );
}
template< Expression Ex >
auto constexpr operator + ( Ex const& ex ) noexcept
{
return ex;
}
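///
/// Example: `plus` broadcasts its operands via `broadcast_shape`, so adding a bias-like
/// expression of shape `(1, n)` to a batch of shape `(m, n)` works elementwise:
///
/// \code{.cpp}
/// auto x = variable{ tensor<float>{ {2, 3} } }; // shape (2, 3)
/// auto b = variable{ tensor<float>{ {1, 3} } }; // shape (1, 3)
/// auto y = x + b; // b is broadcast along the first axis; y has shape (2, 3)
/// \endcode
///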
namespace
{
struct multiplication_context
{
auto make_forward() const noexcept
{
return []( std::shared_ptr<std::any> forward_cache ) noexcept
{
return [forward_cache]<Tensor Tsor>( Tsor const& lhs_tensor, Tsor const& rhs_tensor ) noexcept
{
better_assert( lhs_tensor.size(), "multiplication::forward: empty lhs tensor." );
better_assert( rhs_tensor.size(), "multiplication::forward: empty rhs tensor." );
better_assert( lhs_tensor.ndim() == 2, "multiplication::forward: lhs_tensor is not 2D." );
better_assert( rhs_tensor.ndim() == 2, "multiplication::forward: rhs_tensor is not 2D." );
Tsor& ans = context_cast<Tsor>( forward_cache );
multiply( lhs_tensor, rhs_tensor, ans );
return ans;
};
};
}
auto make_backward() const noexcept
{
return []( std::shared_ptr<std::any> backward_cache_lhs, std::shared_ptr<std::any> backward_cache_rhs ) noexcept
{
return [backward_cache_lhs, backward_cache_rhs]<Tensor Tsor>( Tsor const& lhs_input, Tsor const& rhs_input, [[maybe_unused]] Tsor const& output, Tsor const& grad ) noexcept
{
// left branch <-- grad * rhs^T
auto const& g_shape = grad.shape();
auto const [m, n] = std::make_tuple( g_shape[0], g_shape[1] ); // grad has shape (m, n)
auto const k = *(lhs_input.shape().rbegin()); // lhs_input has shape (m, k)
Tsor& lhs_grad = context_cast<Tsor>( backward_cache_lhs );
lhs_grad.resize( lhs_input.shape() );
gemm( grad.data(), false, rhs_input.data(), true, m, n, k, lhs_grad.data() );
// right branch <-- lhs^T * grad
Tsor& rhs_grad = context_cast<Tsor>( backward_cache_rhs );
rhs_grad.resize( rhs_input.shape() );
gemm( lhs_input.data(), true, grad.data(), false, k, m, n, rhs_grad.data() );
return std::make_tuple( lhs_grad, rhs_grad );
};
};
}
};//multiplication_context
}//anonymous namespace
template< Expression Lhs_Expression, Expression Rhs_Expression >
auto operator * ( Lhs_Expression const& lhs_ex, Rhs_Expression const& rhs_ex ) noexcept
{
// case of Value * Operator and Operator * Value
if constexpr( is_value_v<Lhs_Expression> || is_value_v<Rhs_Expression> )
{
return elementwise_product( lhs_ex, rhs_ex );
}
else
{
auto const& shape_calculator = []( std::vector<unsigned long> const& l, std::vector<unsigned long> const& r ) noexcept
{
better_assert( l.size() == 2, fmt::format( "expecting l size of 2, but got {}", l.size() ) );
better_assert( r.size() == 2, fmt::format( "expecting r size of 2, but got {}", r.size() ) );
better_assert( l[1] == r[0], fmt::format( "expecting l[1] == r[0], but l[1]={}, r[0]={}", l[1], r[0] ) ); // TODO: what if unknown dimension???
return std::vector<unsigned long>{ {l[0], r[1]} };
};
std::shared_ptr<std::any> forward_cache = std::make_shared<std::any>();
std::shared_ptr<std::any> backward_cache_lhs = std::make_shared<std::any>();
std::shared_ptr<std::any> backward_cache_rhs = std::make_shared<std::any>();
return make_binary_operator( multiplication_context{}.make_forward()(forward_cache), multiplication_context{}.make_backward()(backward_cache_lhs, backward_cache_rhs), "multiply", shape_calculator )( lhs_ex, rhs_ex );
}
}
template< Expression Lhs_Expression, Expression Rhs_Expression >
auto multiply( Lhs_Expression const& lhs_ex, Rhs_Expression const& rhs_ex ) noexcept
{
return lhs_ex * rhs_ex;
}
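///
/// Example: `operator *` between two non-value expressions is a 2D matrix product; both
/// operands must be rank-2 and their inner dimensions must agree:
///
/// \code{.cpp}
/// auto a = variable{ tensor<float>{ {4, 13} } }; // shape (4, 13)
/// auto b = variable{ tensor<float>{ {13, 1} } }; // shape (13, 1)
/// auto c = a * b; // matrix product; c.shape() is {4, 1}
/// \endcode
///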
///
/// @brief Negative operator, elementwise.
/// @code{.cpp}
/// auto x = variable{ ... };
/// auto nx = negative( x );
/// @endcode
///
template <Expression Ex>
auto constexpr negative( Ex const& ex ) noexcept
{
return make_unary_operator( []<Tensor Tsor>( Tsor const& tensor ) noexcept
{
better_assert( !has_nan( tensor ), "forward propagation for operator negative: tensor contains NaN!" );
return -tensor;
},
[]<Tensor Tsor>( Tsor const&, Tsor const&, Tsor const& grad ) noexcept
{
better_assert( !has_nan( grad ), "input gradient for operator negative contains NaN!" );
return -grad;
},
"negative"
)( ex );
}
template< Expression Ex, arithmetic A >
auto operator + ( Ex const& ex, A const& rhs_val ) noexcept
{
std::shared_ptr<std::any> forward_cache = std::make_shared<std::any>();
return make_unary_operator( [rhs_val, forward_cache]<Tensor Tsor>( Tsor const& tensor ) noexcept
{
better_assert( tensor.size() > 0, "forward propagation for operator ex + a receives an empty tensor." );
typedef typename Tsor::value_type value_type;
Tsor& ans = context_cast<Tsor>( forward_cache );
ans.resize( tensor.shape() );
for_each( ans.begin(), ans.end(), tensor.begin(), [rhs_val]( value_type& x, value_type const& y ){ x = y + static_cast<value_type>(rhs_val); } );
return ans;
},
[]<Tensor Tsor>( Tsor const&, Tsor const&, Tsor const& grad ) noexcept
{
better_assert( grad.size() > 0, "backward propagation for operator ex + a receives an empty grad." );
return grad;
},
"arithmetic_plus",
identity_output_shape_calculator{},
[rhs_val]<Expression Self_Expression, Expression Input_Expression>( Self_Expression const& unary_expression, Input_Expression const& input_expression ) noexcept
{ // serializer
auto const& [input_expression_name, input_expression_code] = serialize( input_expression );
std::string unary_expression_identity = fmt::format( "unary_expression_{}_{}", unary_expression.name(), unary_expression.id() );
std::vector<std::string> unary_expressioncode = input_expression_code;
unary_expressioncode.emplace_back( fmt::format( "auto {} = {} + {};", unary_expression_identity, input_expression_name, rhs_val ) );
return std::make_tuple( unary_expression_identity, unary_expressioncode );
}
)( ex );
}
template< Expression Ex, arithmetic A >
auto constexpr operator + ( A const& lhs_val, Ex const& ex ) noexcept
{
return ex + lhs_val;
}
template< Expression Ex, arithmetic A >
auto constexpr operator - ( Ex const& ex, A const& rhs_val ) noexcept
{
return ex + (-rhs_val); // TODO: corner cases of unsigned integers
}
template< Expression Ex, arithmetic A >
auto constexpr operator - ( A const& lhs_val, Ex const& ex ) noexcept
{
return (-ex) + lhs_val;
}
template< Expression Ex, arithmetic A >
auto operator * ( Ex const& ex, A const& rhs_val ) noexcept
{
std::shared_ptr<std::any> forward_cache = std::make_shared<std::any>();
std::shared_ptr<std::any> backward_cache = std::make_shared<std::any>();
return make_unary_operator( [rhs_val, forward_cache]<Tensor Tsor>( Tsor const& tensor ) noexcept
{
better_assert( tensor.size() > 0, "forward propagation for operator ex * a receives an empty tensor." );
typedef typename Tsor::value_type value_type;
Tsor& ans = context_cast<Tsor>( forward_cache );
ans.resize( tensor.shape() );
for_each( ans.begin(), ans.end(), tensor.begin(), [rhs_val]( value_type& x, value_type const& y ){ x = y * static_cast<value_type>(rhs_val); } );
return ans;
},
[rhs_val, backward_cache]<Tensor Tsor>( [[maybe_unused]] Tsor const& input, [[maybe_unused]] Tsor const& output, Tsor const& grad ) noexcept
{
better_assert( grad.size() > 0, "backward propagation for operator ex * a receives an empty grad." );
typedef typename Tsor::value_type value_type;
Tsor& ans = context_cast<Tsor>( backward_cache );
ans.resize( grad.shape() );
for_each( ans.begin(), ans.end(), grad.begin(), [rhs_val]( value_type& x, value_type const& y ){ x = y * static_cast<value_type>(rhs_val); } );
return ans;
},
"arithmetic_multiply",
identity_output_shape_calculator{},
[rhs_val]<Expression Self_Expression, Expression Input_Expression>( Self_Expression const& unary_expression, Input_Expression const& input_expression ) noexcept
{ // serializer
auto const& [input_expression_name, input_expression_code] = serialize( input_expression );
std::string unary_expression_identity = fmt::format( "unary_expression_{}_{}", unary_expression.name(), unary_expression.id() );
std::vector<std::string> unary_expressioncode = input_expression_code;
unary_expressioncode.emplace_back( fmt::format( "auto {} = {} * {};", unary_expression_identity, input_expression_name, rhs_val ) );
return std::make_tuple( unary_expression_identity, unary_expressioncode );
}
)( ex );
}
template< Expression Ex, arithmetic A >
auto constexpr operator * ( A const& lhs_val, Ex const& ex ) noexcept
{
return ex * lhs_val;
}
template< Expression Ex, arithmetic A >
auto constexpr operator / ( Ex const& ex, A const& rhs_val ) noexcept
{
return ex * ( 1.0 / rhs_val );
}
template< Expression Ex, arithmetic A >
auto constexpr operator / ( A const& lhs_val, Ex const& ex ) noexcept
{
return inverse( ex ) * lhs_val; // a / ex == a * (1/ex), elementwise
}
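///
/// Example: the scalar overloads above compose elementwise, so affine and reciprocal
/// expressions can be written directly:
///
/// \code{.cpp}
/// auto x = variable{ tensor<float>{ {2, 3} } };
/// auto y = 2.0f * x + 1.0f; // elementwise 2x + 1; shape stays (2, 3)
/// auto z = 1.0f / x;        // elementwise reciprocal, via `inverse`
/// \endcode
///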
template <Expression Ex>
auto constexpr operator - ( Ex const& ex ) noexcept
{
return negative( ex );
}
///
/// @brief Inverse operator, elementwise.
/// @code{.cpp}
/// auto x = variable{ ... };
/// auto ix = inverse( x );
/// @endcode
///
template <Expression Ex>
auto inverse( Ex const& ex ) noexcept
{
std::shared_ptr<std::any> forward_cache = std::make_shared<std::any>();
std::shared_ptr<std::any> backward_cache = std::make_shared<std::any>();
return make_unary_operator( [forward_cache]<Tensor Tsor>( Tsor const& tensor ) noexcept
{
Tsor& ans = context_cast<Tsor>( forward_cache );
ans.resize( tensor.shape() );
for_each( tensor.begin(), tensor.end(), ans.begin(), [](auto const x, auto& y) { y = (x > 0.0) ? (1.0/std::max(eps, static_cast<double>(x))) : (1.0/std::min(-eps, static_cast<double>(x))); });
return ans;
},
[backward_cache]<Tensor Tsor>( Tsor const& input, Tsor const&, Tsor const& grad ) noexcept
{
Tsor& ans = context_cast<Tsor>( backward_cache );
ans.resize( input.shape() );
for_each( ans.begin(), ans.end(), grad.begin(), input.begin(), []( auto& x, auto y, auto z ){ x = - y / std::max(static_cast<double>(z*z), eps); } );
ans.resize( grad.shape() );
return ans;
},
"inverse"
)( ex );
}
///
/// @brief Multiply two input operators, elementwise.
/// @code{.cpp}
/// auto x = variable{ tensor<float>{ {2, 3, 5} } };
/// auto y = variable{ tensor<float>{ {2, 3, 5} } };
/// auto z = elementwise_product( x, y ); // z = x*y;
/// @endcode
///
template< Expression Lhs_Expression, Expression Rhs_Expression >
auto constexpr elementwise_product( Lhs_Expression const& lhs_ex, Rhs_Expression const& rhs_ex ) noexcept
{
return make_binary_operator( []<Tensor Tsor>( Tsor const& lhs_tensor, Tsor const& rhs_tensor ) noexcept
{
return elementwise_product( lhs_tensor, rhs_tensor );
},
[]<Tensor Tsor>( Tsor const& lhs_input, Tsor const& rhs_input, Tsor const&, Tsor const& grad ) noexcept
{
auto const& grad_fun = [&grad]( auto const& input, auto const& other_input )
{
Tsor ans = elementwise_product( grad, other_input );
while( input.ndim() < ans.ndim() )
ans = sum( ans, 0 );
auto const& shape = input.shape();
for ( auto axis : range( input.ndim() ) )
if ( shape[axis] == 1 )
ans = sum( ans, axis, true );
return ans;
};
return std::make_tuple( grad_fun( lhs_input, rhs_input ), grad_fun( rhs_input, lhs_input ) );
},
"elementwise_product"
)( lhs_ex, rhs_ex );
}
template< Expression Lhs_Expression, Expression Rhs_Expression >
auto constexpr elementwise_multiply( Lhs_Expression const& lhs_ex, Rhs_Expression const& rhs_ex ) noexcept
{
return elementwise_product( lhs_ex, rhs_ex );
}
template< Expression Lhs_Expression, Expression Rhs_Expression >
auto constexpr hadamard_product( Lhs_Expression const& lhs_ex, Rhs_Expression const& rhs_ex ) noexcept
{
return elementwise_product( lhs_ex, rhs_ex );
}
///
/// @brief Divide one expression by the other, elementwise.
/// @code{.cpp}
/// auto x = variable{ tensor<float>{ {17, 12} } };
/// auto y = variable{ tensor<float>{ {17, 12} } };
/// auto z = divide( x, y ); // z = x / y
/// @endcode
///
template< Expression Lhs_Expression, Expression Rhs_Expression >
auto constexpr divide( Lhs_Expression const& lhs_ex, Rhs_Expression const& rhs_ex ) noexcept
{
return elementwise_product( lhs_ex, inverse( rhs_ex ) );
}
///
/// @brief Divide one expression by the other, elementwise.
/// @code{.cpp}
/// auto x = variable{ tensor<float>{ {17, 12} } };
/// auto y = variable{ tensor<float>{ {17, 12} } };
/// auto z = x/y; // same as `divide( x, y );`
/// @endcode
///
template< Expression Lhs_Expression, Expression Rhs_Expression >
auto constexpr operator / ( Lhs_Expression const& lhs_ex, Rhs_Expression const& rhs_ex ) noexcept
{
return divide( lhs_ex, rhs_ex );
}
///
/// @brief Sum up all elements, returning a scalar.
/// @code{.cpp}
/// auto x = variable{ ... };
/// auto y = sum_reduce( x );
/// @endcode
///
template <Expression Ex>
auto constexpr sum_reduce( Ex const& ex ) noexcept
{
return make_unary_operator( []<Tensor Tsor>( Tsor const& tsor ) noexcept
{
better_assert( !has_nan( tsor ), "forward propagation for operator sum_reduce: tensor contains NaN!" );
return reduce_sum( tsor );
},
[]<Tensor Tsor>( Tsor const& input, Tsor const&, Tsor const& grad ) noexcept
{
better_assert( !has_nan( grad ), "input gradient for operator sum_reduce contains NaN!" );
better_assert( grad.size() == 1, "sum_reduce should only output one value" );
Tsor ans = ones_like( input );
ans *= grad[0];
return ans;
},
"sum_reduce",
[]( std::vector<unsigned long> const& ) noexcept { return std::vector<unsigned long>{ {1,} }; }
)( ex );
}
template <Expression Ex>
auto constexpr reduce_sum( Ex const& ex ) noexcept
{
return sum_reduce( ex );
}
///
/// @brief Computes the mean of elements across all dimensions of an expression.
/// @param ex Incoming expression.
///
/// Example code:
///
/// \code{.cpp}
/// auto va = place_holder<tensor<float>>{};
/// auto vb = variable{ random<float>{ 3, 4} };
/// auto diff = mean_reduce( va - vb );
/// \endcode
///
template <Expression Ex>
auto constexpr mean_reduce( Ex const& ex ) noexcept
{
return make_unary_operator( []<Tensor Tsor>( Tsor const& tsor ) noexcept
{
better_assert( !has_nan( tsor ), "forward propagation for operator mean: tensor contains NaN!" );
return reduce_mean( tsor );
},
[]<Tensor Tsor>( Tsor const& input, Tsor const&, Tsor const& grad ) noexcept
{
better_assert( !has_nan( grad ), "input gradient for operator mean_reduce contains NaN!" );
better_assert( grad.size() == 1, "mean_reduce should only output one value" );
Tsor ans = ones_like( input );
ans *= grad[0];
unsigned long const batch_size = (input.shape().size() == 1) ? 1 : (*(input.shape().begin()));
ans /= static_cast<typename Tsor::value_type>(batch_size);
return ans;
},
"mean_reduce",
[]( std::vector<unsigned long> const& ) noexcept { return std::vector<unsigned long>{ {1,} }; }
)( ex );
}
///
/// @brief An alias for mean_reduce.
///
template <Expression Ex>
auto constexpr reduce_mean( Ex const& ex ) noexcept
{
return mean_reduce( ex );
}
///
/// @brief An alias for mean_reduce.
///
template <Expression Ex>
auto constexpr mean( Ex const& ex ) noexcept
{
return mean_reduce( ex );
}