opm-common
Loading...
Searching...
No Matches
ml_model.hpp
1/*
2 Copyright (c) 2016 Robert W. Rose
3 Copyright (c) 2018 Paul Maevskikh
4 Copyright (c) 2024 NORCE
5
6 Permission is hereby granted, free of charge, to any person obtaining a copy
7 of this software and associated documentation files (the "Software"), to deal
8 in the Software without restriction, including without limitation the rights
9 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 copies of the Software, and to permit persons to whom the Software is
11 furnished to do so, subject to the following conditions:
12
13 The above copyright notice and this permission notice shall be included in all
14 copies or substantial portions of the Software.
15
16 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 SOFTWARE.
23
24 Note: This file is based on kerasify/keras_model.hh
25*/
26
27#ifndef ML_MODEL_H_
28#define ML_MODEL_H_
29
30#include <fmt/format.h>
31
32#include <opm/common/ErrorMacros.hpp>
34
#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdio>
#include <fstream>
#include <functional>
#include <memory>
#include <numeric>
#include <string>
#include <utility>
#include <vector>
43
44namespace Opm
45{
46
47namespace ML
48{
49
    // Tensor (max 4-d), input/output type of the NN layers
    // ---------------------
55 template <class T>
56 class Tensor
57 {
58 public:
59 Tensor()
60 {
61 }
62
63 explicit Tensor(int i)
64 {
65 resizeI<std::vector<int>>({i});
66 }
67
68 Tensor(int i, int j)
69 {
70 resizeI<std::vector<int>>({i, j});
71 }
72
73 Tensor(int i, int j, int k)
74 {
75 resizeI<std::vector<int>>({i, j, k});
76 }
77
78 Tensor(int i, int j, int k, int l)
79 {
80 resizeI<std::vector<int>>({i, j, k, l});
81 }
82
83 template <typename Sizes>
84 void resizeI(const Sizes& sizes)
85 {
86 if (sizes.size() == 1)
87 dims_ = {(int)sizes[0]};
88 if (sizes.size() == 2)
89 dims_ = {(int)sizes[0], (int)sizes[1]};
90 if (sizes.size() == 3)
91 dims_ = {(int)sizes[0], (int)sizes[1], (int)sizes[2]};
92 if (sizes.size() == 4)
93 dims_ = {(int)sizes[0], (int)sizes[1], (int)sizes[2], (int)sizes[3]};
94
95 data_.resize(std::accumulate(begin(dims_), end(dims_), 1.0, std::multiplies<>()));
96 }
97
98 void flatten()
99 {
100 OPM_ERROR_IF(dims_.size() == 0, "Invalid tensor");
101
102 int elements = dims_[0];
103 for (unsigned int i = 1; i < dims_.size(); i++) {
104 elements *= dims_[i];
105 }
106 dims_ = {elements};
107 }
108
109 T& operator()(int i)
110 {
111 OPM_ERROR_IF(dims_.size() != 1, "Invalid indexing for tensor");
112
113 OPM_ERROR_IF(!(i < dims_[0] && i >= 0),
114 fmt::format(fmt::runtime(" Invalid i: "
115 "{}"
116 " max: "
117 "{}"),
118 i,
119 dims_[0]));
120
121 return data_[i];
122 }
123
124 const T& operator()(int i) const
125 {
126 OPM_ERROR_IF(dims_.size() != 1, "Invalid indexing for tensor");
127
128 OPM_ERROR_IF(!(i < dims_[0] && i >= 0),
129 fmt::format(fmt::runtime(" Invalid i: "
130 "{}"
131 " max: "
132 "{}"),
133 i,
134 dims_[0]));
135
136 return data_[i];
137 }
138
139 T& operator()(int i, int j)
140 {
141 OPM_ERROR_IF(dims_.size() != 2, "Invalid indexing for tensor");
142 OPM_ERROR_IF(!(i < dims_[0] && i >= 0),
143 fmt::format(fmt::runtime(" Invalid i: "
144 "{}"
145 " max: "
146 "{}"),
147 i,
148 dims_[0]));
149 OPM_ERROR_IF(!(j < dims_[1] && j >= 0),
150 fmt::format(fmt::runtime(" Invalid j: "
151 "{}"
152 " max: "
153 "{}"),
154 j,
155 dims_[1]));
156
157 return data_[dims_[1] * i + j];
158 }
159
160 const T& operator()(int i, int j) const
161 {
162 OPM_ERROR_IF(dims_.size() != 2, "Invalid indexing for tensor");
163 OPM_ERROR_IF(!(i < dims_[0] && i >= 0),
164 fmt::format(fmt::runtime(" Invalid i: "
165 "{}"
166 " max: "
167 "{}"),
168 i,
169 dims_[0]));
170 OPM_ERROR_IF(!(j < dims_[1] && j >= 0),
171 fmt::format(fmt::runtime(" Invalid j: "
172 "{}"
173 " max: "
174 "{}"),
175 j,
176 dims_[1]));
177 return data_[dims_[1] * i + j];
178 }
179
180 T& operator()(int i, int j, int k)
181 {
182 OPM_ERROR_IF(dims_.size() != 3, "Invalid indexing for tensor");
183 OPM_ERROR_IF(!(i < dims_[0] && i >= 0),
184 fmt::format(fmt::runtime(" Invalid i: "
185 "{}"
186 " max: "
187 "{}"),
188 i,
189 dims_[0]));
190 OPM_ERROR_IF(!(j < dims_[1] && j >= 0),
191 fmt::format(fmt::runtime(" Invalid j: "
192 "{}"
193 " max: "
194 "{}"),
195 j,
196 dims_[1]));
197 OPM_ERROR_IF(!(k < dims_[2] && k >= 0),
198 fmt::format(fmt::runtime(" Invalid k: "
199 "{}"
200 " max: "
201 "{}"),
202 k,
203 dims_[2]));
204
205 return data_[dims_[2] * (dims_[1] * i + j) + k];
206 }
207
208 const T& operator()(int i, int j, int k) const
209 {
210 OPM_ERROR_IF(dims_.size() != 3, "Invalid indexing for tensor");
211 OPM_ERROR_IF(!(i < dims_[0] && i >= 0),
212 fmt::format(fmt::runtime(" Invalid i: "
213 "{}"
214 " max: "
215 "{}"),
216 i,
217 dims_[0]));
218 OPM_ERROR_IF(!(j < dims_[1] && j >= 0),
219 fmt::format(fmt::runtime(" Invalid j: "
220 "{}"
221 " max: "
222 "{}"),
223 j,
224 dims_[1]));
225 OPM_ERROR_IF(!(k < dims_[2] && k >= 0),
226 fmt::format(fmt::runtime(" Invalid k: "
227 "{}"
228 " max: "
229 "{}"),
230 k,
231 dims_[2]));
232
233 return data_[dims_[2] * (dims_[1] * i + j) + k];
234 }
235
236 T& operator()(int i, int j, int k, int l)
237 {
238 OPM_ERROR_IF(dims_.size() != 4, "Invalid indexing for tensor");
239 OPM_ERROR_IF(!(i < dims_[0] && i >= 0),
240 fmt::format(fmt::runtime(" Invalid i: "
241 "{}"
242 " max: "
243 "{}"),
244 i,
245 dims_[0]));
246 OPM_ERROR_IF(!(j < dims_[1] && j >= 0),
247 fmt::format(fmt::runtime(" Invalid j: "
248 "{}"
249 " max: "
250 "{}"),
251 j,
252 dims_[1]));
253 OPM_ERROR_IF(!(k < dims_[2] && k >= 0),
254 fmt::format(fmt::runtime(" Invalid k: "
255 "{}"
256 " max: "
257 "{}"),
258 k,
259 dims_[2]));
260 OPM_ERROR_IF(!(l < dims_[3] && l >= 0),
261 fmt::format(fmt::runtime(" Invalid l: "
262 "{}"
263 " max: "
264 "{}"),
265 l,
266 dims_[3]));
267
268 return data_[dims_[3] * (dims_[2] * (dims_[1] * i + j) + k) + l];
269 }
270
271 const T& operator()(int i, int j, int k, int l) const
272 {
273 OPM_ERROR_IF(dims_.size() != 4, "Invalid indexing for tensor");
274 OPM_ERROR_IF(!(i < dims_[0] && i >= 0),
275 fmt::format(fmt::runtime(" Invalid i: "
276 "{}"
277 " max: "
278 "{}"),
279 i,
280 dims_[0]));
281 OPM_ERROR_IF(!(j < dims_[1] && j >= 0),
282 fmt::format(fmt::runtime(" Invalid j: "
283 "{}"
284 " max: "
285 "{}"),
286 j,
287 dims_[1]));
288 OPM_ERROR_IF(!(k < dims_[2] && k >= 0),
289 fmt::format(fmt::runtime(" Invalid k: "
290 "{}"
291 " max: "
292 "{}"),
293 k,
294 dims_[2]));
295 OPM_ERROR_IF(!(l < dims_[3] && l >= 0),
296 fmt::format(fmt::runtime(" Invalid l: "
297 "{}"
298 " max: "
299 "{}"),
300 l,
301 dims_[3]));
302
303 return data_[dims_[3] * (dims_[2] * (dims_[1] * i + j) + k) + l];
304 }
305
306 void fill(const T& value)
307 {
308 std::ranges::fill(data_, value);
309 }
310
311 // Tensor addition
312 Tensor operator+(const Tensor& other)
313 {
314 OPM_ERROR_IF(dims_.size() != other.dims_.size(),
315 "Cannot add tensors with different dimensions");
316 Tensor result;
317 result.dims_ = dims_;
318 result.data_.resize(data_.size());
319
320 std::ranges::transform(data_, other.data_, result.data_.begin(),
321 [](const T& x, const T& y) { return x + y; });
322
323 return result;
324 }
325
326 // Tensor multiplication
327 Tensor multiply(const Tensor& other)
328 {
329 OPM_ERROR_IF(dims_.size() != other.dims_.size(),
330 "Cannot multiply elements with different dimensions");
331
332 Tensor result;
333 result.dims_ = dims_;
334 result.data_.resize(data_.size());
335
336 std::ranges::transform(data_, other.data_, result.data_.begin(),
337 [](const T& x, const T& y) { return x * y; });
338
339 return result;
340 }
341
342 // Tensor dot for 2d tensor
343 Tensor dot(const Tensor& other)
344 {
345 OPM_ERROR_IF(dims_.size() != 2, "Invalid tensor dimensions");
346 OPM_ERROR_IF(other.dims_.size() != 2, "Invalid tensor dimensions");
347
348 OPM_ERROR_IF(dims_[1] != other.dims_[0],
349 "Cannot multiply with different inner dimensions");
350
351 Tensor tmp(dims_[0], other.dims_[1]);
352
353 for (int i = 0; i < dims_[0]; i++) {
354 for (int j = 0; j < other.dims_[1]; j++) {
355 for (int k = 0; k < dims_[1]; k++) {
356 tmp(i, j) += (*this)(i, k) * other(k, j);
357 }
358 }
359 }
360
361 return tmp;
362 }
363
364 void swap(Tensor& other)
365 {
366 dims_.swap(other.dims_);
367 data_.swap(other.data_);
368 }
369
370 std::vector<int> dims_;
371 std::vector<T> data_;
372 };
373
374 // NN layer
375 // ---------------------
380 template <class Evaluation>
381 class NNLayer
382 {
383 public:
384 NNLayer()
385 {
386 }
387
388 virtual ~NNLayer()
389 {
390 }
391
392 // Loads the ML trained file, returns true if the file exists
393 virtual bool loadLayer(std::ifstream& file) = 0;
394 // Apply the NN layers
395 virtual bool apply(const Tensor<Evaluation>& in, Tensor<Evaluation>& out) = 0;
396 };
397
    /// Identifiers of the supported activation functions. The numeric values
    /// are the on-disk encoding read by NNLayerActivation::loadLayer.
    enum class ActivationType {
        kLinear = 1,
        kRelu = 2,
        kSoftPlus = 3,
        kSigmoid = 4,
        kTanh = 5,
        kHardSigmoid = 6
    };
407
    /// Standalone activation layer: applies the configured activation
    /// function element-wise to its input tensor.
    template <class Evaluation>
    class NNLayerActivation : public NNLayer<Evaluation>
    {
    public:

        NNLayerActivation(ActivationType activation_type = ActivationType::kLinear)
            : activation_type_(activation_type)
        {
        }

        bool loadLayer(std::ifstream& file) override;

        bool apply(const Tensor<Evaluation>& in, Tensor<Evaluation>& out) override;

    private:
        ActivationType activation_type_; // which function apply() evaluates
    };
428
    /// Min-max scaling layer: maps inputs from the data range
    /// [data_min, data_max] onto the feature range [feat_inf, feat_sup].
    /// The parameters are normally read from the model file by loadLayer().
    template <class Evaluation>
    class NNLayerScaling : public NNLayer<Evaluation>
    {
    public:
        NNLayerScaling(float data_min = 1.0f,
                       float data_max = 1.0f,
                       float feat_inf = 1.0f,
                       float feat_sup = 1.0f);

        bool loadLayer(std::ifstream& file) override;

        bool apply(const Tensor<Evaluation>& in, Tensor<Evaluation>& out) override;

    private:
        float data_min_; // lower bound of the raw-data range
        float data_max_; // upper bound of the raw-data range
        float feat_inf_; // lower bound of the scaled feature range
        float feat_sup_; // upper bound of the scaled feature range
    };
451
    /// Inverse of NNLayerScaling: maps values from the feature range
    /// [feat_inf, feat_sup] back onto the data range [data_min, data_max].
    /// The parameters are normally read from the model file by loadLayer().
    template <class Evaluation>
    class NNLayerUnScaling : public NNLayer<Evaluation>
    {
    public:
        NNLayerUnScaling(float data_min = 1.0f,
                         float data_max = 1.0f,
                         float feat_inf = 1.0f,
                         float feat_sup = 1.0f);

        bool loadLayer(std::ifstream& file) override;

        bool apply(const Tensor<Evaluation>& in, Tensor<Evaluation>& out) override;

    private:
        float data_min_; // lower bound of the raw-data range
        float data_max_; // upper bound of the raw-data range
        float feat_inf_; // lower bound of the scaled feature range
        float feat_sup_; // upper bound of the scaled feature range
    };
474
478 template <class Evaluation>
479 class NNLayerDense : public NNLayer<Evaluation>
480 {
481 public:
482 NNLayerDense(Tensor<float> weights = {}, Tensor<float> biases = {}, ActivationType activation_type = ActivationType::kLinear);
483
484 bool loadLayer(std::ifstream& file) override;
485
486 bool apply(const Tensor<Evaluation>& in, Tensor<Evaluation>& out) override;
487
488 private:
489 Tensor<float> weights_;
490 Tensor<float> biases_;
491
493 };
494
498 template <class Evaluation>
500 {
501 public:
502 enum class LayerType { kScaling = 1, kUnScaling = 2, kDense = 3, kActivation = 4 };
503
504 virtual ~NNModel() = default;
505
506 // loads models (.model files) generated by Kerasify
507 virtual bool loadModel(const std::string& filename);
508
509 virtual bool apply(const Tensor<Evaluation>& in, Tensor<Evaluation>& out);
510
511 private:
512 std::vector<std::unique_ptr<NNLayer<Evaluation>>> layers_;
513 };
514
518 {
519 public:
521 void start();
523 float stop();
524
525 private:
526 std::chrono::time_point<std::chrono::high_resolution_clock> start_;
527 };
528
529
530
531
532 template <typename T>
533 bool readFile(std::ifstream& file, T& data)
534 {
535 file.read(reinterpret_cast<char*>(&data), sizeof(T));
536 return !file.fail();
537 }
538
539 template <typename T>
540 bool readFile(std::ifstream& file, T* data, size_t n)
541 {
542 file.read(reinterpret_cast<char*>(data), sizeof(T) * n);
543 return !file.fail();
544 }
545
546 template <class Evaluation>
547 bool NNLayerActivation<Evaluation>::loadLayer(std::ifstream& file)
548 {
549 unsigned int activation = 0;
550 OPM_ERROR_IF(!readFile<unsigned int>(file, activation), "Failed to read activation type");
551
552 switch (static_cast<ActivationType>(activation)) {
553 case ActivationType::kLinear:
554 activation_type_ = ActivationType::kLinear;
555 break;
556 case ActivationType::kRelu:
557 activation_type_ = ActivationType::kRelu;
558 break;
559 case ActivationType::kSoftPlus:
560 activation_type_ = ActivationType::kSoftPlus;
561 break;
562 case ActivationType::kHardSigmoid:
563 activation_type_ = ActivationType::kHardSigmoid;
564 break;
565 case ActivationType::kSigmoid:
566 activation_type_ = ActivationType::kSigmoid;
567 break;
568 case ActivationType::kTanh:
569 activation_type_ = ActivationType::kTanh;
570 break;
571 default:
572 OPM_ERROR_IF(true,
573 fmt::format(fmt::runtime("\n Unsupported activation type "
574 "{}"),
575 activation));
576 }
577
578 return true;
579 }
580
581 template <class Evaluation>
582 bool NNLayerActivation<Evaluation>::apply(const Tensor<Evaluation>& in, Tensor<Evaluation>& out)
583 {
584 out = in;
585
586 switch (activation_type_) {
587 case ActivationType::kLinear:
588 break;
589 case ActivationType::kRelu:
590 for (size_t i = 0; i < out.data_.size(); i++) {
591 if (out.data_[i] < 0.0) {
592 out.data_[i] = 0.0;
593 }
594 }
595 break;
596 case ActivationType::kSoftPlus:
597 for (size_t i = 0; i < out.data_.size(); i++) {
598 out.data_[i] = log(1.0 + exp(out.data_[i]));
599 }
600 break;
601 case ActivationType::kHardSigmoid:
602 for (size_t i = 0; i < out.data_.size(); i++) {
603 constexpr double sigmoid_scale = 0.2;
604 const Evaluation& x = (out.data_[i] * sigmoid_scale) + 0.5;
605
606 if (x <= 0) {
607 out.data_[i] = 0.0;
608 } else if (x >= 1) {
609 out.data_[i] = 1.0;
610 } else {
611 out.data_[i] = x;
612 }
613 }
614 break;
615 case ActivationType::kSigmoid:
616 for (size_t i = 0; i < out.data_.size(); i++) {
617 const Evaluation& x = out.data_[i];
618
619 if (x >= 0) {
620 out.data_[i] = 1.0 / (1.0 + exp(-x));
621 } else {
622 const Evaluation& z = exp(x);
623 out.data_[i] = z / (1.0 + z);
624 }
625 }
626 break;
627 case ActivationType::kTanh:
628 for (size_t i = 0; i < out.data_.size(); i++) {
629 out.data_[i] = sinh(out.data_[i]) / cosh(out.data_[i]);
630 }
631 break;
632 default:
633 break;
634 }
635
636 return true;
637 }
638
    /// Stores the scaling parameters; they are normally overwritten by
    /// loadLayer() when the model file is read.
    template <class Evaluation>
    NNLayerScaling<Evaluation>::NNLayerScaling(float data_min, float data_max, float feat_inf, float feat_sup)
        : data_min_(data_min)
        , data_max_(data_max)
        , feat_inf_(feat_inf)
        , feat_sup_(feat_sup)
    {
    }
647
    /// Reads the four scaling parameters from the model file, in the order
    /// written by the exporter: data min, data max, feature inf, feature sup.
    template <class Evaluation>
    bool NNLayerScaling<Evaluation>::loadLayer(std::ifstream& file)
    {
        OPM_ERROR_IF(!readFile<float>(file, data_min_), "Failed to read data min");
        OPM_ERROR_IF(!readFile<float>(file, data_max_), "Failed to read data max");
        OPM_ERROR_IF(!readFile<float>(file, feat_inf_), "Failed to read feat inf");
        OPM_ERROR_IF(!readFile<float>(file, feat_sup_), "Failed to read feat sup");
        return true;
    }
657
    /// Applies min-max scaling element-wise:
    /// out = (in - data_min) / (data_max - data_min) * (feat_sup - feat_inf) + feat_inf.
    // NOTE(review): data_max_ == data_min_ (e.g. the 1.0f defaults before
    // loadLayer has run) divides by zero — confirm loadLayer always runs first.
    template <class Evaluation>
    bool NNLayerScaling<Evaluation>::apply(const Tensor<Evaluation>& in, Tensor<Evaluation>& out)
    {
        out.data_.resize(in.data_.size());
        out.dims_ = in.dims_;
        for (size_t i = 0; i < out.data_.size(); i++) {
            // Normalize to [0, 1] within the data range ...
            auto tempscale = (in.data_[i] - data_min_) / (data_max_ - data_min_);
            // ... then stretch/shift onto the feature range.
            out.data_[i] = tempscale * (feat_sup_ - feat_inf_) + feat_inf_;
        }

        return true;
    }
670
    /// Stores the un-scaling parameters; they are normally overwritten by
    /// loadLayer() when the model file is read.
    template <class Evaluation>
    NNLayerUnScaling<Evaluation>::NNLayerUnScaling(
        float data_min, float data_max, float feat_inf, float feat_sup)
        : data_min_(data_min)
        , data_max_(data_max)
        , feat_inf_(feat_inf)
        , feat_sup_(feat_sup)
    {
    }
680
    /// Reads the four un-scaling parameters from the model file, in the order
    /// written by the exporter: data min, data max, feature inf, feature sup.
    template <class Evaluation>
    bool NNLayerUnScaling<Evaluation>::loadLayer(std::ifstream& file)
    {
        OPM_ERROR_IF(!readFile<float>(file, data_min_), "Failed to read data min");
        OPM_ERROR_IF(!readFile<float>(file, data_max_), "Failed to read data max");
        OPM_ERROR_IF(!readFile<float>(file, feat_inf_), "Failed to read feat inf");
        OPM_ERROR_IF(!readFile<float>(file, feat_sup_), "Failed to read feat sup");

        return true;
    }
691
    /// Inverse of NNLayerScaling::apply, element-wise:
    /// out = (in - feat_inf) / (feat_sup - feat_inf) * (data_max - data_min) + data_min.
    // NOTE(review): feat_sup_ == feat_inf_ (e.g. the 1.0f defaults before
    // loadLayer has run) divides by zero — confirm loadLayer always runs first.
    template <class Evaluation>
    bool NNLayerUnScaling<Evaluation>::apply(const Tensor<Evaluation>& in, Tensor<Evaluation>& out)
    {
        out.data_.resize(in.data_.size());
        out.dims_ = in.dims_;
        for (size_t i = 0; i < out.data_.size(); i++) {
            // Normalize to [0, 1] within the feature range ...
            auto tempscale = (in.data_[i] - feat_inf_) / (feat_sup_ - feat_inf_);

            // ... then stretch/shift back onto the data range.
            out.data_[i] = tempscale * (data_max_ - data_min_) + data_min_;
        }

        return true;
    }
705
706 template <class Evaluation>
707 NNLayerDense<Evaluation>::NNLayerDense(Tensor<float> weights, Tensor<float> biases, ActivationType activation_type)
708 : weights_(weights)
709 , biases_(biases)
710 , activation_(activation_type)
711 {
712 }
713
    /// Reads the dense layer from the model file: the weight-matrix shape
    /// (rows, cols), the bias-vector length, the raw float data for both,
    /// and finally the trailing activation layer.
    template <class Evaluation>
    bool NNLayerDense<Evaluation>::loadLayer(std::ifstream& file)
    {
        unsigned int weights_rows = 0;
        OPM_ERROR_IF(!readFile<unsigned int>(file, weights_rows), "Expected weight rows");
        OPM_ERROR_IF(!(weights_rows > 0), "Invalid weights # rows");

        unsigned int weights_cols = 0;
        OPM_ERROR_IF(!readFile<unsigned int>(file, weights_cols), "Expected weight cols");
        OPM_ERROR_IF(!(weights_cols > 0), "Invalid weights shape");

        unsigned int biases_shape = 0;
        OPM_ERROR_IF(!readFile<unsigned int>(file, biases_shape), "Expected biases shape");
        OPM_ERROR_IF(!(biases_shape > 0), "Invalid biases shape");

        // Shape first, then bulk-read the raw floats straight into storage.
        weights_.resizeI<std::vector<unsigned int>>({weights_rows, weights_cols});
        OPM_ERROR_IF(!readFile<float>(file, weights_.data_.data(), weights_rows * weights_cols), "Expected weights");

        biases_.resizeI<std::vector<unsigned int>>({biases_shape});
        OPM_ERROR_IF(!readFile<float>(file, biases_.data_.data(), biases_shape), "Expected biases");

        // The dense layer's activation is serialized immediately after it.
        OPM_ERROR_IF(!activation_.loadLayer(file), "Failed to load activation");

        return true;
    }
739
774 template <class Evaluation>
776 {
777 Tensor<Evaluation> tmp(weights_.dims_[1]);
778 Tensor<Evaluation> temp_in(in);
779 for (int i = 0; i < weights_.dims_[0]; i++) {
780 for (int j = 0; j < weights_.dims_[1]; j++) {
781 tmp(j) += (temp_in)(i)*weights_(i, j);
782 }
783 }
784
785 for (int i = 0; i < biases_.dims_[0]; i++) {
786 tmp(i) += biases_(i);
787 }
788
789 OPM_ERROR_IF(!activation_.apply(tmp, out), "Failed to apply activation");
790
791 return true;
792 }
793
794 template <class Evaluation>
795 bool NNModel<Evaluation>::loadModel(const std::string& filename)
796 {
797 std::ifstream file(filename.c_str(), std::ios::binary);
798 OPM_ERROR_IF(!file.is_open(),
799 fmt::format(fmt::runtime("\n Unable to open file "
800 "{}"),
801 filename.c_str()));
802
803 unsigned int num_layers = 0;
804 OPM_ERROR_IF(!readFile<unsigned int>(file, num_layers), "Expected number of layers");
805
806 for (unsigned int i = 0; i < num_layers; i++) {
807 unsigned int layer_type = 0;
808 OPM_ERROR_IF(!readFile<unsigned int>(file, layer_type), "Expected layer type");
809
810 std::unique_ptr<NNLayer<Evaluation>> layer = nullptr;
811
812 switch (static_cast<LayerType>(layer_type)) {
813 case LayerType::kScaling:
814 layer = std::make_unique<NNLayerScaling<Evaluation>>();
815 break;
816 case LayerType::kUnScaling:
817 layer = std::make_unique<NNLayerUnScaling<Evaluation>>();
818 break;
819 case LayerType::kDense:
820 layer = std::make_unique<NNLayerDense<Evaluation>>();
821 break;
822 case LayerType::kActivation:
823 layer = std::make_unique<NNLayerActivation<Evaluation>>();
824 break;
825 default:
826 break;
827 }
828
829 OPM_ERROR_IF(!layer,
830 fmt::format(fmt::runtime("\n Unknown layer type "
831 "{}"),
832 layer_type));
833 OPM_ERROR_IF(!layer->loadLayer(file),
834 fmt::format(fmt::runtime("\n Failed to load layer "
835 "{}"),
836 i));
837
838 layers_.emplace_back(std::move(layer));
839 }
840
841 return true;
842 }
843
844 template <class Evaluation>
845 bool NNModel<Evaluation>::apply(const Tensor<Evaluation>& in, Tensor<Evaluation>& out)
846 {
847 Tensor<Evaluation> temp_in(in);
848
849 for (unsigned int i = 0; i < layers_.size(); i++) {
850
851 if (i > 0) {
852 temp_in.swap(out);
853 }
854
855 OPM_ERROR_IF(!(layers_[i]->apply(temp_in, out)),
856 fmt::format(fmt::runtime("\n Failed to apply layer "
857 "{}"),
858 i));
859 }
860 return true;
861 }
862
863} // namespace ML
864
865} // namespace Opm
866
867#endif // ML_MODEL_H_
A number of commonly used algebraic functions for the localized OPM automatic differentiation (AD) fr...
Definition ml_model.hpp:413
bool apply(const Tensor< Evaluation > &in, Tensor< Evaluation > &out) override
Applies the forward pass of a dense (fully connected) neural-network layer.
Definition ml_model.hpp:775
Definition ml_model.hpp:500
Definition ml_model.hpp:518
void start()
Start the timer.
Definition ml_model.cpp:34
float stop()
Stop the timer and return elapsed time in milliseconds.
Definition ml_model.cpp:39
Implements mathematical tensor (Max 4d).
Definition ml_model.hpp:57
This class implements a small container which holds the transmissibility mulitpliers for all the face...
Definition Exceptions.hpp:30