30#include <fmt/format.h>
32#include <opm/common/ErrorMacros.hpp>
// --- Tensor: constructors and sizing ---------------------------------------
// NOTE(review): this source is a garbled extraction; the numerals fused onto
// the start of many lines (63, 65, 70, ...) are remnants of the original
// file's line numbers, and several lines (braces, the 2-argument constructor
// header, the end of the element-count member) are missing. Code is kept
// byte-identical below; comments only.

// 1-D constructor: delegates storage allocation to resizeI with a single dim.
63 explicit Tensor(
int i)
65 resizeI<std::vector<int>>({i});
// Body of the 2-D constructor — its "Tensor(int i, int j)" signature line
// was lost in extraction.
70 resizeI<std::vector<int>>({i, j});
// 3-D constructor.
73 Tensor(
int i,
int j,
int k)
75 resizeI<std::vector<int>>({i, j, k});
// 4-D constructor (maximum supported rank).
78 Tensor(
int i,
int j,
int k,
int l)
80 resizeI<std::vector<int>>({i, j, k, l});
// resizeI: records up to four dimension sizes into dims_ (casting each to
// int) and resizes the flat data_ buffer to the product of the dims.
83 template <
typename Sizes>
84 void resizeI(
const Sizes& sizes)
86 if (sizes.size() == 1)
87 dims_ = {(int)sizes[0]};
88 if (sizes.size() == 2)
89 dims_ = {(int)sizes[0], (
int)sizes[1]};
90 if (sizes.size() == 3)
91 dims_ = {(int)sizes[0], (
int)sizes[1], (int)sizes[2]};
92 if (sizes.size() == 4)
93 dims_ = {(int)sizes[0], (
int)sizes[1], (int)sizes[2], (
int)sizes[3]};
// NOTE(review): accumulate is seeded with 1.0 (a double), so the element
// count is computed in floating point and then narrowed for resize();
// an integer seed (e.g. std::size_t{1}) would avoid that — confirm against
// the full source before changing.
95 data_.resize(std::accumulate(begin(dims_), end(dims_), 1.0, std::multiplies<>()));
// Fragment of an element-count member (its signature and return were lost):
// validates that dims_ is non-empty, then multiplies all dims together.
100 OPM_ERROR_IF(dims_.size() == 0,
"Invalid tensor");
102 int elements = dims_[0];
103 for (
unsigned int i = 1; i < dims_.size(); i++) {
104 elements *= dims_[i];
// --- Tensor: bounds-checked element access ---------------------------------
// Eight operator() overloads (const and non-const for ranks 1..4). Each
// first checks that the tensor's rank matches the number of indices, then
// range-checks every index, and finally indexes data_ with a row-major
// formula (last index varies fastest — see the return expressions below).
// NOTE(review): extraction dropped the opening braces, the tails of the
// fmt::format error messages, and the rank-1 return statements.

// Rank-1 non-const access (signature line lost in extraction).
111 OPM_ERROR_IF(dims_.size() != 1,
"Invalid indexing for tensor");
113 OPM_ERROR_IF(!(i < dims_[0] && i >= 0),
114 fmt::format(fmt::runtime(
" Invalid i: "
// Rank-1 const access.
124 const T& operator()(
int i)
const
126 OPM_ERROR_IF(dims_.size() != 1,
"Invalid indexing for tensor");
128 OPM_ERROR_IF(!(i < dims_[0] && i >= 0),
129 fmt::format(fmt::runtime(
" Invalid i: "
// Rank-2 non-const access: flat index is dims_[1]*i + j (row-major).
139 T& operator()(
int i,
int j)
141 OPM_ERROR_IF(dims_.size() != 2,
"Invalid indexing for tensor");
142 OPM_ERROR_IF(!(i < dims_[0] && i >= 0),
143 fmt::format(fmt::runtime(
" Invalid i: "
149 OPM_ERROR_IF(!(j < dims_[1] && j >= 0),
150 fmt::format(fmt::runtime(
" Invalid j: "
157 return data_[dims_[1] * i + j];
// Rank-2 const access (same checks and indexing as above).
160 const T& operator()(
int i,
int j)
const
162 OPM_ERROR_IF(dims_.size() != 2,
"Invalid indexing for tensor");
163 OPM_ERROR_IF(!(i < dims_[0] && i >= 0),
164 fmt::format(fmt::runtime(
" Invalid i: "
170 OPM_ERROR_IF(!(j < dims_[1] && j >= 0),
171 fmt::format(fmt::runtime(
" Invalid j: "
177 return data_[dims_[1] * i + j];
// Rank-3 non-const access: flat index dims_[2]*(dims_[1]*i + j) + k.
180 T& operator()(
int i,
int j,
int k)
182 OPM_ERROR_IF(dims_.size() != 3,
"Invalid indexing for tensor");
183 OPM_ERROR_IF(!(i < dims_[0] && i >= 0),
184 fmt::format(fmt::runtime(
" Invalid i: "
190 OPM_ERROR_IF(!(j < dims_[1] && j >= 0),
191 fmt::format(fmt::runtime(
" Invalid j: "
197 OPM_ERROR_IF(!(k < dims_[2] && k >= 0),
198 fmt::format(fmt::runtime(
" Invalid k: "
205 return data_[dims_[2] * (dims_[1] * i + j) + k];
// Rank-3 const access.
208 const T& operator()(
int i,
int j,
int k)
const
210 OPM_ERROR_IF(dims_.size() != 3,
"Invalid indexing for tensor");
211 OPM_ERROR_IF(!(i < dims_[0] && i >= 0),
212 fmt::format(fmt::runtime(
" Invalid i: "
218 OPM_ERROR_IF(!(j < dims_[1] && j >= 0),
219 fmt::format(fmt::runtime(
" Invalid j: "
225 OPM_ERROR_IF(!(k < dims_[2] && k >= 0),
226 fmt::format(fmt::runtime(
" Invalid k: "
233 return data_[dims_[2] * (dims_[1] * i + j) + k];
// Rank-4 non-const access: dims_[3]*(dims_[2]*(dims_[1]*i + j) + k) + l.
236 T& operator()(
int i,
int j,
int k,
int l)
238 OPM_ERROR_IF(dims_.size() != 4,
"Invalid indexing for tensor");
239 OPM_ERROR_IF(!(i < dims_[0] && i >= 0),
240 fmt::format(fmt::runtime(
" Invalid i: "
246 OPM_ERROR_IF(!(j < dims_[1] && j >= 0),
247 fmt::format(fmt::runtime(
" Invalid j: "
253 OPM_ERROR_IF(!(k < dims_[2] && k >= 0),
254 fmt::format(fmt::runtime(
" Invalid k: "
260 OPM_ERROR_IF(!(l < dims_[3] && l >= 0),
261 fmt::format(fmt::runtime(
" Invalid l: "
268 return data_[dims_[3] * (dims_[2] * (dims_[1] * i + j) + k) + l];
// Rank-4 const access.
271 const T& operator()(
int i,
int j,
int k,
int l)
const
273 OPM_ERROR_IF(dims_.size() != 4,
"Invalid indexing for tensor");
274 OPM_ERROR_IF(!(i < dims_[0] && i >= 0),
275 fmt::format(fmt::runtime(
" Invalid i: "
281 OPM_ERROR_IF(!(j < dims_[1] && j >= 0),
282 fmt::format(fmt::runtime(
" Invalid j: "
288 OPM_ERROR_IF(!(k < dims_[2] && k >= 0),
289 fmt::format(fmt::runtime(
" Invalid k: "
295 OPM_ERROR_IF(!(l < dims_[3] && l >= 0),
296 fmt::format(fmt::runtime(
" Invalid l: "
303 return data_[dims_[3] * (dims_[2] * (dims_[1] * i + j) + k) + l];
// --- Tensor: whole-tensor operations ---------------------------------------

// fill: set every element of the flat buffer to `value`.
306 void fill(
const T& value)
308 std::ranges::fill(data_, value);
// operator+: element-wise sum into a fresh Tensor.
// NOTE(review): only the *rank* (dims_.size()) is compared, not the actual
// dimension extents; mismatched shapes of equal rank would pass the check.
// Verify against the full source whether that is intentional.
312 Tensor operator+(
const Tensor& other)
314 OPM_ERROR_IF(dims_.size() != other.dims_.size(),
315 "Cannot add tensors with different dimensions");
317 result.dims_ = dims_;
318 result.data_.resize(data_.size());
320 std::ranges::transform(data_, other.data_, result.data_.begin(),
321 [](
const T& x,
const T& y) { return x + y; });
// multiply: element-wise (Hadamard) product into a fresh Tensor.
// Same rank-only check as operator+ above.
327 Tensor multiply(
const Tensor& other)
329 OPM_ERROR_IF(dims_.size() != other.dims_.size(),
330 "Cannot multiply elements with different dimensions");
333 result.dims_ = dims_;
334 result.data_.resize(data_.size());
336 std::ranges::transform(data_, other.data_, result.data_.begin(),
337 [](
const T& x,
const T& y) { return x * y; });
// dot: 2-D matrix product (both operands must be rank 2 with matching
// inner dimension); naive triple loop accumulating into tmp(i, j).
343 Tensor dot(
const Tensor& other)
345 OPM_ERROR_IF(dims_.size() != 2,
"Invalid tensor dimensions");
346 OPM_ERROR_IF(other.dims_.size() != 2,
"Invalid tensor dimensions");
348 OPM_ERROR_IF(dims_[1] != other.dims_[0],
349 "Cannot multiply with different inner dimensions");
351 Tensor tmp(dims_[0], other.dims_[1]);
353 for (
int i = 0; i < dims_[0]; i++) {
354 for (
int j = 0; j < other.dims_[1]; j++) {
355 for (
int k = 0; k < dims_[1]; k++) {
356 tmp(i, j) += (*this)(i, k) * other(k, j);
// swap: constant-time exchange of both members with another Tensor.
364 void swap(Tensor& other)
366 dims_.swap(other.dims_);
367 data_.swap(other.data_);
// Data members: dims_ holds the extents (rank 1..4); data_ is the flat
// row-major element buffer.
370 std::vector<int> dims_;
371 std::vector<T> data_;
// --- Layer / model class declarations (fragments) ---------------------------
// NOTE(review): extraction dropped most of these declarations (class headers,
// access specifiers, enumerators of ActivationType, apply() signatures, the
// NNModel class header). Only the surviving lines are annotated here.

// NNLayer<Evaluation>: abstract base; loadLayer deserializes a layer from
// an open binary stream.
380 template <
class Evaluation>
393 virtual bool loadLayer(std::ifstream& file) = 0;
// ActivationType enumerators are missing from the extraction; names used
// later (kLinear, kRelu, kSoftPlus, kHardSigmoid, kSigmoid, kTanh) imply
// the member list.
399 enum class ActivationType {
// NNLayerActivation: applies an element-wise activation function; stores
// the chosen ActivationType (defaults to kLinear).
411 template <
class Evaluation>
412 class NNLayerActivation :
public NNLayer<Evaluation>
416 NNLayerActivation(ActivationType activation_type = ActivationType::kLinear)
417 : activation_type_(activation_type)
421 bool loadLayer(std::ifstream& file)
override;
426 ActivationType activation_type_;
// NNLayerScaling: min-max scaling from [data_min, data_max] to
// [feat_inf, feat_sup] (see its apply fragment further below).
432 template <
class Evaluation>
433 class NNLayerScaling :
public NNLayer<Evaluation>
436 NNLayerScaling(
float data_min = 1.0f,
437 float data_max = 1.0f,
438 float feat_inf = 1.0f,
439 float feat_sup = 1.0f);
441 bool loadLayer(std::ifstream& file)
override;
// NNLayerUnScaling: inverse of NNLayerScaling — maps features back to the
// data range.
455 template <
class Evaluation>
456 class NNLayerUnScaling :
public NNLayer<Evaluation>
459 NNLayerUnScaling(
float data_min = 1.0f,
460 float data_max = 1.0f,
461 float feat_inf = 1.0f,
462 float feat_sup = 1.0f);
464 bool loadLayer(std::ifstream& file)
override;
// NNLayerDense: fully-connected layer (weights, biases, activation).
478 template <
class Evaluation>
479 class NNLayerDense :
public NNLayer<Evaluation>
482 NNLayerDense(
Tensor<float> weights = {},
Tensor<float> biases = {}, ActivationType activation_type = ActivationType::kLinear);
484 bool loadLayer(std::ifstream& file)
override;
// NNModel fragment: owns the ordered layer pipeline; loadModel reads the
// whole network from a binary file; start_ looks like a timing member —
// presumably paired with the start()/stop() timer documented at the end of
// this extraction; confirm against the full source.
498 template <
class Evaluation>
502 enum class LayerType { kScaling = 1, kUnScaling = 2, kDense = 3, kActivation = 4 };
507 virtual bool loadModel(
const std::string& filename);
512 std::vector<std::unique_ptr<NNLayer<Evaluation>>> layers_;
526 std::chrono::time_point<std::chrono::high_resolution_clock> start_;
// --- readFile helpers --------------------------------------------------------
// Thin wrappers over std::ifstream::read that reinterpret the destination as
// raw bytes. The bool return (presumably the stream's success state — the
// return statements were lost in extraction) lets callers wrap each read in
// OPM_ERROR_IF.

// Read one trivially-copyable T from the stream into `data`.
532 template <
typename T>
533 bool readFile(std::ifstream& file, T& data)
535 file.read(
reinterpret_cast<char*
>(&data),
sizeof(T));
// Read `n` consecutive Ts into the buffer at `data` (caller sizes it).
539 template <
typename T>
540 bool readFile(std::ifstream& file, T* data,
size_t n)
542 file.read(
reinterpret_cast<char*
>(data),
sizeof(T) * n);
// --- NNLayerActivation: deserialization and forward pass --------------------

// loadLayer: reads one unsigned int from the stream and maps it onto
// activation_type_ via a switch over ActivationType.
// NOTE(review): the `break` statements, the default case, and the closing
// of the error message were lost in extraction — do not read the visible
// fall-through as intentional.
546 template <
class Evaluation>
547 bool NNLayerActivation<Evaluation>::loadLayer(std::ifstream& file)
549 unsigned int activation = 0;
550 OPM_ERROR_IF(!readFile<unsigned int>(file, activation),
"Failed to read activation type");
552 switch (
static_cast<ActivationType
>(activation)) {
553 case ActivationType::kLinear:
554 activation_type_ = ActivationType::kLinear;
556 case ActivationType::kRelu:
557 activation_type_ = ActivationType::kRelu;
559 case ActivationType::kSoftPlus:
560 activation_type_ = ActivationType::kSoftPlus;
562 case ActivationType::kHardSigmoid:
563 activation_type_ = ActivationType::kHardSigmoid;
565 case ActivationType::kSigmoid:
566 activation_type_ = ActivationType::kSigmoid;
568 case ActivationType::kTanh:
569 activation_type_ = ActivationType::kTanh;
573 fmt::format(fmt::runtime(
"\n Unsupported activation type "
// apply fragment (its signature and the copy of `in` into `out` are
// missing): element-wise activation over out.data_.
581 template <
class Evaluation>
586 switch (activation_type_) {
587 case ActivationType::kLinear:
// kRelu: clamp negatives (the assignment inside the if was lost).
589 case ActivationType::kRelu:
590 for (
size_t i = 0; i < out.data_.size(); i++) {
591 if (out.data_[i] < 0.0) {
// kSoftPlus: log(1 + exp(x)).
596 case ActivationType::kSoftPlus:
597 for (
size_t i = 0; i < out.data_.size(); i++) {
598 out.data_[i] = log(1.0 + exp(out.data_[i]));
// kHardSigmoid: piecewise-linear sigmoid, slope 0.2 around 0.5; the
// clamping branches are missing from the extraction.
601 case ActivationType::kHardSigmoid:
602 for (
size_t i = 0; i < out.data_.size(); i++) {
603 constexpr double sigmoid_scale = 0.2;
604 const Evaluation& x = (out.data_[i] * sigmoid_scale) + 0.5;
// kSigmoid: two algebraically-equivalent forms, 1/(1+exp(-x)) and
// z/(1+z) with z = exp(x) — presumably selected by the sign of x for
// numerical stability; the branch condition itself is missing, confirm.
615 case ActivationType::kSigmoid:
616 for (
size_t i = 0; i < out.data_.size(); i++) {
617 const Evaluation& x = out.data_[i];
620 out.data_[i] = 1.0 / (1.0 + exp(-x));
622 const Evaluation& z = exp(x);
623 out.data_[i] = z / (1.0 + z);
// kTanh: computed as sinh/cosh (not a tanh() call) — presumably because
// the Evaluation AD type provides sinh/cosh overloads; confirm.
627 case ActivationType::kTanh:
628 for (
size_t i = 0; i < out.data_.size(); i++) {
629 out.data_[i] = sinh(out.data_[i]) / cosh(out.data_[i]);
// --- NNLayerScaling: constructor, deserialization, forward pass -------------

// Constructor: stores the data range [data_min, data_max] and the target
// feature range [feat_inf, feat_sup].
639 template <
class Evaluation>
640 NNLayerScaling<Evaluation>::NNLayerScaling(
float data_min,
float data_max,
float feat_inf,
float feat_sup)
641 : data_min_(data_min)
642 , data_max_(data_max)
643 , feat_inf_(feat_inf)
644 , feat_sup_(feat_sup)
// loadLayer: reads the four scaling parameters, in the same order the
// constructor takes them (return statement lost in extraction).
648 template <
class Evaluation>
649 bool NNLayerScaling<Evaluation>::loadLayer(std::ifstream& file)
651 OPM_ERROR_IF(!readFile<float>(file, data_min_),
"Failed to read data min");
652 OPM_ERROR_IF(!readFile<float>(file, data_max_),
"Failed to read data max");
653 OPM_ERROR_IF(!readFile<float>(file, feat_inf_),
"Failed to read feat inf");
654 OPM_ERROR_IF(!readFile<float>(file, feat_sup_),
"Failed to read feat sup");
// apply fragment (signature lost): min-max scaling of each element,
// (x - data_min) / (data_max - data_min) mapped into [feat_inf, feat_sup].
// NOTE(review): no guard against data_max_ == data_min_ is visible here.
658 template <
class Evaluation>
661 out.data_.resize(in.data_.size());
662 out.dims_ = in.dims_;
663 for (
size_t i = 0; i < out.data_.size(); i++) {
664 auto tempscale = (in.data_[i] - data_min_) / (data_max_ - data_min_);
665 out.data_[i] = tempscale * (feat_sup_ - feat_inf_) + feat_inf_;
// --- NNLayerUnScaling: constructor, deserialization, forward pass -----------
// Exact mirror of NNLayerScaling: maps values from the feature range
// [feat_inf, feat_sup] back to the data range [data_min, data_max].

671 template <
class Evaluation>
672 NNLayerUnScaling<Evaluation>::NNLayerUnScaling(
673 float data_min,
float data_max,
float feat_inf,
float feat_sup)
674 : data_min_(data_min)
675 , data_max_(data_max)
676 , feat_inf_(feat_inf)
677 , feat_sup_(feat_sup)
// loadLayer: same four parameters, same file order as NNLayerScaling
// (return statement lost in extraction).
681 template <
class Evaluation>
682 bool NNLayerUnScaling<Evaluation>::loadLayer(std::ifstream& file)
684 OPM_ERROR_IF(!readFile<float>(file, data_min_),
"Failed to read data min");
685 OPM_ERROR_IF(!readFile<float>(file, data_max_),
"Failed to read data max");
686 OPM_ERROR_IF(!readFile<float>(file, feat_inf_),
"Failed to read feat inf");
687 OPM_ERROR_IF(!readFile<float>(file, feat_sup_),
"Failed to read feat sup");
// apply fragment (signature lost): inverse min-max transform,
// (x - feat_inf) / (feat_sup - feat_inf) mapped into [data_min, data_max].
// NOTE(review): no guard against feat_sup_ == feat_inf_ is visible here.
692 template <
class Evaluation>
695 out.data_.resize(in.data_.size());
696 out.dims_ = in.dims_;
697 for (
size_t i = 0; i < out.data_.size(); i++) {
698 auto tempscale = (in.data_[i] - feat_inf_) / (feat_sup_ - feat_inf_);
700 out.data_[i] = tempscale * (data_max_ - data_min_) + data_min_;
// --- NNLayerDense: constructor fragment, deserialization, forward pass ------

// Constructor fragment: only the activation_ initializer survived the
// extraction; the weights_/biases_ initializers are missing.
706 template <
class Evaluation>
710 , activation_(activation_type)
// loadLayer: binary layout is
//   u32 weights_rows, u32 weights_cols, u32 biases_shape,
//   float weights[rows*cols], float biases[biases_shape],
//   then the embedded activation layer's own record.
// Each dimension is validated as strictly positive before the bulk reads.
714 template <
class Evaluation>
715 bool NNLayerDense<Evaluation>::loadLayer(std::ifstream& file)
717 unsigned int weights_rows = 0;
718 OPM_ERROR_IF(!readFile<unsigned int>(file, weights_rows),
"Expected weight rows");
719 OPM_ERROR_IF(!(weights_rows > 0),
"Invalid weights # rows");
721 unsigned int weights_cols = 0;
722 OPM_ERROR_IF(!readFile<unsigned int>(file, weights_cols),
"Expected weight cols");
723 OPM_ERROR_IF(!(weights_cols > 0),
"Invalid weights shape");
725 unsigned int biases_shape = 0;
726 OPM_ERROR_IF(!readFile<unsigned int>(file, biases_shape),
"Expected biases shape");
727 OPM_ERROR_IF(!(biases_shape > 0),
"Invalid biases shape");
729 weights_.resizeI<std::vector<unsigned int>>({weights_rows, weights_cols});
730 OPM_ERROR_IF(!readFile<float>(file, weights_.data_.data(), weights_rows * weights_cols),
"Expected weights");
732 biases_.resizeI<std::vector<unsigned int>>({biases_shape});
733 OPM_ERROR_IF(!readFile<float>(file, biases_.data_.data(), biases_shape),
"Expected biases");
735 OPM_ERROR_IF(!activation_.loadLayer(file),
"Failed to load activation");
// apply fragment (signature and tmp/temp_in setup lost): vector-matrix
// product tmp(j) += temp_in(i) * weights_(i, j), then bias addition, then
// the embedded activation layer.
774 template <
class Evaluation>
779 for (
int i = 0; i < weights_.dims_[0]; i++) {
780 for (
int j = 0; j < weights_.dims_[1]; j++) {
781 tmp(j) += (temp_in)(i)*weights_(i, j);
785 for (
int i = 0; i < biases_.dims_[0]; i++) {
786 tmp(i) += biases_(i);
789 OPM_ERROR_IF(!activation_.apply(tmp, out),
"Failed to apply activation");
// --- NNModel: model deserialization and inference ---------------------------

// loadModel: opens `filename` in binary mode, reads a u32 layer count, then
// for each layer reads a u32 LayerType tag, constructs the matching layer
// subclass, lets it deserialize itself via loadLayer, and appends it to
// layers_.
// NOTE(review): `break`s after each case, the default case body, the ends of
// the fmt::format messages, and the final return were lost in extraction.
794 template <
class Evaluation>
795 bool NNModel<Evaluation>::loadModel(
const std::string& filename)
797 std::ifstream file(filename.c_str(), std::ios::binary);
798 OPM_ERROR_IF(!file.is_open(),
799 fmt::format(fmt::runtime(
"\n Unable to open file "
803 unsigned int num_layers = 0;
804 OPM_ERROR_IF(!readFile<unsigned int>(file, num_layers),
"Expected number of layers");
806 for (
unsigned int i = 0; i < num_layers; i++) {
807 unsigned int layer_type = 0;
808 OPM_ERROR_IF(!readFile<unsigned int>(file, layer_type),
"Expected layer type");
810 std::unique_ptr<NNLayer<Evaluation>> layer =
nullptr;
812 switch (
static_cast<LayerType
>(layer_type)) {
813 case LayerType::kScaling:
814 layer = std::make_unique<NNLayerScaling<Evaluation>>();
816 case LayerType::kUnScaling:
817 layer = std::make_unique<NNLayerUnScaling<Evaluation>>();
819 case LayerType::kDense:
820 layer = std::make_unique<NNLayerDense<Evaluation>>();
822 case LayerType::kActivation:
823 layer = std::make_unique<NNLayerActivation<Evaluation>>();
830 fmt::format(fmt::runtime(
"\n Unknown layer type "
833 OPM_ERROR_IF(!layer->loadLayer(file),
834 fmt::format(fmt::runtime(
"\n Failed to load layer "
838 layers_.emplace_back(std::move(layer));
// apply fragment (signature and the temp_in handoff between iterations are
// missing): runs each layer in sequence, aborting if any layer fails.
844 template <
class Evaluation>
849 for (
unsigned int i = 0; i < layers_.size(); i++) {
855 OPM_ERROR_IF(!(layers_[i]->apply(temp_in, out)),
856 fmt::format(fmt::runtime(
"\n Failed to apply layer "
A number of commonly used algebraic functions for the localized OPM automatic differentiation (AD) framework.
Definition ml_model.hpp:413
bool apply(const Tensor< Evaluation > &in, Tensor< Evaluation > &out) override
Applies the forward pass of a dense (fully connected) neural-network layer.
Definition ml_model.hpp:775
Definition ml_model.hpp:500
Definition ml_model.hpp:518
void start()
Start the timer.
Definition ml_model.cpp:34
float stop()
Stop the timer and return elapsed time in milliseconds.
Definition ml_model.cpp:39
Implements mathematical tensor (Max 4d).
Definition ml_model.hpp:57
This class implements a small container which holds the transmissibility multipliers for all the face...
Definition Exceptions.hpp:30