Machine learning in Ada (an ONNX runtime binding)

Incidentally, in my search for TTS engines usable from Ada, I’ve found a way to run PyTorch models without Python, using libtorch (a C++ library) and a small C++ wrapper program.

For example, Silero TTS could be run like this (after exporting the model from Python with model.model.save("model.pt")):

#include <torch/script.h> // One-stop header.

#include <iostream>
#include <memory>

// Minimal libtorch driver: loads a TorchScript Silero TTS model exported
// from Python via model.model.save("model.pt") and runs one synthesis call.
// Returns 0 on success, -1 if the model archive cannot be loaded.
int main(int argc, const char* argv[]) {
  torch::jit::script::Module module;
  try {
    // Deserialize the ScriptModule from a file using torch::jit::load().
    module = torch::jit::load("model.pt");
  }
  catch (const c10::Error& e) {
    // c10::Error is what libtorch throws for a missing/corrupt archive.
    std::cerr << "error loading the model\n";
    std::cerr << e.what();
    return -1;
  }

  // Input texts. The '+' and digit characters are presumably Silero
  // stress/accent markers — keep them byte-for-byte.
  std::vector<std::string> a1{"v nedrah tundry vydry v g+etrah t+yr9t v v1dra 9dra kedrov."};
  std::vector<std::string> a2{"v nedrah tundry vydry v getrah tyr9t v v1dra 9dra kedrov"};

  c10::optional<int64_t> o1;                 // empty optional -> model-side default
  c10::List<c10::optional<int64_t>> a3{o1};
  c10::List<double> a4{1.0};

  // Positional arguments for Module::forward(), in the order the exported
  // model's forward() expects.
  std::vector<torch::jit::IValue> inputs;
  inputs.push_back(torch::jit::IValue(a1));
  inputs.push_back(torch::jit::IValue(a2));
  inputs.push_back(torch::jit::IValue(a3));
  inputs.push_back(torch::jit::IValue(a4));
  // NOTE(review): a4 is pushed twice — looks deliberate (two distinct
  // scalar parameters sharing the value 1.0); confirm against the model's
  // forward() signature.
  inputs.push_back(torch::jit::IValue(a4));
  inputs.push_back(torch::ones(1, torch::TensorOptions().dtype(torch::kInt32)));

  auto output = module.forward(inputs);
  // forward() returns a tuple; materialise it once instead of calling
  // toTuple() per element access.
  const auto result_tuple = output.toTuple();
  auto output1 = result_tuple->elements()[0].toTensor();
  auto output2 = result_tuple->elements()[1].toTensor();
  std::cout << output1.sizes() << " " << output2[0].item().toInt() << "\n";
  std::cout << "ok\n";
  return 0;
}
1 Like