// NOTE(review): this chunk is a damaged extraction — original source line
// numbers (e.g. "191") are fused into the text and several lines are elided
// (notably the `try {` that must precede the visible `catch`). Code bytes are
// left untouched; only comments are added.
//
// Loads a TorchScript model from `filename`; `device` is a CUDA device index
// used to build a "cuda:N" device string. Errors from torch::jit::load are
// caught and logged, not rethrown.
191 void NeuralModel::LoadModel(
char* filename,
int device) {
// Disable the tensor-expression fuser before loading the scripted module
// (commonly done to avoid JIT fuser issues at inference time).
193 torch::jit::setTensorExprFuserEnabled(
false);
195 std::string filename_str(filename);
197 std::cout <<
"loading " << filename_str << std::endl;
// Presumably inside a try-block; the opening `try {` is elided from this view.
200 Model->module = torch::jit::load(filename_str);
// Device string for the requested CUDA device; its consumer (likely a
// module.to(...) call) is not visible here — TODO confirm against full source.
202 std::string cuda_str =
"cuda:" + std::to_string(device);
206 }
catch (
const c10::Error& e) {
// Load failure is logged only — Model->module is left in whatever state
// torch::jit::load left it; callers see no error signal.
208 std::cout <<
"Error loading model: " << e.msg() << std::endl;
// "loaded" is printed even on the error path above (catch does not return) —
// NOTE(review): verify this is intentional in the full source.
211 std::cout <<
"loaded " << filename_str << std::endl;
// Runs one inference pass of the loaded TorchScript module on the staged
// `_input` state and unpacks the returned 8-element tuple of tensors.
// NOTE(review): elided from this extraction — the wheel-input push_back lines
// (original lines ~222-227), the `try {` before forward(), and the
// `_output.* = ...(` call prefixes that the orphaned `Tensors[i]...` argument
// lines below belong to. Code bytes left untouched.
219 void NeuralModel::Forward() {
221 std::vector<torch::jit::IValue> TorchInputs;
// Driving controls packed into a single float32 tensor (kept on CPU here).
228 auto drv_inputs = torch::tensor(
229 {_input.steering, _input.throttle, _input.braking}, torch::kFloat32);
230 TorchInputs.push_back(drv_inputs);
// Terrain type is optional: negative values mean "not provided".
232 if (_input.terrain_type >= 0) {
233 TorchInputs.push_back(_input.terrain_type);
236 TorchInputs.push_back(_input.verbose);
238 torch::jit::IValue Output;
// Presumably inside a try-block; the opening `try {` is elided from this view.
241 Output = Model->module.forward(TorchInputs);
242 }
catch (
const c10::Error& e) {
// Inference failure is logged only; Output stays a default IValue, so the
// toTuple() below would then fail — behavior of the full source not visible.
244 std::cout <<
"Error running model: " << e.msg() << std::endl;
// Expects the module to return a tuple of (at least) 8 tensors; pairs
// (i, i+4) are moved to CPU together, presumably one pair per wheel —
// the receiving `_output...(` call prefixes are elided from this view.
247 std::vector<torch::jit::IValue> Tensors = Output.toTuple()->elements();
250 Tensors[0].toTensor().cpu(), Tensors[4].toTensor().cpu() );
252 Tensors[1].toTensor().cpu(), Tensors[5].toTensor().cpu() );
254 Tensors[2].toTensor().cpu(), Tensors[6].toTensor().cpu() );
256 Tensors[3].toTensor().cpu(), Tensors[7].toTensor().cpu() );
// Same inference pass as Forward() (same input packing, same 8-tensor tuple
// unpack) but additionally releases cached CUDA allocator blocks afterwards.
// NOTE(review): as with Forward(), wheel-input lines, the `try {`, and the
// `_output.* = ...(` call prefixes are elided from this extraction.
259 void NeuralModel::ForwardDynamic() {
262 std::vector<torch::jit::IValue> TorchInputs;
// Driving controls packed into a single float32 tensor (CPU-side).
267 auto drv_inputs = torch::tensor(
269 {_input.steering, _input.throttle, _input.braking}, torch::kFloat32);
271 TorchInputs.push_back(drv_inputs);
// Terrain type is optional: negative values mean "not provided".
272 if (_input.terrain_type >= 0) {
273 TorchInputs.push_back(_input.terrain_type);
275 TorchInputs.push_back(_input.verbose);
277 torch::jit::IValue Output;
// Presumably inside a try-block; the opening `try {` is elided from this view.
280 Output = Model->module.forward(TorchInputs);
281 }
catch (
const c10::Error& e) {
// Inference failure is logged only, not rethrown.
283 std::cout <<
"Error running model: " << e.msg() << std::endl;
// Tuple unpack mirrors Forward(): pairs (i, i+4) brought to CPU; the
// receiving call prefixes are elided from this view.
286 std::vector<torch::jit::IValue> Tensors = Output.toTuple()->elements();
289 Tensors[0].toTensor().cpu(), Tensors[4].toTensor().cpu());
292 Tensors[1].toTensor().cpu(), Tensors[5].toTensor().cpu());
295 Tensors[2].toTensor().cpu(), Tensors[6].toTensor().cpu());
298 Tensors[3].toTensor().cpu(), Tensors[7].toTensor().cpu());
// Return cached GPU memory to the driver after each dynamic pass — trades
// allocator reuse for a lower steady-state footprint.
302 c10::cuda::CUDACachingAllocator::emptyCache();
// CUDA-resident variant of Forward(): wheel inputs are staged directly on the
// GPU via GetWheelTensorInputsCUDA and the driving-controls tensor is moved
// with .cuda() before inference. NOTE(review): the `try {` before forward(),
// the `_output.* = ...(` call prefixes, and the function's closing brace are
// elided from this extraction; code bytes left untouched.
306 void NeuralModel::ForwardCUDATensors()
309 std::vector<torch::jit::IValue> TorchInputs;
// One CUDA input tensor per wheel, tagged with its wheel index 0..3.
311 TorchInputs.push_back(Model->GetWheelTensorInputsCUDA(_input.wheel0, 0));
312 TorchInputs.push_back(Model->GetWheelTensorInputsCUDA(_input.wheel1, 1));
313 TorchInputs.push_back(Model->GetWheelTensorInputsCUDA(_input.wheel2, 2));
314 TorchInputs.push_back(Model->GetWheelTensorInputsCUDA(_input.wheel3, 3));
316 auto drv_inputs = torch::tensor(
317 {_input.steering, _input.throttle, _input.braking}, torch::kFloat32);
// Driving controls must live on the same device as the wheel inputs.
318 TorchInputs.push_back(drv_inputs.cuda());
// Terrain type is optional: negative values mean "not provided".
320 if (_input.terrain_type >= 0) {
322 TorchInputs.push_back(_input.terrain_type);
324 TorchInputs.push_back(_input.verbose);
326 torch::jit::IValue Output;
// Presumably inside a try-block; the opening `try {` is elided from this view.
329 Output = Model->module.forward(TorchInputs);
330 }
catch (
const c10::Error& e) {
// Inference failure is logged only, not rethrown.
331 std::cout <<
"Error running model: " << e.msg() << std::endl;
// Tuple unpack mirrors Forward(): pairs (i, i+4) are copied GPU -> CPU; the
// receiving `_output...(` call prefixes are elided from this view.
334 std::vector<torch::jit::IValue> Tensors = Output.toTuple()->elements();
337 Tensors[0].toTensor().cpu(), Tensors[4].toTensor().cpu() );
339 Tensors[1].toTensor().cpu(), Tensors[5].toTensor().cpu() );
341 Tensors[2].toTensor().cpu(), Tensors[6].toTensor().cpu() );
343 Tensors[3].toTensor().cpu(), Tensors[7].toTensor().cpu() );