diff --git a/examples/acasxu/acasxu.why b/examples/acasxu/acasxu.why
index 5c4a91ae13430797957885ee1049ec15c3ce1162..2d169bebfd74cfdcbbccf0c142758eb7c6aed350 100644
--- a/examples/acasxu/acasxu.why
+++ b/examples/acasxu/acasxu.why
@@ -12,7 +12,6 @@ theory ACASXU
   use int.Int
   use caisar.types.Vector
   use caisar.model.Model
-  use caisar.model.NN
 
   (****************************************************************************)
   (* Meaningful names for indices of the input vector features. *)
@@ -43,7 +42,7 @@ theory ACASXU
   *)
 
   val constant model_filename: string
-  let constant nn: model nn = read_model model_filename
+  let constant nn: model = read_model model_filename
 
   (****************************************************************************)
   (* Utilities. *)
diff --git a/examples/mnist/mnist.why b/examples/mnist/mnist.why
index 0e7baa4a63b7ee94e209c294fca1965fe4911592..c4d0fb12cf7b25b2dc003077b4de93b02e2b2a64 100644
--- a/examples/mnist/mnist.why
+++ b/examples/mnist/mnist.why
@@ -4,7 +4,6 @@ theory MNIST
   use caisar.types.Float64WithBounds as Feature
   use caisar.types.IntWithBounds as Label
   use caisar.model.Model
-  use caisar.model.NN
   use caisar.dataset.CSV
   use caisar.robust.ClassRobustCSV
 
diff --git a/lib/onnx/tests/dune b/lib/onnx/tests/dune
index 14925791b345e145bcc6b6891ad56d890e9a2606..eb789360f9b56ff0cbc9a0d01711bb0a2e80699a 100644
--- a/lib/onnx/tests/dune
+++ b/lib/onnx/tests/dune
@@ -1,6 +1,6 @@
 (tests
  (names print)
- (libraries caisar.onnx caisar.ir unix)
+ (libraries caisar.onnx caisar.nir unix)
  (deps
   ../../../examples/acasxu/nets/onnx/ACASXU_1_1.onnx
   ../../../tests/bin/inspect_onnx.py))
diff --git a/lib/onnx/tests/print.ml b/lib/onnx/tests/print.ml
index 166927b701182cfb950ea25e547757824c856886..a8f37b07730b4423e4eab195c6aaacfdb0b13b7e 100644
--- a/lib/onnx/tests/print.ml
+++ b/lib/onnx/tests/print.ml
@@ -7,13 +7,13 @@ let () =
 let temporary_file = "out/test.onnx"
 
 let () =
-  match Onnx.Simple.parse file with
+  match Onnx.Reader.from_file file with
   | Error s -> print_endline s
   | Ok { nir = Error s; _ } -> print_endline s
   | Ok { nir = Ok g; _ } -> (
     print_endline "ok";
-    Onnx.Simple.write g temporary_file;
-    match Onnx.Simple.parse temporary_file with
+    Onnx.Writer.to_file g temporary_file;
+    match Onnx.Reader.from_file temporary_file with
     | Error s -> print_endline s
     | Ok { nir = Error s; _ } -> print_endline s
     | Ok { nir = Ok _; _ } -> print_endline "ok")
diff --git a/src/interpretation/interpreter_theory.ml b/src/interpretation/interpreter_theory.ml
index 17876102066388f89dc8dfe24edaba4655050783..6f079ff1c9579000613adab0951dad4d0e780e98 100644
--- a/src/interpretation/interpreter_theory.ml
+++ b/src/interpretation/interpreter_theory.ml
@@ -35,13 +35,17 @@ let fail_on_unexpected_argument ls =
 module Vector = struct
   let (get : _ IRE.builtin) =
    fun engine ls vl ty ->
+    let th_model = Symbols.Model.create (IRE.user_env engine).ITypes.env in
     match vl with
     | [
      Term
-       ({ t_node = Tapp (_ (* @@ *), [ { t_node = Tapp (ls, _); _ }; _ ]); _ }
-       as _t1);
+       ({
+          t_node = Tapp (ls_atat (* @@ *), [ { t_node = Tapp (ls, _); _ }; _ ]);
+          _;
+        } as _t1);
      Term ({ t_node = Tconst (ConstInt i); _ } as t2);
-    ] -> (
+    ]
+      when Why3.Term.ls_equal ls_atat th_model.atat -> (
       let i = Why3.Number.to_small_integer i in
       if i < 0
       then
@@ -81,17 +85,14 @@ module Vector = struct
         Logging.code_error ~src:Logging.src_interpret_goal (fun m ->
           m "Unexpected interpreter operation %a" ITypes.pp_interpreter_op
             interpreter_op))
-    | [
-     Term ({ t_node = Tapp (ls1, tl1); _ } as t1);
-     Term ({ t_node = Tconst (ConstInt i); _ } as t2);
-    ] -> (
+    | [ Term t1; Term ({ t_node = Tconst (ConstInt i); _ } as t2) ] -> (
       let i = Why3.Number.to_small_integer i in
       if i < 0
       then
         Logging.user_error ?loc:t2.t_loc (fun m ->
           m "Index constant %d is negative" i);
-      match ITypes.op_of_ls engine ls1 with
-      | Dataset (_, DS_csv csv) ->
+      match ITypes.op_of_term engine t1 with
+      | Some (Dataset (_, DS_csv csv), _) ->
         let row = List.nth_exn csv i in
         let label, features =
           match row with
@@ -108,7 +109,7 @@ module Vector = struct
             Why3.(Term.t_int_const (BigInt.of_int (Int.of_string label))) )
         in
         IRE.value_term (Why3.Term.t_tuple [ t_label; t_features ])
-      | Vector v ->
+      | Some (Vector v, tl1) ->
         let n = Option.value_exn (Language.lookup_vector v) in
         if List.length tl1 <> n
         then
@@ -121,10 +122,11 @@ module Vector = struct
         else
           Logging.user_error ?loc:t1.t_loc (fun m ->
             m "Index constant %d is out-of-bounds [0,%d]" i (n - 1))
-      | interpreter_op ->
+      | Some (interpreter_op, _) ->
         Logging.code_error ~src:Logging.src_interpret_goal (fun m ->
           m "Unexpected interpreter operation %a" ITypes.pp_interpreter_op
-            interpreter_op))
+            interpreter_op)
+      | None -> IRE.reconstruct_term ())
     | [ Term _t1; Term _t2 ] -> IRE.reconstruct_term ()
     | _ -> fail_on_unexpected_argument ls
 
@@ -276,111 +278,31 @@ module Vector = struct
 end
 
 (* -------------------------------------------------------------------------- *)
-(* --- Model NN Builtins                                                      *)
-(* -------------------------------------------------------------------------- *)
-
-module NN = struct
-  let read_model : _ IRE.builtin =
-   fun engine ls vl ty ->
-    match vl with
-    | [ Term { t_node = Tconst (ConstStr neural_network); t_loc; _ } ] ->
-      let { ITypes.env; cwd; _ } = IRE.user_env engine in
-      let op =
-        let nn_ext = Stdlib.Filename.extension neural_network in
-        let nn_format, nn_type =
-          match String.lowercase nn_ext with
-          | ".nnet" -> (`NNet, ITypes.NNet)
-          | ".onnx" -> (`ONNX, ITypes.ONNX)
-          | _ ->
-            Logging.user_error ?loc:t_loc (fun m ->
-              m "Unrecognized neural network format '%s'" nn_ext)
-        in
-        let nn_filename = Stdlib.Filename.concat cwd neural_network in
-        let nn = Language.create_nn env nn_format nn_filename in
-        ITypes.Model (NN (nn, nn_type))
-      in
-      IRE.value_term (ITypes.term_of_op engine op ty)
-    | [ Term _t1; Term _t2 ] -> IRE.reconstruct_term ()
-    | _ -> fail_on_unexpected_argument ls
-
-  let apply : _ IRE.builtin =
-   fun engine ls vl _ty ->
-    match vl with
-    | [
-     Term ({ t_node = Tapp (ls1, []); _ } as t1);
-     Term ({ t_node = Tapp (ls2, tl2); _ } as t2);
-    ] -> (
-      match (ITypes.op_of_ls engine ls1, ITypes.op_of_ls engine ls2) with
-      | Model (NN (nn, _)), Vector v ->
-        let nn =
-          match Language.lookup_nn nn with
-          | None ->
-            Logging.code_error ~src:Logging.src_interpret_goal (fun m ->
-              m "Cannot find neural network model from lsymbol %a"
-                Why3.Pretty.print_ls nn)
-          | Some nn -> nn
-        in
-        let length_v =
-          match Language.lookup_vector v with
-          | None ->
-            Logging.code_error ~src:Logging.src_interpret_goal (fun m ->
-              m "Cannot find vector from lsymbol %a" Why3.Pretty.print_ls v)
-          | Some n ->
-            if List.length tl2 <> n
-            then
-              Logging.code_error ~src:Logging.src_interpret_goal (fun m ->
-                m
-                  "Mismatch between (container) vector length and number of \
-                   (contained) input variables.");
-            n
-        in
-        if nn.nn_nb_inputs <> length_v
-        then
-          Logging.user_error ?loc:t2.t_loc (fun m ->
-            m
-              "Unexpected vector of length %d in input to neural network model \
-               '%s',@ which expects input vectors of length %d"
-              length_v nn.nn_filename nn.nn_nb_inputs)
-        else IRE.reconstruct_term ()
-      | Model (SVM _), _ ->
-        (* Should be already catched by the Why3 typing. *)
-        assert false
-      | _, _ ->
-        Logging.user_error ?loc:t1.t_loc (fun m ->
-          m "Unexpected neural network model application"))
-    | _ -> fail_on_unexpected_argument ls
-
-  let builtins : _ IRE.built_in_theories =
-    ( [ "caisar"; "model" ],
-      "NN",
-      [],
-      [
-        ([ "read_model" ], None, read_model);
-        ([ Why3.Ident.op_infix "@@" ], None, apply);
-      ] )
-end
-
-(* -------------------------------------------------------------------------- *)
-(* --- Model SVM Builtins                                                     *)
+(* --- Model Builtins                                                         *)
 (* -------------------------------------------------------------------------- *)
 
-module SVM = struct
+module Model = struct
   let read_model : _ IRE.builtin =
    fun engine ls vl ty ->
     match vl with
-    | [ Term { t_node = Tconst (ConstStr svm); t_loc; _ } ] ->
+    | [ Term { t_node = Tconst (ConstStr model_filename); t_loc; _ } ] ->
       let { ITypes.env; cwd; _ } = IRE.user_env engine in
       let op =
-        let svm_ext = Stdlib.Filename.extension svm in
-        let svm_filename = Stdlib.Filename.concat cwd svm in
-        let svm =
-          match String.lowercase svm_ext with
-          | ".ovo" -> Language.create_svm env svm_filename
-          | _ ->
-            Logging.user_error ?loc:t_loc (fun m ->
-              m "Unrecognized SVM format '%s'" svm_ext)
+        let model_ext = Stdlib.Filename.extension model_filename in
+        let mk_nn nn_format nn_type =
+          let nn_filename = Stdlib.Filename.concat cwd model_filename in
+          let nn = Language.create_nn env nn_format nn_filename in
+          ITypes.Model (NN (nn, nn_type))
         in
-        ITypes.Model (SVM svm)
+        match String.lowercase model_ext with
+        | ".nnet" -> mk_nn `NNet ITypes.NNet
+        | ".onnx" -> mk_nn `ONNX ITypes.ONNX
+        | ".ovo" ->
+          let svm_filename = Stdlib.Filename.concat cwd model_filename in
+          ITypes.Model (SVM (Language.create_svm env svm_filename))
+        | _ ->
+          Logging.user_error ?loc:t_loc (fun m ->
+            m "Unrecognized model format '%s'" model_ext)
       in
       IRE.value_term (ITypes.term_of_op engine op ty)
     | [ Term _t1; Term _t2 ] -> IRE.reconstruct_term ()
@@ -420,54 +342,77 @@ module SVM = struct
     | _ -> fail_on_unexpected_argument ls
 
   let apply : _ IRE.builtin =
-   fun engine ls vl _ty ->
+   fun engine ls vl ty ->
     match vl with
-    | [
-     Term ({ t_node = Tapp (ls1, []); _ } as t1);
-     Term ({ t_node = Tapp (ls2, tl2); _ } as t2);
-    ] -> (
-      match (ITypes.op_of_ls engine ls1, ITypes.op_of_ls engine ls2) with
-      | Model (SVM svm), Vector v ->
-        let svm =
-          match Language.lookup_svm svm with
-          | None ->
-            Logging.code_error ~src:Logging.src_interpret_goal (fun m ->
-              m "Cannot find SVM model from lsymbol %a" Why3.Pretty.print_ls svm)
-          | Some svm -> svm
+    | [ Term t1; Term t2 ] -> (
+      match ITypes.op_of_term engine t1 with
+      | Some (Model model, []) ->
+        let { ITypes.env; _ } = IRE.user_env engine in
+        let ty_elt = (Symbols.Float64.create env).ty in
+        let nb_inputs, nb_outputs, filename =
+          match model with
+          | NN (nn, _) ->
+            let nn = Option.value_exn (Language.lookup_nn nn) in
+            (nn.nn_nb_inputs, nn.nn_nb_outputs, nn.nn_filename)
+          | SVM svm ->
+            let svm = Option.value_exn (Language.lookup_svm svm) in
+            (svm.svm_nb_inputs, svm.svm_nb_outputs, svm.svm_filename)
         in
-        let length_v =
-          match Language.lookup_vector v with
-          | None ->
-            Logging.code_error ~src:Logging.src_interpret_goal (fun m ->
-              m "Cannot find vector from lsymbol %a" Why3.Pretty.print_ls v)
-          | Some n ->
-            if List.length tl2 <> n
-            then
+        (match ITypes.op_of_term engine t2 with
+        | Some (Vector v, tl2) ->
+          let length_v =
+            match Language.lookup_vector v with
+            | None ->
               Logging.code_error ~src:Logging.src_interpret_goal (fun m ->
-                m
-                  "Mismatch between (container) vector length and number of \
-                   (contained) input variables.");
-            n
-        in
-        if svm.svm_nb_inputs <> length_v
+                m "Cannot find vector from lsymbol %a" Why3.Pretty.print_ls v)
+            | Some n ->
+              if List.length tl2 <> n
+              then
+                Logging.code_error ~src:Logging.src_interpret_goal (fun m ->
+                  m
+                    "Mismatch between (container) vector length and number of \
+                     (contained) input variables.");
+              n
+          in
+          if nb_inputs <> length_v
+          then
+            Logging.user_error ?loc:t2.t_loc (fun m ->
+              m
+                "Unexpected vector of length %d in input to model '%s',@ which \
+                 expects input vectors of length %d"
+                length_v filename nb_inputs)
+        | _ ->
+          ()
+          (* Logging.user_error ?loc:t1.t_loc (fun m -> m "Unexpected neural
+             network model application: %a" Why3.Pretty.print_term t2) *));
+        if false
         then
-          Logging.user_error ?loc:t2.t_loc (fun m ->
-            m
-              "Unexpected vector of length %d in input to SVM model '%s',@ \
-               which expects input vectors of length %d"
-              length_v svm.svm_filename svm.svm_nb_inputs)
+          let th = Why3.Env.read_theory env [ "caisar"; "types" ] "Vector" in
+          let get =
+            Why3.Theory.ns_find_ls th.th_export [ Why3.Ident.op_get "" ]
+          in
+          let t0 = Why3.Term.t_app ls [ t1; t2 ] ty in
+          let args =
+            List.init nb_outputs ~f:(fun i ->
+              ( Why3.Term.fs_app get
+                  [
+                    t0;
+                    Why3.Term.t_const
+                      (Why3.Constant.int_const_of_int i)
+                      Why3.Ty.ty_int;
+                  ]
+                  ty_elt,
+                ty_elt ))
+          in
+          let op = ITypes.Vector (Language.create_vector env nb_outputs) in
+          IRE.value_term (ITypes.term_of_op ~args engine op ty)
         else IRE.reconstruct_term ()
-      | Model (NN _), _ ->
-        (* Should be already catched by the Why3 typing. *)
-        assert false
-      | _, _ ->
-        Logging.user_error ?loc:t1.t_loc (fun m ->
-          m "Unexpected SVM model application"))
+      | _ -> IRE.reconstruct_term ())
     | _ -> fail_on_unexpected_argument ls
 
   let builtins : _ IRE.built_in_theories =
     ( [ "caisar"; "model" ],
-      "SVM",
+      "Model",
       [],
       [
         ([ "read_model" ], None, read_model);
@@ -549,4 +494,4 @@ module CSV = struct
       ] )
 end
 
-let builtins = [ Vector.builtins; NN.builtins; SVM.builtins; CSV.builtins ]
+let builtins = [ Vector.builtins; Model.builtins; CSV.builtins ]
diff --git a/src/interpretation/interpreter_types.ml b/src/interpretation/interpreter_types.ml
index d538870d9396179fb93f2178445c09437041b2bc..0e015b975d69ac2353dcefdeeea0b23935c10745 100644
--- a/src/interpretation/interpreter_types.ml
+++ b/src/interpretation/interpreter_types.ml
@@ -103,6 +103,14 @@ let term_of_op ?(args = []) engine interpreter_op ty =
   let t_args, ty_args = List.unzip args in
   Why3.Term.t_app_infer (ls_of_op engine interpreter_op ty_args ty) t_args
 
+let op_of_term engine t =
+  match t.Why3.Term.t_node with
+  | Tapp (ls, args) -> (
+    match op_of_ls engine ls with
+    | exception Stdlib.Not_found -> None
+    | v -> Some (v, args))
+  | _ -> None
+
 let interpreter_env ~cwd env =
   {
     ls_of_op = Hashtbl.Poly.create ();
diff --git a/src/interpretation/interpreter_types.mli b/src/interpretation/interpreter_types.mli
index 8eb1ce80f2233dfbc504662229ca5712c459f90b..2481c482ef4377a5bbbc43b2f0367729020e3174 100644
--- a/src/interpretation/interpreter_types.mli
+++ b/src/interpretation/interpreter_types.mli
@@ -54,6 +54,8 @@ type interpreter_env = private {
 
 val op_of_ls : interpreter_env IRE.engine -> Why3.Term.lsymbol -> interpreter_op
 
+val op_of_term : interpreter_env IRE.engine -> Why3.Term.term -> (interpreter_op * Why3.Term.term list) option
+
 val term_of_op :
   ?args:(Why3.Term.term * Why3.Ty.ty) list ->
   interpreter_env IRE.engine ->
diff --git a/src/language.ml b/src/language.ml
index 6f5e805fc6b6e85c027dfb683509707c31c96c19..7ee93c36974f8155b046c1f6a29c8ad767d11998 100644
--- a/src/language.ml
+++ b/src/language.ml
@@ -211,13 +211,9 @@ and nn_format =
 let nets = Term.Hls.create 10
 
 let fresh_nn_ls env name =
-  let ty_kind =
-    let th = Env.read_theory env [ "caisar"; "model" ] "NN" in
-    Ty.ty_app (Theory.ns_find_ts th.th_export [ "nn" ]) []
-  in
   let ty_model =
     let th = Env.read_theory env [ "caisar"; "model" ] "Model" in
-    Ty.ty_app (Theory.ns_find_ts th.th_export [ "model" ]) [ ty_kind ]
+    Ty.ty_app (Theory.ns_find_ts th.th_export [ "model" ]) []
   in
   let id = Ident.id_fresh name in
   Term.create_fsymbol id [] ty_model
@@ -316,13 +312,9 @@ let svm_abstraction_of_string s =
 let svms = Term.Hls.create 10
 
 let fresh_svm_ls env name =
-  let ty_kind =
-    let th = Env.read_theory env [ "caisar"; "model" ] "SVM" in
-    Ty.ty_app (Theory.ns_find_ts th.th_export [ "svm" ]) []
-  in
   let ty_model =
     let th = Env.read_theory env [ "caisar"; "model" ] "Model" in
-    Ty.ty_app (Theory.ns_find_ts th.th_export [ "model" ]) [ ty_kind ]
+    Ty.ty_app (Theory.ns_find_ts th.th_export [ "model" ]) []
   in
   let id = Ident.id_fresh name in
   Term.create_fsymbol id [] ty_model
diff --git a/src/transformations/native_nn_prover.ml b/src/transformations/native_nn_prover.ml
index b088f053b3cf4640e28188abdacd1bdfc8784ed2..0f8f05cc7210ae2928c029710bbf664c3fb5d236 100644
--- a/src/transformations/native_nn_prover.ml
+++ b/src/transformations/native_nn_prover.ml
@@ -45,11 +45,40 @@ let tempfile =
       Stdlib.Filename.concat dir (Fmt.str "caisar_%i.onnx" !c)
     | None -> Stdlib.Filename.temp_file "caisar" ".onnx"
 
+let match_nn_app th_model term =
+  match term.Why3.Term.t_node with
+  | Tapp
+      ( ls_get (* [ ] *),
+        [
+          {
+            t_node =
+              Why3.Term.Tapp
+                ( ls_atat (* @@ *),
+                  [
+                    { t_node = Tapp (ls_nn (* nn *), _); _ };
+                    {
+                      t_node = Tapp (ls (* input vector *), tl (* input vars *));
+                      _;
+                    };
+                  ] );
+            _;
+          };
+          { t_node = Tconst (ConstInt c); _ };
+        ] )
+    when String.equal ls_get.ls_name.id_string (Why3.Ident.op_get "")
+         && Why3.Term.ls_equal ls_atat th_model.Symbols.Model.atat -> (
+    match (Language.lookup_nn ls_nn, Language.lookup_vector ls) with
+    | Some ({ nn_nb_inputs; _ } as nn), Some vector_length ->
+      assert (nn_nb_inputs = vector_length && vector_length = List.length tl);
+      let c = Why3.Number.to_small_integer c in
+      Some (nn, tl, c)
+    | _, _ -> None)
+  | _ -> None
+
 let create_new_nn env input_vars outputs : string =
   let module IR = Nir in
   let th_f64 = Symbols.Float64.create env in
   let th_model = Symbols.Model.create env in
-  let th_nn = Symbols.NN.create env in
   let len_input = Why3.Term.Mls.cardinal input_vars in
   let input =
     IR.Node.create (Input { shape = IR.Shape.of_list [ len_input ] })
@@ -61,67 +90,80 @@ let create_new_nn env input_vars outputs : string =
       IR.Node.gather_int input i)
   in
   let cache = Why3.Term.Hterm.create 17 in
-  let nn_cache = Stdlib.Hashtbl.create 17 in
-  (* Instantiate the input of [old_nn] with the [old_nn_args] terms transformed
-     into nodes. *)
-  let rec convert_old_nn old_nn old_nn_args =
-    let converted_args = List.map ~f:convert_term old_nn_args in
-    let id =
-      ( old_nn.Language.nn_filename,
-        List.map converted_args ~f:(fun n -> n.Nir.Node.id) )
-    in
-    match Stdlib.Hashtbl.find_opt nn_cache id with
-    | None ->
-      let node_nn = convert_old_nn_aux old_nn converted_args in
-      Stdlib.Hashtbl.add nn_cache id node_nn;
-      node_nn
-    | Some node_nn -> node_nn
-  and convert_old_nn_aux old_nn converted_args =
+  (* Instantiate the input of [old_nn] with the [converted_arg] term transformed
+     into a node. *)
+  let convert_nn ?loc old_nn converted_arg =
+    if old_nn.Language.nn_nb_inputs
+       <> Nir.Shape.size converted_arg.Nir.Node.shape
+    then
+      Logging.user_error ?loc (fun m ->
+        m "Neural network applied with the wrong number of arguments");
     let old_nn_nir =
-      match Onnx.Reader.from_file old_nn.Language.nn_filename with
-      | Error s ->
-        Logging.code_error ~src (fun m ->
-          m "No parsed NN '%s': %s" old_nn.nn_filename s)
-      | Ok { nir = Error s; _ } ->
+      let nir_opt =
+        match old_nn.Language.nn_format with
+        | NNet nir_opt -> nir_opt
+        | ONNX nir_opt -> nir_opt
+      in
+      match nir_opt with
+      | None ->
         Logging.code_error ~src (fun m ->
-          m "No NIR available for NN '%s': %s" old_nn.nn_filename s)
-      | Ok { nir = Ok g; _ } -> g
+          m "No NIR available for NN '%s'" old_nn.nn_filename)
+      | Some g -> g
     in
     (* Create the graph to replace the old input of the nn *)
     let input () =
-      (* Regroup the terms into one node *)
-      let node =
-        IR.Node.create (Concat { inputs = converted_args; axis = 0 })
-      in
-      IR.Node.reshape (IR.Ngraph.input_shape old_nn_nir) node
+      let node = converted_arg in
+      Nir.Node.reshape (Nir.Ngraph.input_shape old_nn_nir) node
     in
     let out =
-      IR.Node.replace_input input (IR.Ngraph.output old_nn_nir)
+      IR.Node.replace_input input (Nir.Ngraph.output old_nn_nir)
       |> IR.Node.reshape (Nir.Shape.of_array [| old_nn.nn_nb_outputs |])
     in
     out
-  and convert_old_nn_at_old_index old_nn old_index old_nn_args =
-    let out = convert_old_nn old_nn old_nn_args in
-    let old_index = Why3.Number.to_small_integer old_index in
-    Nir.Node.gather_int out old_index
-  and convert_term term =
+  in
+  let rec convert_term term =
     match Why3.Term.Hterm.find_opt cache term with
     | None ->
       let n = convert_term_aux term in
       Why3.Term.Hterm.add cache term n;
       n
     | Some n -> n
-  and convert_term_aux term : IR.Node.t =
-    if not (Why3.Ty.ty_equal (Option.value_exn term.Why3.Term.t_ty) th_f64.ty)
+  and convert_term_aux term : Nir.Node.t =
+    let loc = term.Why3.Term.t_loc in
+    if false
+       && not
+            (Why3.Ty.ty_equal (Option.value_exn term.Why3.Term.t_ty) th_f64.ty)
     then
       Logging.user_error ?loc:term.t_loc (fun m ->
         m "Cannot convert non Float64 term %a" Why3.Pretty.print_term term);
     match term.Why3.Term.t_node with
+    | Tapp (ls_get (* [ ] *), [ t1; { t_node = Tconst (ConstInt c); _ } ])
+      when String.equal ls_get.ls_name.id_string (Why3.Ident.op_get "") ->
+      let c = Why3.Number.to_small_integer c in
+      let t1 = convert_term t1 in
+      if Nir.Shape.size t1.Nir.Node.shape <= c
+      then
+        Logging.user_error ?loc (fun m ->
+          m "Vector accessed outside its bounds");
+      Nir.Node.gather_int t1 c
+    | Tapp (ls_atat (* @@ *), [ { t_node = Tapp (ls_nn (* nn *), _); _ }; t2 ])
+      when Why3.Term.ls_equal ls_atat th_model.Symbols.Model.atat -> (
+      match Language.lookup_nn ls_nn with
+      | Some nn -> convert_nn ?loc nn (convert_term t2)
+      | _ ->
+        Logging.code_error ~src (fun m ->
+          m "Neural network application without fixed NN: %a"
+            Why3.Pretty.print_term term))
+    | Tapp (ls (* input vector *), tl (* input vars *))
+      when Language.mem_vector ls ->
+      let inputs = List.map tl ~f:convert_term in
+      IR.Node.create (Concat { inputs; axis = 0 })
     | Tconst (Why3.Constant.ConstReal r) ->
       IR.Node.create
         (Constant
            {
-             data = IR.Gentensor.create_1_float (Utils.float_of_real_constant r);
+             data =
+               Nir.Gentensor.create_1_float (Utils.float_of_real_constant r);
            })
     | Tapp (ls, []) -> get_input ls
     | Tapp (ls, [ _; a; b ]) when Why3.Term.ls_equal ls th_f64.add ->
@@ -140,36 +182,6 @@ let create_new_nn env input_vars outputs : string =
           (Div { input1 = convert_term a; input2 = convert_term b }))
     | Tapp (ls, [ a ]) when Why3.Term.ls_equal ls th_f64.neg ->
       Nir.Node.mul_float (convert_term a) (-1.)
-    | Tapp
-        ( ls_get (* [ ] *),
-          [
-            {
-              t_node =
-                Tapp
-                  ( ls_atat (* @@ *),
-                    [
-                      { t_node = Tapp (ls_nn (* nn *), _); _ };
-                      {
-                        t_node =
-                          Tapp (ls (* input vector *), tl (* input vars *));
-                        _;
-                      };
-                    ] );
-              _;
-            };
-            { t_node = Tconst (ConstInt old_index); _ };
-          ] )
-      when String.equal ls_get.ls_name.id_string (Why3.Ident.op_get "")
-           && (Why3.Term.ls_equal ls_atat th_model.atat
-              || Why3.Term.ls_equal ls_atat th_nn.atat) -> (
-      match (Language.lookup_nn ls_nn, Language.lookup_vector ls) with
-      | Some ({ nn_nb_inputs; _ } as old_nn), Some vector_length ->
-        assert (nn_nb_inputs = vector_length && vector_length = List.length tl);
-        convert_old_nn_at_old_index old_nn old_index tl
-      | _, _ ->
-        Logging.code_error ~src (fun m ->
-          m "Neural network application without fixed NN or arguments: %a"
-            Why3.Pretty.print_term term))
     | Tconst _
     | Tapp (_, _)
     | Tif (_, _, _)
@@ -201,41 +213,60 @@ let create_new_nn env input_vars outputs : string =
   Onnx.Writer.to_file nn filename;
   filename
 
+let check_if_new_nn_needed env input_vars outputs =
+  let th_model = Symbols.Model.create env in
+  let exception New_NN_Needed in
+  try
+    (* outputs must be direct application, inputs must be lsymbols *)
+    let outputs =
+      List.map outputs ~f:(fun (term, ls) ->
+        match match_nn_app th_model term with
+        | None -> raise New_NN_Needed
+        | Some (nn, tl, c) ->
+          let tl =
+            List.map tl ~f:(function
+              | { t_node = Why3.Term.Tapp (ls, []); _ } -> ls
+              | _ -> raise New_NN_Needed)
+          in
+          (ls, nn, tl, c))
+    in
+    match outputs with
+    | [] -> raise New_NN_Needed
+    | (_, nn, tl, _) :: _ ->
+      (* with the same nn and arguments *)
+      if not
+           (List.for_all outputs ~f:(fun (_, nn', tl', _) ->
+              String.equal nn'.Language.nn_filename nn.Language.nn_filename
+              && List.equal Why3.Term.ls_equal tl tl'))
+      then raise New_NN_Needed;
+      assert (Why3.Term.Sls.equal (Why3.Term.Sls.of_list tl) input_vars);
+      let outputs =
+        List.map outputs ~f:(fun (ls, _, _, index) -> (index, ls))
+      in
+      let compare (a, _) (b, _) = Int.compare a b in
+      let outputs = List.sort outputs ~compare in
+      assert (List.is_sorted_strictly outputs ~compare);
+      let indexed_input_vars =
+        List.mapi tl ~f:(fun i x -> (x, i)) |> Why3.Term.Mls.of_list
+      in
+      (nn.nn_filename, indexed_input_vars, outputs)
+  with New_NN_Needed ->
+    let _, indexed_input_vars =
+      Why3.Term.Mls.mapi_fold (fun _ _ i -> (i + 1, i)) input_vars 0
+    in
+    let outputs1 =
+      List.mapi outputs ~f:(fun index (term, _) -> { index; term })
+    in
+    let outputs2 = List.mapi outputs ~f:(fun index (_, ls) -> (index, ls)) in
+    (create_new_nn env indexed_input_vars outputs1, indexed_input_vars, outputs2)
+
 (* Choose the term pattern for starting the conversion to ONNX. *)
 let has_start_pattern env term =
-  let th_model = Symbols.Model.create env in
-  let th_nn = Symbols.NN.create env in
   let th_f = Symbols.Float64.create env in
   match term.Why3.Term.t_node with
-  | Tapp
-      ( ls_get (* [ ] *),
-        [
-          {
-            t_node =
-              Why3.Term.Tapp
-                ( ls_atat (* @@ *),
-                  [
-                    { t_node = Tapp (ls_nn (* nn *), _); _ };
-                    {
-                      t_node = Tapp (ls (* input vector *), tl (* input vars *));
-                      _;
-                    };
-                  ] );
-            _;
-          };
-          { t_node = Tconst (ConstInt _); _ };
-        ] )
-    when String.equal ls_get.ls_name.id_string (Why3.Ident.op_get "")
-         && (Why3.Term.ls_equal ls_atat th_model.Symbols.Model.atat
-            || Why3.Term.ls_equal ls_atat th_nn.Symbols.NN.atat) -> (
-    match (Language.lookup_nn ls_nn, Language.lookup_vector ls) with
-    | Some { nn_nb_inputs; _ }, Some vector_length ->
-      assert (nn_nb_inputs = vector_length && vector_length = List.length tl);
-      true
-    | _, _ ->
-      Logging.code_error ~src (fun m ->
-        m "Neural network application without fixed NN or arguments: %a"
-          Why3.Pretty.print_term term))
+  | Tapp (ls_get (* [ ] *), _)
+    when String.equal ls_get.ls_name.id_string (Why3.Ident.op_get "") ->
+    true
   | Tapp ({ ls_value = Some ty; _ }, []) ->
     (* input symbol *) Why3.Ty.ty_equal ty th_f.ty
   | Tapp (ls_app, _) ->
@@ -248,27 +279,25 @@ let has_start_pattern env term =
 let abstract_nn_term env =
   let th_float64 = Symbols.Float64.create env in
   let simplify_as_output_vars term =
-    let rec do_simplify ((new_index, output_vars) as acc) term =
+    let rec do_simplify output_vars term =
       if has_start_pattern env term
       then
-        let acc, ls_output_var =
+        let output_vars, ls_output_var =
           match Why3.Term.Mterm.find_opt term output_vars with
           | None ->
             let ls =
               Why3.Term.create_fsymbol (Why3.Ident.id_fresh "y") []
                 th_float64.ty
             in
-            let output_vars =
-              Why3.Term.Mterm.add term (new_index, ls) output_vars
-            in
-            ((new_index + 1, output_vars), ls)
-          | Some (_, ls) -> (acc, ls)
+            let output_vars = Why3.Term.Mterm.add term ls output_vars in
+            (output_vars, ls)
+          | Some ls -> (output_vars, ls)
         in
         let output_var = Why3.Term.fs_app ls_output_var [] th_float64.ty in
-        (acc, output_var)
-      else Why3.Term.t_map_fold do_simplify acc term
+        (output_vars, output_var)
+      else Why3.Term.t_map_fold do_simplify output_vars term
     in
-    let (_, output_vars), term = do_simplify (0, Why3.Term.Mterm.empty) term in
+    let output_vars, term = do_simplify Why3.Term.Mterm.empty term in
     (output_vars, term)
   in
   Why3.Trans.fold_map
@@ -302,37 +331,23 @@ let abstract_nn_term env =
         let output_vars, goal = simplify_as_output_vars goal in
         Logs.debug ~src:src_term (fun f ->
           f "new goal:%a" Why3.Pretty.print_term goal);
-        (* Add output variable declarations and corresponding meta. *)
-        let task =
-          Why3.Term.Mterm.fold_left
-            (fun task _ (index, output_var) ->
-              let task =
-                Why3.Task.add_meta task Meta.meta_output
-                  [ MAls output_var; MAint index ]
-              in
-              Why3.Task.add_param_decl task output_var)
-            task output_vars
-        in
-        let new_outputs =
-          Why3.Term.Mterm.fold_left
-            (fun acc term (index, _) -> { index; term } :: acc)
-            [] output_vars
-        in
+        let new_outputs = Why3.Term.Mterm.bindings output_vars in
         (* Collect all lsymbols appearing in terms simplified as output
            variables: at the moment, vector selection on a neural network
            application to an input vector (ie, (nn @@ v)[_]). *)
         let used_ls =
           List.fold
-            ~f:(fun acc new_output ->
+            ~f:(fun acc (new_output, _) ->
               Why3.Term.t_s_fold
                 (fun acc _ -> acc)
                 (fun acc ls -> Why3.Term.Sls.add ls acc)
-                acc new_output.term)
+                acc new_output)
             ~init:Why3.Term.Sls.empty new_outputs
         in
         let input_vars = Why3.Term.Sls.inter input_vars used_ls in
-        let _, indexed_input_vars =
-          Why3.Term.Mls.mapi_fold (fun _ _ i -> (i + 1, i)) input_vars 0
+        (* Create new ONNX and add the corresponding meta. *)
+        let nn_filename, indexed_input_vars, output_vars =
+          check_if_new_nn_needed env input_vars new_outputs
         in
         (* Add meta for input variable declarations. *)
         let task =
@@ -341,8 +356,18 @@ let abstract_nn_term env =
               Why3.Task.add_meta task Meta.meta_input [ MAls ls; MAint index ])
             task indexed_input_vars
         in
-        (* Create new ONNX and add the corresponding meta. *)
-        let nn_filename = create_new_nn env indexed_input_vars new_outputs in
+
+        (* Add output variable declarations and corresponding meta. *)
+        let task =
+          List.fold_left
+            ~f:(fun task (index, output_var) ->
+              let task =
+                Why3.Task.add_meta task Meta.meta_output
+                  [ MAls output_var; MAint index ]
+              in
+              Why3.Task.add_param_decl task output_var)
+            ~init:task output_vars
+        in
         let task =
           Why3.Task.add_meta task Meta.meta_nn_filename [ MAstr nn_filename ]
         in
@@ -357,115 +382,4 @@ let abstract_nn_term env =
       | _ -> (input_vars, Why3.Task.add_tdecl task tdecl))
     Why3.Term.Sls.empty None
 
-(* -------------------------------------------------------------------------- *)
-(* --- Old method                                                             *)
-(* -------------------------------------------------------------------------- *)
-
-(* Creates a list of pairs made of output variables and respective indices in
-   the list, for each neural network application to an input vector appearing in
-   a task. Such a list stands for the resulting output vector of a neural
-   network application to an input vector (ie, something of the form: nn @@ v).
-   The creation process is memoized wrt terms corresponding to neural network
-   applications to input vectors. *)
-let output_vars env =
-  let th_model = Symbols.Model.create env in
-  let th_nn = Symbols.NN.create env in
-  let rec create_output_var mt term =
-    match term.Why3.Term.t_node with
-    | Tapp (ls_atat (* @@ *), [ { t_node = Tapp (ls_nn (* nn *), _); _ }; _ ])
-      when Why3.Term.ls_equal ls_atat th_model.atat
-           || Why3.Term.ls_equal ls_atat th_nn.atat -> (
-      match Language.lookup_nn ls_nn with
-      | Some { nn_nb_outputs; nn_ty_elt; _ } ->
-        if Why3.Term.Mterm.mem term mt
-        then mt
-        else
-          let output_vars =
-            List.init nn_nb_outputs ~f:(fun index ->
-              ( index,
-                Why3.Term.create_fsymbol (Why3.Ident.id_fresh "y") [] nn_ty_elt
-              ))
-          in
-          Why3.Term.Mterm.add term output_vars mt
-      | None -> mt)
-    | _ -> Why3.Term.t_fold create_output_var mt term
-  in
-  Why3.Trans.fold_decl
-    (fun decl mt -> Why3.Decl.decl_fold create_output_var mt decl)
-    Why3.Term.Mterm.empty
-
-(* Simplifies a task goal exhibiting a vector selection on a neural network
-   application to an input vector (ie, (nn @@ v)[_]) by the corresponding output
-   variable. Morevoer, each input variable declaration is annotated with a meta
-   that describes the respective index in the input vector. Output variables are
-   all declared, each with a meta that describes the respective index in the
-   output vector. *)
-let simplify_nn_application env input_vars output_vars =
-  let th_model = Symbols.Model.create env in
-  let th_nn = Symbols.NN.create env in
-  let rec do_simplify (term : Why3.Term.term) =
-    match term.t_node with
-    | Why3.Term.Tapp
-        ( ls_get (* [_] *),
-          [
-            ({ t_node = Tapp (ls_atat (* @@ *), _); _ } as t1);
-            ({ t_node = Tconst (ConstInt index); _ } as _t2);
-          ] )
-      when String.equal ls_get.ls_name.id_string (Why3.Ident.op_get "")
-           && (Why3.Term.ls_equal ls_atat th_nn.atat
-              || Why3.Term.ls_equal ls_atat th_model.atat) -> (
-      match Why3.Term.Mterm.find_opt t1 output_vars with
-      | None -> Why3.Term.t_map do_simplify term
-      | Some output_vars ->
-        let index = Why3.Number.to_small_integer index in
-        assert (index < List.length output_vars);
-        let ls = Stdlib.List.assoc index output_vars in
-        Why3.Term.t_app_infer ls [])
-    | _ -> Why3.Term.t_map do_simplify term
-  in
-  Why3.Trans.fold
-    (fun task_hd task ->
-      match task_hd.task_decl.td_node with
-      | Decl { d_node = Dparam ls; _ } ->
-        (* Add meta for input variable declarations. Note that each meta needs
-           to appear before the corresponding declaration in order to be
-           leveraged by prover printers. *)
-        let task =
-          match Why3.Term.Mls.find_opt ls input_vars with
-          | None -> task
-          | Some index ->
-            Why3.Task.add_meta task Meta.meta_input [ MAls ls; MAint index ]
-        in
-        Why3.Task.add_tdecl task task_hd.task_decl
-      | Decl ({ d_node = Dprop (Pgoal, _, _); _ } as decl) ->
-        let decl = Why3.Decl.decl_map do_simplify decl in
-        let task =
-          (* Output variables are not declared yet in the task as they are
-             created on the fly for each (different) neural network application
-             on an input vector. We add here their declarations in the task. *)
-          Why3.Term.Mterm.fold
-            (fun _t output_vars task ->
-              (* Again, for each output variable, add the meta first, then its
-                 actual declaration. *)
-              List.fold output_vars ~init:task
-                ~f:(fun task (index, output_var) ->
-                let task =
-                  Why3.Task.add_meta task Meta.meta_output
-                    [ MAls output_var; MAint index ]
-                in
-                let decl = Why3.Decl.create_param_decl output_var in
-                Why3.Task.add_decl task decl))
-            output_vars task
-        in
-        Why3.Task.add_decl task decl
-      | Use _ | Clone _ | Meta _ | Decl _ ->
-        Why3.Task.add_tdecl task task_hd.task_decl)
-    None
-
-let trans =
-  Why3.Env.Wenv.memoize 10 (fun env ->
-    Why3.Trans.bind Utils.input_terms (function
-      | Utils.Others -> abstract_nn_term env
-      | Vars input_vars ->
-        Why3.Trans.bind (output_vars env) (fun output_vars ->
-          simplify_nn_application env input_vars output_vars)))
+let trans = Why3.Env.Wenv.memoize 10 abstract_nn_term
diff --git a/src/transformations/symbols.ml b/src/transformations/symbols.ml
index 9b6574034c15ca5c57c21ffa404940fcd9635024..059920b8660ede5334055079b0da705a78d62886 100644
--- a/src/transformations/symbols.ml
+++ b/src/transformations/symbols.ml
@@ -135,16 +135,3 @@ module Model = struct
 
   let create = Why3.Env.Wenv.memoize 10 (fun env -> create_t env)
 end
-
-module NN = struct
-  type t = { atat : Why3.Term.lsymbol (* NN.( @@ ) *) }
-
-  let create_t env =
-    let th = Why3.Env.read_theory env [ "caisar"; "model" ] "NN" in
-    let atat =
-      Why3.Theory.ns_find_ls th.th_export [ Why3.Ident.op_infix "@@" ]
-    in
-    { atat }
-
-  let create = Why3.Env.Wenv.memoize 10 (fun env -> create_t env)
-end
diff --git a/src/transformations/symbols.mli b/src/transformations/symbols.mli
index 7f32f6e3abb7c6924085128245302f90532f2c6a..12f52fe006d1f68495d6148b5c048ebc5718a42b 100644
--- a/src/transformations/symbols.mli
+++ b/src/transformations/symbols.mli
@@ -74,12 +74,6 @@ end
 (* -------------------------------------------------------------------------- *)
 
 module Model : sig
-  type t = private { atat : Why3.Term.lsymbol  (** Model.( \@\@ ) *) }
-
-  val create : Why3.Env.env -> t
-end
-
-module NN : sig
   type t = private { atat : Why3.Term.lsymbol  (** NN.( \@\@ ) *) }
 
   val create : Why3.Env.env -> t
diff --git a/src/transformations/utils.ml b/src/transformations/utils.ml
index 659bd88f41c9f3a9118448922700427e0c66f119..ad9e5e48a3b3b711c434b4415cb2d485d5f21882 100644
--- a/src/transformations/utils.ml
+++ b/src/transformations/utils.ml
@@ -35,19 +35,17 @@ let float_of_real_constant rc =
   if is_neg then Float.neg f else f
 
 let real_constant_of_float value =
-  let neg = Float.is_negative value in
-  let value = Fmt.str "%.64f" (Float.abs value) in
-  (* Split into integer and fractional parts. *)
-  let int_frac = String.split value ~on:'.' in
-  let int = List.hd_exn int_frac in
-  let frac =
-    match List.tl_exn int_frac with
-    | [] -> ""
-    | [ s ] -> s
-    | _ -> assert false (* Since every float has one '.' at most. *)
-  in
+  assert (Float.is_finite value);
+  let m, e = Float.frexp value in
+  (* put into the form m * 2^e, where m is an integer *)
+  let m, e = (Int.of_float (Float.ldexp m 53), e - 53) in
   Why3.Constant.ConstReal
-    (Why3.Number.real_literal ~radix:10 ~neg ~int ~frac ~exp:None)
+    {
+      rl_kind = RLitDec 0;
+      rl_real =
+        Why3.Number.real_value ~pow2:(Why3.BigInt.of_int e)
+          (Why3.BigInt.of_int m);
+    }
 
 let term_of_float env f =
   let th = Symbols.Float64.create env in
diff --git a/stdlib/caisar/model.mlw b/stdlib/caisar/model.mlw
index 8a07cb813a9b585eadd20dbd2993f791d8f02a0f..e73a0e3b61b10a234808d0105f555330601902b8 100644
--- a/stdlib/caisar/model.mlw
+++ b/stdlib/caisar/model.mlw
@@ -22,41 +22,24 @@
 
 (** {1 Models} *)
 
-(** {2 Generic Models} *)
-
 theory Model
 
+  use ieee_float.Float64
   use caisar.types.Vector
 
-  type kind
-  type model 'kind
-
-  val function read_model (n: string) : model 'kind
-  val function (@@) (m: model 'kind) (v: vector 'a) : vector 'a
-
-end
-
-(** {2 Neural Networks} *)
 
-theory NN
+  type model
 
-  type nn
-
-  use Model
-  clone export Model with type model 'kind = model nn
-
-end
+  type kind
 
-(** {2 Support Vector Machines} *)
+  val function read_model (n: string) : model 
+  val function (@@) (m: model) (v: vector t) : vector t
 
-theory SVM
 
-  type svm
+(** For SVM *)
+   
   type abstraction = Interval | Raf | Hybrid
+  function read_model_with_abstraction (n: string) (a: abstraction) : model
 
-  use Model
-  clone export Model with type kind = svm, type model 'kind = model svm
-
-  function read_model_with_abstraction (n: string) (a: abstraction) : model svm
 
 end
diff --git a/stdlib/caisar/robust.mlw b/stdlib/caisar/robust.mlw
index 747e0ab636f220a47243465dfd6c3ba742dabf5e..4c9741d5110863500999217affe7bc7865cfdcbc 100644
--- a/stdlib/caisar/robust.mlw
+++ b/stdlib/caisar/robust.mlw
@@ -36,7 +36,7 @@ theory ClassRobustVector
   predicate bounded_by_epsilon (e: FeatureVector.t) (eps: t) =
     forall i: index. valid_index e i -> .- eps .<= e[i] .<= eps
 
-  predicate advises (label_bounds: Label.bounds) (m: model 'm)
+  predicate advises (label_bounds: Label.bounds) (m: model)
                     (e: FeatureVector.t) (l: Label.t) =
     Label.valid label_bounds l ->
     forall j. Label.valid label_bounds j -> j <> l ->
@@ -44,7 +44,7 @@ theory ClassRobustVector
 
   predicate robust (feature_bounds: Feature.bounds)
                    (label_bounds: Label.bounds)
-                   (m: model 'm) (eps: t)
+                   (m: model) (eps: t)
                    (l: Label.t) (e: FeatureVector.t) =
     forall perturbed_e: FeatureVector.t.
       has_length perturbed_e (length e) ->
@@ -68,7 +68,7 @@ theory ClassRobustCSV
 
   predicate robust (feature_bounds: Feature.bounds)
                    (label_bounds: Label.bounds)
-                   (m: model 'a) (d: dataset) (eps: t) =
+                   (m: model) (d: dataset) (eps: t) =
     CSV.forall_ d (ClassRobustVector.robust feature_bounds label_bounds m eps)
 
 end
diff --git a/tests/acasxu.t b/tests/acasxu.t
index a80abb726d3ebe8b28c2ee96bf7c3c459f0a9b04..4889b82f6e24f5c591335b2c16b1114eb5483da2 100644
--- a/tests/acasxu.t
+++ b/tests/acasxu.t
@@ -10,12 +10,12 @@ Test verify on acasxu
   >   use int.Int
   >   use caisar.types.Vector
   >   use caisar.model.Model
-  >   use caisar.model.NN
+  >  
   > 
-  >   constant nn: model nn = read_model "TestNetwork.nnet"
-  >   let constant nn_onnx: model nn = read_model "../examples/acasxu/nets/onnx/ACASXU_1_1.onnx"
-  >   let constant nn_onnx_1_1: model nn = read_model "../examples/acasxu/nets/onnx/ACASXU_1_1.onnx"
-  >   let constant nn_onnx_1_9: model nn = read_model "../examples/acasxu/nets/onnx/ACASXU_1_9.onnx"
+  >   constant nn: model = read_model "TestNetwork.nnet"
+  >   let constant nn_onnx: model = read_model "../examples/acasxu/nets/onnx/ACASXU_1_1.onnx"
+  >   let constant nn_onnx_1_1: model = read_model "../examples/acasxu/nets/onnx/ACASXU_1_1.onnx"
+  >   let constant nn_onnx_1_9: model = read_model "../examples/acasxu/nets/onnx/ACASXU_1_9.onnx"
   > 
   >   type input = vector t
   > 
@@ -44,7 +44,7 @@ Test verify on acasxu
   > 
   >   predicate valid_action (a: action) = clear_of_conflict <= a <= strong_right
   > 
-  >   predicate advises (n: model nn) (i: input) (a: action) =
+  >   predicate advises (n: model) (i: input) (a: action) =
   >     valid_action a ->
   >     forall b: action.
   >       valid_action b ->  a <> b -> (n@@i)[a] .< (n@@i)[b]
@@ -263,22 +263,21 @@ Test verify on acasxu
      [0] (373.9499200000000200816430151462554931640625:t))
     (7.51888402010059753166615337249822914600372314453125:t))
    (1500.0:t)
-  nn_onnx1,
+  nn_onnx,
   (Interpreter_types.Model
      (Interpreter_types.ONNX, { Language.nn_nb_inputs = 5; nn_nb_outputs = 5;
                                 nn_ty_elt = t;
                                 nn_filename =
-                                "./../examples/acasxu/nets/onnx/ACASXU_1_9.onnx";
+                                "./../examples/acasxu/nets/onnx/ACASXU_1_1.onnx";
                                 nn_format = <nir> }))
-  nn_onnx,
+  vector, 5
+  nn_onnx1,
   (Interpreter_types.Model
      (Interpreter_types.ONNX, { Language.nn_nb_inputs = 5; nn_nb_outputs = 5;
                                 nn_ty_elt = t;
                                 nn_filename =
-                                "./../examples/acasxu/nets/onnx/ACASXU_1_1.onnx";
+                                "./../examples/acasxu/nets/onnx/ACASXU_1_9.onnx";
                                 nn_format = <nir> }))
-  vector,
-  5
   [DEBUG]{NN-Gen-Term} new goal:le y1 (1500.0:t) /\ le y2 (1500.0:t)
   [DEBUG]{ProverSpec} Prover-tailored specification:
   0.0 <= x0
@@ -368,7 +367,7 @@ Test verify on acasxu
   55947.6909999999988940544426441192626953125 <= x0
   1145.0 <= x3
   x4 <= 60.0
-  y0 <= y1 and y1 <= y0
+  y1 <= y0 and y0 <= y1
   
   [DEBUG]{InterpretGoal} Interpreted formula for goal 'run2v2'vc':
   forall x:t, x1:t, x2:t, x3:t, x4:t, x5:t, x6:t, x7:t, x8:t, x9:t.
@@ -447,7 +446,7 @@ Test verify on acasxu
   55947.6909999999988940544426441192626953125 <= x5
   1145.0 <= x8
   x9 <= 60.0
-  y0 <= y1 and y1 <= y0
+  y1 <= y0 and y0 <= y1
   
   [DEBUG]{InterpretGoal} Interpreted formula for goal 'run3'vc':
   forall x:t, x1:t, x2:t, x3:t, x4:t, eps:t.
@@ -648,6 +647,7 @@ Test verify on acasxu
                                 nn_ty_elt = t;
                                 nn_filename = "./TestNetwork.nnet";
                                 nn_format = <nir> }))
+  [DEBUG]{NN-Gen-Term} new goal:le y10 (1500.0:t)
   [DEBUG]{ProverSpec} Prover-tailored specification:
   -0.328421367053318091766556108268559910356998443603515625 <= x0
   x0 <= 0.67985927880386987087746319957659579813480377197265625
@@ -662,7 +662,7 @@ Test verify on acasxu
   0.60000151009774149724051994780893437564373016357421875 <= x0
   0.450000000000000011102230246251565404236316680908203125 <= x3
   x4 <= -0.450000000000000011102230246251565404236316680908203125
-  y0 <= 3.991125645861615112153231166303157806396484375
+  y0 <= 1500.0
   
   [DEBUG]{InterpretGoal} Interpreted formula for goal 'P3':
   forall x:t, x1:t, x2:t, x3:t, x4:t.
@@ -722,6 +722,8 @@ Test verify on acasxu
                                 nn_ty_elt = t;
                                 nn_filename = "./TestNetwork.nnet";
                                 nn_format = <nir> }))
+  [DEBUG]{NN-Gen-Term} new goal:not (((lt y11 y12 /\ lt y11 y13) /\ lt y11 y14) /\
+                                     lt y11 y15)
   [DEBUG]{ProverSpec} Prover-tailored specification:
   -0.328421367053318091766556108268559910356998443603515625 <= x0
   x0 <= 0.67985927880386987087746319957659579813480377197265625
@@ -874,15 +876,15 @@ Test verify on acasxu
   (assert (>= X_3 1145.0))
   (assert (<= X_4 60.0))
   
-  ;; Y_1
-  (declare-const Y_1 Real)
-  
   ;; Y_0
   (declare-const Y_0 Real)
   
+  ;; Y_1
+  (declare-const Y_1 Real)
+  
   ;; Goal run2'vc
-  (assert (or (and (>= Y_0 Y_1))
-              (and (>= Y_1 Y_0))
+  (assert (or (and (>= Y_1 Y_0))
+              (and (>= Y_0 Y_1))
               ))
   
   [DEBUG]{ProverSpec} Prover-tailored specification:
@@ -940,15 +942,15 @@ Test verify on acasxu
   (assert (>= X_8 1145.0))
   (assert (<= X_9 60.0))
   
-  ;; Y_1
-  (declare-const Y_1 Real)
-  
   ;; Y_0
   (declare-const Y_0 Real)
   
+  ;; Y_1
+  (declare-const Y_1 Real)
+  
   ;; Goal run2v2'vc
-  (assert (or (and (>= Y_0 Y_1))
-              (and (>= Y_1 Y_0))
+  (assert (or (and (>= Y_1 Y_0))
+              (and (>= Y_0 Y_1))
               ))
   
   [DEBUG]{ProverSpec} Prover-tailored specification:
@@ -1113,20 +1115,8 @@ Test verify on acasxu
   ;; Y_0
   (declare-const Y_0 Real)
   
-  ;; Y_1
-  (declare-const Y_1 Real)
-  
-  ;; Y_2
-  (declare-const Y_2 Real)
-  
-  ;; Y_3
-  (declare-const Y_3 Real)
-  
-  ;; Y_4
-  (declare-const Y_4 Real)
-  
   ;; Goal P1
-  (assert (>= Y_0 3.991125645861615112153231166303157806396484375))
+  (assert (>= Y_0 1500.0))
   
   [DEBUG]{ProverSpec} Prover-tailored specification:
   ;;; produced by PyRAT/VNN-LIB driver
@@ -1290,7 +1280,7 @@ Test verify on acasxu
   x0 >= 55947.6909999999988940544426441192626953125
   x3 >= 1145.0
   x4 <= 60.0
-  +y0 -y1 >= 0
+  +y1 -y0 >= 0
   
   [DEBUG]{ProverSpec} Prover-tailored specification:
   x0 >= 0.0
@@ -1306,7 +1296,7 @@ Test verify on acasxu
   x0 >= 55947.6909999999988940544426441192626953125
   x3 >= 1145.0
   x4 <= 60.0
-  +y1 -y0 >= 0
+  +y0 -y1 >= 0
   
   [DEBUG]{ProverSpec} Prover-tailored specification:
   x0 >= 0.0
@@ -1325,7 +1315,7 @@ Test verify on acasxu
   x5 >= 55947.6909999999988940544426441192626953125
   x8 >= 1145.0
   x9 <= 60.0
-  +y0 -y1 >= 0
+  +y1 -y0 >= 0
   
   [DEBUG]{ProverSpec} Prover-tailored specification:
   x0 >= 0.0
@@ -1344,7 +1334,7 @@ Test verify on acasxu
   x5 >= 55947.6909999999988940544426441192626953125
   x8 >= 1145.0
   x9 <= 60.0
-  +y1 -y0 >= 0
+  +y0 -y1 >= 0
   
   [DEBUG]{ProverSpec} Prover-tailored specification:
   x0 >= 0.0
@@ -1426,7 +1416,7 @@ Test verify on acasxu
   x0 >= 0.60000151009774149724051994780893437564373016357421875
   x3 >= 0.450000000000000011102230246251565404236316680908203125
   x4 <= -0.450000000000000011102230246251565404236316680908203125
-  y0 >= 3.991125645861615112153231166303157806396484375
+  y0 >= 1500.0
   
   [DEBUG]{ProverSpec} Prover-tailored specification:
   x0 >= -0.328421367053318091766556108268559910356998443603515625
diff --git a/tests/acasxu_ci.t b/tests/acasxu_ci.t
index b9b6cb3c669275ed2e370bf90f2a2fa7fe903e62..b4ea0da485646e51a2bf55c649ad043cd9b61980 100644
--- a/tests/acasxu_ci.t
+++ b/tests/acasxu_ci.t
@@ -13,10 +13,10 @@ Test verify on acasxu
   >   use int.Int
   >   use caisar.types.Vector
   >   use caisar.model.Model
-  >   use caisar.model.NN
+  >  
   > 
-  >   let constant nn: model nn = read_model "../examples/acasxu/nets/onnx/ACASXU_1_1.onnx"
-  >   let constant nn_: model nn = read_model "../examples/acasxu/nets/onnx/ACASXU_1_9.onnx"
+  >   let constant nn: model = read_model "../examples/acasxu/nets/onnx/ACASXU_1_1.onnx"
+  >   let constant nn_: model = read_model "../examples/acasxu/nets/onnx/ACASXU_1_9.onnx"
   > 
   >   type input = vector t
   > 
@@ -325,7 +325,7 @@ Test verify on acasxu
   55947.6909999999988940544426441192626953125 <= x0
   1145.0 <= x3
   x4 <= 60.0
-  y0 <= y1 and y1 <= y0
+  y1 <= y0 and y0 <= y1
   
   [DEBUG]{InterpretGoal} Interpreted formula for goal 'run3'vc':
   forall x:t, x1:t, x2:t, x3:t, x4:t, eps:t.
@@ -650,15 +650,15 @@ Test verify on acasxu
   (assert (>= X_3 1145.0))
   (assert (<= X_4 60.0))
   
-  ;; Y_1
-  (declare-const Y_1 Real)
-  
   ;; Y_0
   (declare-const Y_0 Real)
   
+  ;; Y_1
+  (declare-const Y_1 Real)
+  
   ;; Goal run2'vc
-  (assert (or (and (>= Y_0 Y_1))
-              (and (>= Y_1 Y_0))
+  (assert (or (and (>= Y_1 Y_0))
+              (and (>= Y_0 Y_1))
               ))
   
   [DEBUG]{ProverSpec} Prover-tailored specification:
@@ -847,7 +847,7 @@ Test verify on acasxu
   x0 >= 55947.6909999999988940544426441192626953125
   x3 >= 1145.0
   x4 <= 60.0
-  +y0 -y1 >= 0
+  +y1 -y0 >= 0
   
   [DEBUG]{ProverSpec} Prover-tailored specification:
   x0 >= 0.0
@@ -863,7 +863,7 @@ Test verify on acasxu
   x0 >= 55947.6909999999988940544426441192626953125
   x3 >= 1145.0
   x4 <= 60.0
-  +y1 -y0 >= 0
+  +y0 -y1 >= 0
   
   [DEBUG]{ProverSpec} Prover-tailored specification:
   x0 >= 0.0
@@ -934,11 +934,11 @@ Test verify on acasxu
   out/caisar_0.onnx has 1 input nodes
   {'name': '38', 'type': {'tensorType': {'elemType': 1, 'shape': {'dim': [{'dimValue': '5'}]}}}}
   out/caisar_1.onnx has 1 input nodes
-  {'name': '221', 'type': {'tensorType': {'elemType': 1, 'shape': {'dim': [{'dimValue': '5'}]}}}}
+  {'name': '183', 'type': {'tensorType': {'elemType': 1, 'shape': {'dim': [{'dimValue': '5'}]}}}}
   out/caisar_2.onnx has 1 input nodes
-  {'name': '440', 'type': {'tensorType': {'elemType': 1, 'shape': {'dim': [{'dimValue': '5'}]}}}}
+  {'name': '325', 'type': {'tensorType': {'elemType': 1, 'shape': {'dim': [{'dimValue': '5'}]}}}}
   out/caisar_3.onnx has 1 input nodes
-  {'name': '588', 'type': {'tensorType': {'elemType': 1, 'shape': {'dim': [{'dimValue': '6'}]}}}}
+  {'name': '435', 'type': {'tensorType': {'elemType': 1, 'shape': {'dim': [{'dimValue': '6'}]}}}}
   out/caisar_4.onnx has 1 input nodes
-  {'name': '815', 'type': {'tensorType': {'elemType': 1, 'shape': {'dim': [{'dimValue': '5'}]}}}}
+  {'name': '586', 'type': {'tensorType': {'elemType': 1, 'shape': {'dim': [{'dimValue': '5'}]}}}}
   5 files checked
diff --git a/tests/cvc5.t b/tests/cvc5.t
index 5d97a193ce917c079617fdaaf277c65252cecbf0..ce6c0a686b5d60a552f1dcff0411520ad7c95d1a 100644
--- a/tests/cvc5.t
+++ b/tests/cvc5.t
@@ -9,9 +9,9 @@ Test verify
   >   use ieee_float.Float64
   >   use caisar.types.Vector
   >   use caisar.model.Model
-  >   use caisar.model.NN
+  >  
   > 
-  >   constant nn: model nn = read_model "TestNetworkONNX.onnx"
+  >   constant nn: model = read_model "TestNetworkONNX.onnx"
   > 
   >   goal G:
   >     forall i: vector t.
@@ -31,38 +31,6 @@ Test verify
   ;;; SMT-LIB2: real arithmetic
   (define-fun fp.isFinite64 ((x Float64)) Bool (not (or (fp.isInfinite x) (fp.isNaN x))))
   (define-fun fp.isIntegral64 ((x Float64)) Bool (or (fp.isZero x) (and (fp.isNormal x) (= x (fp.roundToIntegral RNE x)))))
-  (declare-sort uni 0)
-  
-  (declare-sort ty 0)
-  
-  ;; "sort"
-  (declare-fun sort (ty
-    uni) Bool)
-  
-  ;; "witness"
-  (declare-fun witness (ty) uni)
-  
-  ;; "witness_sort"
-  (assert (forall ((a ty)) (sort a (witness a))))
-  
-  ;; "int"
-  (declare-fun int () ty)
-  
-  ;; "real"
-  (declare-fun real () ty)
-  
-  ;; "t"
-  (declare-fun t () ty)
-  
-  ;; "mode"
-  (declare-fun mode () ty)
-  
-  ;; "model"
-  (declare-fun model1 (ty) ty)
-  
-  ;; "nn"
-  (declare-fun nn () ty)
-  
   ;; "add_div"
   (assert
     (forall ((x Real) (y Real) (z Real))
@@ -109,10 +77,8 @@ Test verify
   (declare-fun max_int () Int)
   
   ;; "sqr"
-  (declare-fun sqr (Real) Real)
-  
-  ;; "sqr'def"
-  (assert (forall ((x Real)) (= (sqr x) (* x x))))
+  (define-fun sqr ((x Real)) Real
+    (* x x))
   
   ;; "sqrt"
   (declare-fun sqrt1 (Real) Real)
@@ -139,20 +105,15 @@ Test verify
       (=> (and (<= 0.0 x) (<= x y)) (<= (sqrt1 x) (sqrt1 y)))))
   
   ;; "relu"
-  (declare-fun relu (Float64) Float64)
+  (define-fun relu ((a Float64)) Float64
+    (ite (fp.lt (fp #b0 #b00000000000 #b0000000000000000000000000000000000000000000000000000) a)
+      a
+      (fp #b0 #b00000000000 #b0000000000000000000000000000000000000000000000000000)))
   
-  ;; "relu'def"
-  (assert
-    (forall ((a Float64))
-      (ite (fp.lt (fp #b0 #b00000000000 #b0000000000000000000000000000000000000000000000000000) a)
-        (= (relu a) a)
-        (= (relu a) (fp #b0 #b00000000000 #b0000000000000000000000000000000000000000000000000000)))))
+  (declare-sort model1 0)
   
   ;; "nn_onnx"
-  (declare-fun nn_onnx () uni)
-  
-  ;; "nn_onnx_sort"
-  (assert (sort (model1 nn) nn_onnx))
+  (declare-fun nn_onnx () model1)
   
   ;; "x"
   (declare-fun x () Float64)
@@ -173,184 +134,114 @@ Test verify
     (fp.leq x (fp #b0 #b01111111110 #b0000000000000000000000000000000000000000000000000000)))
   
   ;; "n0_0"
-  (declare-fun n0_0 () Float64)
-  
-  ;; "n0_0'def"
-  (assert (= n0_0 x))
+  (define-fun n0_0 () Float64
+    x)
   
   ;; "n4_0"
-  (declare-fun n4_0 () Float64)
-  
-  ;; "n4_0'def"
-  (assert
-    (= n4_0 (fp #b1 #b01111111100 #b0010110110101110110001100000000000000000000000000000)))
+  (define-fun n4_0 () Float64
+    (fp #b1 #b01111111100 #b0010110110101110110001100000000000000000000000000000))
   
   ;; "n0_1"
-  (declare-fun n0_1 () Float64)
-  
-  ;; "n0_1'def"
-  (assert (= n0_1 x1))
+  (define-fun n0_1 () Float64
+    x1)
   
   ;; "n4_2"
-  (declare-fun n4_2 () Float64)
-  
-  ;; "n4_2'def"
-  (assert
-    (= n4_2 (fp #b0 #b01111111100 #b0001010101101010011100100000000000000000000000000000)))
+  (define-fun n4_2 () Float64
+    (fp #b0 #b01111111100 #b0001010101101010011100100000000000000000000000000000))
   
   ;; "n0_2"
-  (declare-fun n0_2 () Float64)
-  
-  ;; "n0_2'def"
-  (assert (= n0_2 x2))
+  (define-fun n0_2 () Float64
+    x2)
   
   ;; "n4_4"
-  (declare-fun n4_4 () Float64)
-  
-  ;; "n4_4'def"
-  (assert
-    (= n4_4 (fp #b1 #b01111111100 #b0001110001000101101000100000000000000000000000000000)))
+  (define-fun n4_4 () Float64
+    (fp #b1 #b01111111100 #b0001110001000101101000100000000000000000000000000000))
   
   ;; "n5_0"
-  (declare-fun n5_0 () Float64)
-  
-  ;; "n5_0'def"
-  (assert
-    (= n5_0 (fp.add RNE (fp.add RNE (fp.mul RNE n0_0 n4_0) (fp.mul RNE 
-    n0_1 n4_2)) (fp.mul RNE n0_2 n4_4))))
+  (define-fun n5_0 () Float64
+    (fp.add RNE (fp.add RNE (fp.mul RNE n0_0 n4_0) (fp.mul RNE n0_1 n4_2)) (fp.mul RNE 
+    n0_2 n4_4)))
   
   ;; "n3_0"
-  (declare-fun n3_0 () Float64)
-  
-  ;; "n3_0'def"
-  (assert
-    (= n3_0 (fp #b0 #b01111111101 #b0110010010001010001001100000000000000000000000000000)))
+  (define-fun n3_0 () Float64
+    (fp #b0 #b01111111101 #b0110010010001010001001100000000000000000000000000000))
   
   ;; "n6_0"
-  (declare-fun n6_0 () Float64)
-  
-  ;; "n6_0'def"
-  (assert (= n6_0 (fp.add RNE n5_0 n3_0)))
+  (define-fun n6_0 () Float64
+    (fp.add RNE n5_0 n3_0))
   
   ;; "n7_0"
-  (declare-fun n7_0 () Float64)
-  
-  ;; "n7_0'def"
-  (assert (= n7_0 (relu n6_0)))
+  (define-fun n7_0 () Float64
+    (relu n6_0))
   
   ;; "n2_1"
-  (declare-fun n2_1 () Float64)
-  
-  ;; "n2_1'def"
-  (assert
-    (= n2_1 (fp #b0 #b01111111101 #b1101010100101011101010000000000000000000000000000000)))
+  (define-fun n2_1 () Float64
+    (fp #b0 #b01111111101 #b1101010100101011101010000000000000000000000000000000))
   
   ;; "n4_1"
-  (declare-fun n4_1 () Float64)
-  
-  ;; "n4_1'def"
-  (assert
-    (= n4_1 (fp #b0 #b01111111101 #b0001111010001101100111100000000000000000000000000000)))
+  (define-fun n4_1 () Float64
+    (fp #b0 #b01111111101 #b0001111010001101100111100000000000000000000000000000))
   
   ;; "n4_3"
-  (declare-fun n4_3 () Float64)
-  
-  ;; "n4_3'def"
-  (assert
-    (= n4_3 (fp #b1 #b01111111101 #b0101111011101101101001100000000000000000000000000000)))
+  (define-fun n4_3 () Float64
+    (fp #b1 #b01111111101 #b0101111011101101101001100000000000000000000000000000))
   
   ;; "n4_5"
-  (declare-fun n4_5 () Float64)
-  
-  ;; "n4_5'def"
-  (assert
-    (= n4_5 (fp #b1 #b01111111100 #b1000001001110010000011000000000000000000000000000000)))
+  (define-fun n4_5 () Float64
+    (fp #b1 #b01111111100 #b1000001001110010000011000000000000000000000000000000))
   
   ;; "n5_1"
-  (declare-fun n5_1 () Float64)
-  
-  ;; "n5_1'def"
-  (assert
-    (= n5_1 (fp.add RNE (fp.add RNE (fp.mul RNE n0_0 n4_1) (fp.mul RNE 
-    n0_1 n4_3)) (fp.mul RNE n0_2 n4_5))))
+  (define-fun n5_1 () Float64
+    (fp.add RNE (fp.add RNE (fp.mul RNE n0_0 n4_1) (fp.mul RNE n0_1 n4_3)) (fp.mul RNE 
+    n0_2 n4_5)))
   
   ;; "n3_1"
-  (declare-fun n3_1 () Float64)
-  
-  ;; "n3_1'def"
-  (assert
-    (= n3_1 (fp #b1 #b01111111110 #b0001011011111110000011100000000000000000000000000000)))
+  (define-fun n3_1 () Float64
+    (fp #b1 #b01111111110 #b0001011011111110000011100000000000000000000000000000))
   
   ;; "n6_1"
-  (declare-fun n6_1 () Float64)
-  
-  ;; "n6_1'def"
-  (assert (= n6_1 (fp.add RNE n5_1 n3_1)))
+  (define-fun n6_1 () Float64
+    (fp.add RNE n5_1 n3_1))
   
   ;; "n7_1"
-  (declare-fun n7_1 () Float64)
-  
-  ;; "n7_1'def"
-  (assert (= n7_1 (relu n6_1)))
+  (define-fun n7_1 () Float64
+    (relu n6_1))
   
   ;; "n2_3"
-  (declare-fun n2_3 () Float64)
-  
-  ;; "n2_3'def"
-  (assert
-    (= n2_3 (fp #b0 #b01111111100 #b1101010111101000110000100000000000000000000000000000)))
+  (define-fun n2_3 () Float64
+    (fp #b0 #b01111111100 #b1101010111101000110000100000000000000000000000000000))
   
   ;; "n8_1"
-  (declare-fun n8_1 () Float64)
-  
-  ;; "n8_1'def"
-  (assert (= n8_1 (fp.add RNE (fp.mul RNE n7_0 n2_1) (fp.mul RNE n7_1 n2_3))))
+  (define-fun n8_1 () Float64
+    (fp.add RNE (fp.mul RNE n7_0 n2_1) (fp.mul RNE n7_1 n2_3)))
   
   ;; "n1_1"
-  (declare-fun n1_1 () Float64)
-  
-  ;; "n1_1'def"
-  (assert
-    (= n1_1 (fp #b1 #b01111111110 #b0011101101010011010001100000000000000000000000000000)))
+  (define-fun n1_1 () Float64
+    (fp #b1 #b01111111110 #b0011101101010011010001100000000000000000000000000000))
   
   ;; "n9_1"
-  (declare-fun n9_1 () Float64)
-  
-  ;; "n9_1'def"
-  (assert (= n9_1 (fp.add RNE n8_1 n1_1)))
+  (define-fun n9_1 () Float64
+    (fp.add RNE n8_1 n1_1))
   
   ;; "n2_0"
-  (declare-fun n2_0 () Float64)
-  
-  ;; "n2_0'def"
-  (assert
-    (= n2_0 (fp #b1 #b01111111101 #b0111110011110010111010000000000000000000000000000000)))
+  (define-fun n2_0 () Float64
+    (fp #b1 #b01111111101 #b0111110011110010111010000000000000000000000000000000))
   
   ;; "n2_2"
-  (declare-fun n2_2 () Float64)
-  
-  ;; "n2_2'def"
-  (assert
-    (= n2_2 (fp #b0 #b01111111110 #b0100011001000110100010000000000000000000000000000000)))
+  (define-fun n2_2 () Float64
+    (fp #b0 #b01111111110 #b0100011001000110100010000000000000000000000000000000))
   
   ;; "n8_0"
-  (declare-fun n8_0 () Float64)
-  
-  ;; "n8_0'def"
-  (assert (= n8_0 (fp.add RNE (fp.mul RNE n7_0 n2_0) (fp.mul RNE n7_1 n2_2))))
+  (define-fun n8_0 () Float64
+    (fp.add RNE (fp.mul RNE n7_0 n2_0) (fp.mul RNE n7_1 n2_2)))
   
   ;; "n1_0"
-  (declare-fun n1_0 () Float64)
-  
-  ;; "n1_0'def"
-  (assert
-    (= n1_0 (fp #b0 #b01111111110 #b0011000001100111100001000000000000000000000000000000)))
+  (define-fun n1_0 () Float64
+    (fp #b0 #b01111111110 #b0011000001100111100001000000000000000000000000000000))
   
   ;; "n9_0"
-  (declare-fun n9_0 () Float64)
-  
-  ;; "n9_0'def"
-  (assert (= n9_0 (fp.add RNE n8_0 n1_0)))
+  (define-fun n9_0 () Float64
+    (fp.add RNE n8_0 n1_0))
   
   ;; Goal "G"
   ;; File "stdin", line 9, characters 7-8
diff --git a/tests/dataset.t b/tests/dataset.t
index 19eb49462e2ffa02b0a1d90e1d0b3f0aa5ca8f25..c27e24253239b6cbb8331fb013e04efea54dc3be 100644
--- a/tests/dataset.t
+++ b/tests/dataset.t
@@ -17,7 +17,7 @@ Test verify on dataset
   >   use ieee_float.Float64
   >   use caisar.types.Float64WithBounds as Feature
   >   use caisar.types.IntWithBounds as Label
-  >   use caisar.model.NN
+  >   use caisar.model.Model
   >   use caisar.dataset.CSV
   >   use caisar.robust.ClassRobustCSV
   > 
diff --git a/tests/goal.t b/tests/goal.t
index 7c23a18a40b0f6d9020f94c9875f09bc1c5ef84f..7b3a23cca48a37e9b1de62c4abee42d0aa20c6fb 100644
--- a/tests/goal.t
+++ b/tests/goal.t
@@ -9,9 +9,9 @@ Test verify
   >   use ieee_float.Float64
   >   use caisar.types.Vector
   >   use caisar.model.Model
-  >   use caisar.model.NN
+  >  
   > 
-  >   constant nn: model nn = read_model "TestNetwork.nnet"
+  >   constant nn: model = read_model "TestNetwork.nnet"
   > 
   >   goal G:
   >     forall i: vector t.
@@ -35,9 +35,9 @@ Test verify
   >   use ieee_float.Float64
   >   use caisar.types.Vector
   >   use caisar.model.Model
-  >   use caisar.model.NN
+  >  
   > 
-  >   constant nn: model nn = read_model "TestNetwork.nnet"
+  >   constant nn: model = read_model "TestNetwork.nnet"
   > 
   >   goal G:
   >     forall i: vector t.
diff --git a/tests/interpretation_fail.t b/tests/interpretation_fail.t
index a389622e32687f4c40c1c8504faa887c96b42f54..bd06a5427d4e707ccae9a4a27d918c27528197ba 100644
--- a/tests/interpretation_fail.t
+++ b/tests/interpretation_fail.t
@@ -6,9 +6,9 @@ Test interpret fail
   >   use ieee_float.Float64
   >   use caisar.types.Vector
   >   use caisar.model.Model
-  >   use caisar.model.NN
+  >  
   > 
-  >   constant nn: model nn = read_model "TestNetwork.nnet"
+  >   constant nn: model = read_model "TestNetwork.nnet"
   > 
   >   goal G1:
   >     forall i.
@@ -26,9 +26,9 @@ Test interpret fail
   >   use ieee_float.Float64
   >   use caisar.types.Vector
   >   use caisar.model.Model
-  >   use caisar.model.NN
+  >  
   > 
-  >   constant nn: model nn = read_model "TestNetwork.nnet"
+  >   constant nn: model = read_model "TestNetwork.nnet"
   > 
   >   predicate off_by_one (i: vector t) = let dummy = i[5] in dummy .<= (1.0:t)
   > 
@@ -48,9 +48,9 @@ Test interpret fail
   >   use ieee_float.Float64
   >   use caisar.types.Vector
   >   use caisar.model.Model
-  >   use caisar.model.NN
+  >  
   > 
-  >   constant nn: model nn = read_model "TestNetwork.nnet"
+  >   constant nn: model = read_model "TestNetwork.nnet"
   > 
   >   predicate valid_input (i: vector t) = (0.0:t) .<= i[0] \/ (0.0:t) .>= i[0]
   > 
@@ -71,9 +71,9 @@ Test interpret fail
   >   use ieee_float.Float64
   >   use caisar.types.Vector
   >   use caisar.model.Model
-  >   use caisar.model.NN
+  >  
   > 
-  >   constant nn: model nn = read_model "nonexistent.nnet"
+  >   constant nn: model = read_model "nonexistent.nnet"
   > 
   >   goal G4:
   >     forall i: vector t.
@@ -91,9 +91,9 @@ Test interpret fail
   >   use ieee_float.Float64
   >   use caisar.types.Vector
   >   use caisar.model.Model
-  >   use caisar.model.NN
+  >  
   > 
-  >   constant nn: model nn = read_model "TestNetworkONNX.onnx"
+  >   constant nn: model = read_model "TestNetworkONNX.onnx"
   > 
   >   goal G5:
   >     forall i: vector t.
@@ -105,7 +105,7 @@ Test interpret fail
 
   $ caisar verify --prover nnenum file.mlw
   [ERROR] "file.mlw", line 12, characters 23-24:
-          Unexpected vector of length 5 in input to neural network model './TestNetworkONNX.onnx',
+          Unexpected vector of length 5 in input to model './TestNetworkONNX.onnx',
           which expects input vectors of length 3
 
   $ cat > file.mlw <<EOF
@@ -113,9 +113,9 @@ Test interpret fail
   >   use ieee_float.Float64
   >   use caisar.types.Vector
   >   use caisar.model.Model
-  >   use caisar.model.NN
+  >  
   > 
-  >   constant nn: model nn = read_model "TestNetwork.nnet"
+  >   constant nn: model = read_model "TestNetwork.nnet"
   > 
   >   goal G6:
   >     forall i: vector t.
@@ -133,9 +133,9 @@ Test interpret fail
   >   use ieee_float.Float64
   >   use caisar.types.Vector
   >   use caisar.model.Model
-  >   use caisar.model.NN
+  >  
   > 
-  >   constant nn: model nn = read_model "TestNetwork.nnet"
+  >   constant nn: model = read_model "TestNetwork.nnet"
   > 
   >   goal G7:
   >     forall i: vector t.
@@ -154,7 +154,7 @@ Test interpret fail
   >   use ieee_float.Float64
   >   use caisar.types.Vector
   >   use caisar.model.Model
-  >   use caisar.model.SVM
+  > 
   > 
   >   goal G8:
   >     let svm = read_model "TestSVM.ovo" in
@@ -165,15 +165,14 @@ Test interpret fail
 
   $ caisar verify --prover SAVer file.mlw
   [ERROR] "file.mlw", line 10, characters 14-15:
-          Unexpected vector of length 5 in input to SVM model './TestSVM.ovo',
+          Unexpected vector of length 5 in input to model './TestSVM.ovo',
           which expects input vectors of length 3
 
   $ cat > file.mlw <<EOF
   > theory T9
   >   use ieee_float.Float64
   >   use caisar.types.Vector
-  >   use caisar.model.Model
-  >   use caisar.model.SVM
+  > 
   > 
   >   goal G9:
   >     let svm = read_model "TestSVM.ovo" in
@@ -182,6 +181,6 @@ Test interpret fail
   > end
   > EOF
 
-  $ caisar verify --prover SAVer file.mlw
-  [ERROR] "file.mlw", line 10, characters 35-36:
-          Index constant 4 is out-of-bounds [0,1]
+  $ caisar verify --ltag InterpretGoal --prover SAVer file.mlw
+  [ERROR] File "file.mlw", line 7, characters 14-24:
+          unbound function or predicate symbol 'read_model'
diff --git a/tests/marabou.t b/tests/marabou.t
index f8bda433c368258ca50135604bf9cc4f1c8c1a08..877caabd60842bcca9082a97e8c29b9667c87b66 100644
--- a/tests/marabou.t
+++ b/tests/marabou.t
@@ -9,9 +9,9 @@ Test verify
   >   use ieee_float.Float64
   >   use caisar.types.Vector
   >   use caisar.model.Model
-  >   use caisar.model.NN
+  >  
   > 
-  >   constant nn: model nn = read_model "TestNetwork.nnet"
+  >   constant nn: model = read_model "TestNetwork.nnet"
   > 
   >   goal G:
   >     forall i: vector t.
diff --git a/tests/nir_to_onnx.t b/tests/nir_to_onnx.t
index 6eab31a9d053fde60da04a0a56520ca73479071a..b66e9002045dd8b3d25c793bcf4dcb385cf3ae76 100644
--- a/tests/nir_to_onnx.t
+++ b/tests/nir_to_onnx.t
@@ -9,9 +9,8 @@ Test verify
   >   use ieee_float.Float64
   >   use caisar.types.Vector
   >   use caisar.model.Model
-  >   use caisar.model.NN
   > 
-  >   constant nn: model nn = read_model "TestNetworkONNX.onnx"
+  >   constant nn: model = read_model "TestNetworkONNX.onnx"
   > 
   >   goal G:
   >     forall i: vector t.
@@ -35,9 +34,8 @@ Input name should be 0
   >   use ieee_float.Float64
   >   use caisar.types.Vector
   >   use caisar.model.Model
-  >   use caisar.model.NN
   > 
-  >   constant nn: model nn = read_model "TestNetwork.nnet"
+  >   constant nn: model = read_model "TestNetwork.nnet"
   > 
   >   goal G:
   >     forall i: vector t.
diff --git a/tests/pyrat.t b/tests/pyrat.t
index 17f51fb46f96a20580ca50402bd836bb4392d300..1a40ef828386b030dcc1e8044aef66e908d33fd4 100644
--- a/tests/pyrat.t
+++ b/tests/pyrat.t
@@ -9,9 +9,9 @@ Test verify
   >   use ieee_float.Float64
   >   use caisar.types.Vector
   >   use caisar.model.Model
-  >   use caisar.model.NN
+  >  
   > 
-  >   constant nn: model nn = read_model "TestNetwork.nnet"
+  >   constant nn: model = read_model "TestNetwork.nnet"
   > 
   >   goal G:
   >     forall i: vector t.
diff --git a/tests/pyrat_onnx.t b/tests/pyrat_onnx.t
index 88831808d8a461092423a81e642939249a3e275d..1d028cc6c002d7455a3197e6c9b0b5fdff9c5f5d 100644
--- a/tests/pyrat_onnx.t
+++ b/tests/pyrat_onnx.t
@@ -9,9 +9,9 @@ Test verify
   >   use ieee_float.Float64
   >   use caisar.types.Vector
   >   use caisar.model.Model
-  >   use caisar.model.NN
+  >  
   > 
-  >   constant nn: model nn = read_model "TestNetworkONNX.onnx"
+  >   constant nn: model = read_model "TestNetworkONNX.onnx"
   > 
   >   goal G:
   >     forall i: vector t.
diff --git a/tests/saver.t b/tests/saver.t
index 3e474d64de06d88ca0daef6f54dedb3ba5851f32..e5f482b1b7d19482068c878cd60121e08238c006 100644
--- a/tests/saver.t
+++ b/tests/saver.t
@@ -36,7 +36,7 @@ Test verify
   >   use int.Int
   >   use caisar.types.Float64WithBounds as Feature
   >   use caisar.types.IntWithBounds as Label
-  >   use caisar.model.SVM
+  >   use caisar.model.Model
   >   use caisar.dataset.CSV
   >   use caisar.robust.ClassRobustCSV
   > 
@@ -69,7 +69,7 @@ Test verify
   >   use caisar.types.Vector
   >   use caisar.types.Float64WithBounds as Feature
   >   use caisar.types.IntWithBounds as Label
-  >   use caisar.model.SVM
+  >   use caisar.model.Model
   >   use caisar.dataset.CSV
   >   use caisar.robust.ClassRobustCSV
   > 
diff --git a/utils/dune b/utils/dune
index a70bed45a80178d4c3ebaa4256f5184b220e882a..a9a3d356749fe59eff68c361bcc17ff099389b1e 100644
--- a/utils/dune
+++ b/utils/dune
@@ -1,5 +1,5 @@
 (library
  (name caisar_logging)
  (package caisar)
- (libraries base csv logs logs.cli logs.fmt fmt why3)
+ (libraries base csv logs logs.cli logs.fmt fmt fmt.tty why3)
  (synopsis "Logging utilities for CAISAR"))