diff --git a/doc/usage.rst b/doc/usage.rst
index 9e00ec4c4d1f7670c3f5bfa4e0ccb2212bcd4e49..70e7ccc875ad9b293bab3d99e3dbc9b6e26072e5 100644
--- a/doc/usage.rst
+++ b/doc/usage.rst
@@ -65,6 +65,7 @@ tag, use the option ``--ltag=TAG``, with ``TAG`` one of the following:
 * ``Autodetect``: display the output of the autodetect routine of CAISAR described in :ref:`prover-registration`
 * ``ProverSpec``: display the actual specification provided to the prover
 * ``ProverCall``: display the actual command used to call the prover
+* ``InterpretGoal``: display the result of CAISAR's interpretation of each goal
 * ``StackTrace``: display the stack trace upon a CAISAR error
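For example, assuming a prover named Marabou has already been registered as described in :ref:`prover-registration`, an invocation along the lines of ``caisar verify --prover Marabou --ltag=InterpretGoal examples/acasxu/property_1.why`` would display the goal interpretation for the first ACAS Xu property (the command shape and prover name are illustrative; only the ``--ltag=TAG`` option itself is documented here).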
 
 Built-in properties
diff --git a/examples/acasxu/property_1.why b/examples/acasxu/property_1.why
index 6e51d7b8cb79d41f4bda8a80f8fb5456225cabb0..d1218b2508038c185dac5fa716f5056819edc198 100644
--- a/examples/acasxu/property_1.why
+++ b/examples/acasxu/property_1.why
@@ -1,13 +1,20 @@
 theory ACASXU_P1
-  use ACASXU_1_1.AsTuple
   use ieee_float.Float64
+  use bool.Bool
+  use int.Int
+  use interpretation.Vector
+  use interpretation.NeuralNetwork
 
-  goal P1_1_1: forall x0 x1 x2 x3 x4.
-     (0.5999999999999999777955395074968691915273666381835937500000000000:t) .<= x0 .<= (0.6798577687000000313588543576770462095737457275390625000000000000:t) ->
-     (-0.5:t) .<= x1 .<= (0.5:t) ->
-     (-0.5:t) .<= x2 .<= (0.5:t) ->
-     (0.4500000000000000111022302462515654042363166809082031250000000000:t) .<= x3 .<= (0.5:t) ->
-     (-0.5:t) .<= x4 .<= (-0.4500000000000000111022302462515654042363166809082031250000000000:t) ->
-     let (y0, _, _, _, _) = nn_apply x0 x1 x2 x3 x4 in
-     y0 .<= (3.9911256458999999630066213285317644476890563964843750000000000000:t)
+  constant nn_1_1: nn = read_neural_network "nets/onnx/ACASXU_1_1.onnx" ONNX
+
+  predicate valid_input (i: vector t) =
+    (0.5999999999999999777955395074968691915273666381835937500000000000:t) .<= i[0] .<= (0.6798577687000000313588543576770462095737457275390625000000000000:t)
+    /\ (-0.5:t) .<= i[1] .<= (0.5:t)
+    /\ (-0.5:t) .<= i[2] .<= (0.5:t)
+    /\ (0.4500000000000000111022302462515654042363166809082031250000000000:t) .<= i[3] .<= (0.5:t)
+    /\ (-0.5:t) .<= i[4] .<= (-0.4500000000000000111022302462515654042363166809082031250000000000:t)
+
+  goal P1_1_1:
+    forall i: vector t. has_length i 5 -> valid_input i ->
+      (nn_1_1@@i)[0] .<= (3.9911256458999999630066213285317644476890563964843750000000000000:t)
 end
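Throughout these properties, the specifications rely on CAISAR's ``interpretation.Vector`` and ``interpretation.NeuralNetwork`` theories: ``read_neural_network "nets/onnx/ACASXU_1_1.onnx" ONNX`` binds a constant of type ``nn`` to an ONNX model on disk, ``i[k]`` selects the ``k``-th component of an input vector, ``nn_1_1@@i`` applies the network to that vector, and ``has_length i 5`` fixes the input dimension so that the bounds in ``valid_input`` constrain every component.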
diff --git a/examples/acasxu/property_10.why b/examples/acasxu/property_10.why
index 6e260fabebd5e52a51fa5640be659b426eccdec2..9850069a7ca5359f2f144933637098a34354689c 100644
--- a/examples/acasxu/property_10.why
+++ b/examples/acasxu/property_10.why
@@ -1,13 +1,22 @@
 theory ACASXU_P10
-  use ACASXU_4_5.AsTuple
   use ieee_float.Float64
+  use bool.Bool
+  use int.Int
+  use interpretation.Vector
+  use interpretation.NeuralNetwork
 
-  goal P10_4_5: forall x0 x1 x2 x3 x4.
-     (0.2689779999999999948734341614908771589398384094238281250000000000:t) .<= x0 .<= (0.6798579999999999623483404320722911506891250610351562500000000000:t) ->
-     (0.1114079999999999931459271351741335820406675338745117187500000000:t) .<= x1 .<= (0.5:t) ->
-     (-0.5:t) .<= x2 .<= (-0.4933803236000000036476365039561642333865165710449218750000000000:t) ->
-     (0.2272730000000000027959856652159942314028739929199218750000000000:t) .<= x3 .<= (0.5:t) ->
-     (0.0:t) .<= x4 .<= (0.5:t) ->
-     let (y0, y1, y2, y3, y4) = nn_apply x0 x1 x2 x3 x4 in
-     y0 .<= y1 /\ y0 .<= y2 /\ y0 .<= y3 /\ y0 .<= y4
+  constant nn_4_5: nn = read_neural_network "nets/onnx/ACASXU_4_5.onnx" ONNX
+
+  predicate valid_input (i: vector t) =
+    (0.2689779999999999948734341614908771589398384094238281250000000000:t) .<= i[0] .<= (0.6798579999999999623483404320722911506891250610351562500000000000:t)
+    /\ (0.1114079999999999931459271351741335820406675338745117187500000000:t) .<= i[1] .<= (0.5:t)
+    /\ (-0.5:t) .<= i[2] .<= (-0.4933803236000000036476365039561642333865165710449218750000000000:t)
+    /\ (0.2272730000000000027959856652159942314028739929199218750000000000:t) .<= i[3] .<= (0.5:t)
+    /\ (0.0:t) .<= i[4] .<= (0.5:t)
+
+  predicate is_min (o: vector t) (i: int) =
+    forall j: int. 0 <= j < 5 -> i <> j -> o[i] .<= o[j]
+
+  goal P10_4_5:
+    forall i: vector t. has_length i 5 -> valid_input i -> is_min (nn_4_5@@i) 0
 end
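In the ACAS Xu benchmark the advisory with the minimal score is the one the networks select, so ``is_min (nn_4_5@@i) 0`` states that output 0 (clear-of-conflict) is the selected advisory; ``is_max``, used in property 2 below, is the dual predicate over ``.>=``.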
diff --git a/examples/acasxu/property_2.why b/examples/acasxu/property_2.why
index dafa456db9a5d52b3cab03d4194b548dce7c3848..80f7309e32d26da8387197a988de696a4e1f54aa 100644
--- a/examples/acasxu/property_2.why
+++ b/examples/acasxu/property_2.why
@@ -1,34 +1,26 @@
 theory ACASXU_P2
-  use ACASXU_1_9.AsTuple as N19
-  use ACASXU_2_2.AsTuple as N22
   use ieee_float.Float64
+  use bool.Bool
+  use int.Int
+  use interpretation.Vector
+  use interpretation.NeuralNetwork
 
-  constant x0:t
-  constant x1:t
-  constant x2:t
-  constant x3:t
-  constant x4:t
+  constant nn_1_9: nn = read_neural_network "nets/onnx/ACASXU_1_9.onnx" ONNX
+  constant nn_2_2: nn = read_neural_network "nets/onnx/ACASXU_2_2.onnx" ONNX
 
-  axiom H0:
-     (0.5999999999999999777955395074968691915273666381835937500000000000:t) .<= x0 .<= (0.6798577687000000313588543576770462095737457275390625000000000000:t)
+  predicate valid_input (i: vector t) =
+    (0.5999999999999999777955395074968691915273666381835937500000000000:t) .<= i[0] .<= (0.6798577687000000313588543576770462095737457275390625000000000000:t)
+    /\ (-0.5:t) .<= i[1] .<= (0.5:t)
+    /\ (-0.5:t) .<= i[2] .<= (0.5:t)
+    /\ (0.4500000000000000111022302462515654042363166809082031250000000000:t) .<= i[3] .<= (0.5:t)
+    /\ (-0.5:t) .<= i[4] .<= (-0.4500000000000000111022302462515654042363166809082031250000000000:t)
 
-  axiom H1:
-     (-0.5:t) .<= x1 .<= (0.5:t)
-
-  axiom H2:
-     (-0.5:t) .<= x2 .<= (0.5:t)
-
-  axiom H3:
-     (0.4500000000000000111022302462515654042363166809082031250000000000:t) .<= x3 .<= (0.5:t)
-
-  axiom H4:
-     (-0.5:t) .<= x4 .<= (-0.4500000000000000111022302462515654042363166809082031250000000000:t)
+  predicate is_max (o: vector t) (i: int) =
+    forall j: int. 0 <= j < 5 -> i <> j -> o[i] .>= o[j]
 
   goal P2_1_9:
-     let (y0, y1, y2, y3, y4) = N19.nn_apply x0 x1 x2 x3 x4 in
-     y0 .<= y1 \/ y0 .<= y2 \/ y0 .<= y3 \/ y0 .<= y4
+    forall i: vector t. has_length i 5 -> valid_input i -> not (is_max (nn_1_9@@i) 0)
 
   goal P2_2_2:
-     let (y0, y1, y2, y3, y4) = N22.nn_apply x0 x1 x2 x3 x4 in
-     y0 .<= y1 \/ y0 .<= y2 \/ y0 .<= y3 \/ y0 .<= y4
+    forall i: vector t. has_length i 5 -> valid_input i -> not (is_max (nn_2_2@@i) 0)
 end
diff --git a/examples/acasxu/property_3.why b/examples/acasxu/property_3.why
index 84f816bf108f543074a6b27cf065b04d6e0c6cdc..d6ba092fb992ea370edf09d804bd8f4f873c6f83 100644
--- a/examples/acasxu/property_3.why
+++ b/examples/acasxu/property_3.why
@@ -1,34 +1,26 @@
 theory ACASXU_P3
-  use ACASXU_1_1.AsTuple as N11
-  use ACASXU_2_7.AsTuple as N27
   use ieee_float.Float64
+  use bool.Bool
+  use int.Int
+  use interpretation.Vector
+  use interpretation.NeuralNetwork
 
-  constant x0:t
-  constant x1:t
-  constant x2:t
-  constant x3:t
-  constant x4:t
+  constant nn_1_1: nn = read_neural_network "nets/onnx/ACASXU_1_1.onnx" ONNX
+  constant nn_2_7: nn = read_neural_network "nets/onnx/ACASXU_2_7.onnx" ONNX
 
-  axiom H0:
-     (-0.3035311560999999769272506000561406835913658142089843750000000000:t) .<= x0 .<= (-0.2985528118999999924731980627257144078612327575683593750000000000:t)
+  predicate valid_input (i: vector t) =
+    (-0.3035311560999999769272506000561406835913658142089843750000000000:t) .<= i[0] .<= (-0.2985528118999999924731980627257144078612327575683593750000000000:t)
+    /\ (-0.0095492965999999998572000947660853853449225425720214843750000000:t) .<= i[1] .<= (0.0095492965999999998572000947660853853449225425720214843750000000:t)
+    /\ (0.4933803236000000036476365039561642333865165710449218750000000000:t) .<= i[2] .<= (0.5:t)
+    /\ (0.2999999999999999888977697537484345957636833190917968750000000000:t) .<= i[3] .<= (0.5:t)
+    /\ (0.2999999999999999888977697537484345957636833190917968750000000000:t) .<= i[4] .<= (0.5:t)
 
-  axiom H1:
-     (-0.0095492965999999998572000947660853853449225425720214843750000000:t) .<= x1 .<= (0.0095492965999999998572000947660853853449225425720214843750000000:t)
-
-  axiom H2:
-     (0.4933803236000000036476365039561642333865165710449218750000000000:t) .<= x2 .<= (0.5:t)
-
-  axiom H3:
-     (0.2999999999999999888977697537484345957636833190917968750000000000:t) .<= x3 .<= (0.5:t)
-
-  axiom H4:
-     (0.2999999999999999888977697537484345957636833190917968750000000000:t) .<= x4 .<= (0.5:t)
+  predicate is_min (o: vector t) (i: int) =
+    forall j: int. 0 <= j < 5 -> i <> j -> o[i] .<= o[j]
 
   goal P3_1_1:
-     let (y0, y1, y2, y3, y4) = N11.nn_apply x0 x1 x2 x3 x4 in
-     y0 .>= y1 \/ y0 .>= y2 \/ y0 .>= y3 \/ y0 .>= y4
+    forall i: vector t. has_length i 5 -> valid_input i -> not (is_min (nn_1_1@@i) 0)
 
   goal P3_2_7:
-     let (y0, y1, y2, y3, y4) = N27.nn_apply x0 x1 x2 x3 x4 in
-     y0 .>= y1 \/ y0 .>= y2 \/ y0 .>= y3 \/ y0 .>= y4
+    forall i: vector t. has_length i 5 -> valid_input i -> not (is_min (nn_2_7@@i) 0)
 end
diff --git a/examples/acasxu/property_4.why b/examples/acasxu/property_4.why
index cfc7df2af37117109cfb4c544497dfe297188be1..39aca92f3aa47834ab9a665ab7e04ce97423d700 100644
--- a/examples/acasxu/property_4.why
+++ b/examples/acasxu/property_4.why
@@ -1,13 +1,26 @@
 theory ACASXU_P4
-  use ACASXU_1_1.AsTuple
   use ieee_float.Float64
+  use bool.Bool
+  use int.Int
+  use interpretation.Vector
+  use interpretation.NeuralNetwork
 
-  goal P4_1_1: forall x0 x1 x2 x3 x4.
-     (-0.3035311560999999769272506000561406835913658142089843750000000000:t) .<= x0 .<= (-0.2985528118999999924731980627257144078612327575683593750000000000:t) ->
-     (-0.0095492965999999998572000947660853853449225425720214843750000000:t) .<= x1 .<= (0.0095492965999999998572000947660853853449225425720214843750000000:t) ->
-     (0.0:t) .<= x2 .<= (0.0:t) ->
-     (0.3181818182000000216902435568044893443584442138671875000000000000:t) .<= x3 .<= (0.5:t) ->
-     (0.0833333333000000064938461719066253863275051116943359375000000000:t) .<= x4 .<= (0.1666666666999999935061538280933746136724948883056640625000000000:t) ->
-     let (y0, y1, y2, y3, y4) = nn_apply x0 x1 x2 x3 x4 in
-     y0 .>= y1 \/ y0 .>= y2 \/ y0 .>= y3 \/ y0 .>= y4
+  constant nn_1_1: nn = read_neural_network "nets/onnx/ACASXU_1_1.onnx" ONNX
+  constant nn_1_9: nn = read_neural_network "nets/onnx/ACASXU_1_9.onnx" ONNX
+
+  predicate valid_input (i: vector t) =
+    (-0.3035311560999999769272506000561406835913658142089843750000000000:t) .<= i[0] .<= (-0.2985528118999999924731980627257144078612327575683593750000000000:t)
+    /\ (-0.0095492965999999998572000947660853853449225425720214843750000000:t) .<= i[1] .<= (0.0095492965999999998572000947660853853449225425720214843750000000:t)
+    /\ (0.0:t) .<= i[2] .<= (0.0:t)
+    /\ (0.3181818182000000216902435568044893443584442138671875000000000000:t) .<= i[3] .<= (0.5:t)
+    /\ (0.0833333333000000064938461719066253863275051116943359375000000000:t) .<= i[4] .<= (0.1666666666999999935061538280933746136724948883056640625000000000:t)
+
+  predicate is_min (o: vector t) (i: int) =
+    forall j: int. 0 <= j < 5 -> i <> j -> o[i] .<= o[j]
+
+  goal P4_1_1:
+    forall i: vector t. has_length i 5 -> valid_input i -> not (is_min (nn_1_1@@i) 0)
+
+  goal P4_1_9:
+    forall i: vector t. has_length i 5 -> valid_input i -> not (is_min (nn_1_9@@i) 0)
 end
diff --git a/examples/acasxu/property_5.why b/examples/acasxu/property_5.why
index df38218dfffe142a11f8bb46828124817e6bdefc..167e06d597f5bd03e1065af5a3912084c007e611 100644
--- a/examples/acasxu/property_5.why
+++ b/examples/acasxu/property_5.why
@@ -1,13 +1,22 @@
 theory ACASXU_P5
-  use ACASXU_1_1.AsTuple
   use ieee_float.Float64
+  use bool.Bool
+  use int.Int
+  use interpretation.Vector
+  use interpretation.NeuralNetwork
 
-  goal P5_1_1: forall x0 x1 x2 x3 x4.
-     (-0.3242740000000000066826544298237422481179237365722656250000000000:t) .<= x0 .<= (-0.3217849999999999877076106713502667844295501708984375000000000000:t) ->
-     (0.0318309999999999981845633101329440250992774963378906250000000000:t) .<= x1 .<= (0.0636619999999999963691266202658880501985549926757812500000000000:t) ->
-     (-0.5:t) .<= x2 .<= (-0.4992039999999999810853523740661330521106719970703125000000000000:t) ->
-     (-0.5:t) .<= x3 .<= (-0.2272730000000000027959856652159942314028739929199218750000000000:t) ->
-     (-0.5:t) .<= x4 .<= (-0.1666670000000000095852215054037515074014663696289062500000000000:t) ->
-     let (y0, y1, y2, y3, y4) = nn_apply x0 x1 x2 x3 x4 in
-     y4 .<= y1 \/ y4 .<= y2 \/ y4 .<= y3 \/ y4 .<= y0
+  constant nn_1_1: nn = read_neural_network "nets/onnx/ACASXU_1_1.onnx" ONNX
+
+  predicate valid_input (i: vector t) =
+    (-0.3242740000000000066826544298237422481179237365722656250000000000:t) .<= i[0] .<= (-0.3217849999999999877076106713502667844295501708984375000000000000:t)
+    /\ (0.0318309999999999981845633101329440250992774963378906250000000000:t) .<= i[1] .<= (0.0636619999999999963691266202658880501985549926757812500000000000:t)
+    /\ (-0.5:t) .<= i[2] .<= (-0.4992039999999999810853523740661330521106719970703125000000000000:t)
+    /\ (-0.5:t) .<= i[3] .<= (-0.2272730000000000027959856652159942314028739929199218750000000000:t)
+    /\ (-0.5:t) .<= i[4] .<= (-0.1666670000000000095852215054037515074014663696289062500000000000:t)
+
+  predicate is_min (o: vector t) (i: int) =
+    forall j: int. 0 <= j < 5 -> i <> j -> o[i] .<= o[j]
+
+  goal P5_1_1:
+    forall i: vector t. has_length i 5 -> valid_input i -> is_min (nn_1_1@@i) 4
 end
diff --git a/examples/acasxu/property_6.why b/examples/acasxu/property_6.why
index aea0e63aba341e4fb66a1b34dce7bfdf0595501f..0b63c8ccaf68951ecc16f95d59ee10c741841294 100644
--- a/examples/acasxu/property_6.why
+++ b/examples/acasxu/property_6.why
@@ -1,15 +1,24 @@
 theory ACASXU_P6
-  use ACASXU_1_1.AsTuple
   use ieee_float.Float64
+  use bool.Bool
+  use int.Int
+  use interpretation.Vector
+  use interpretation.NeuralNetwork
 
-  goal P6_1_1: forall x0 x1 x2 x3 x4.
-     (-0.1292889999999999872670741751790046691894531250000000000000000000:t) .<= x0 .<= (0.7004349250000000415283807342348154634237289428710937500000000000:t) ->
-     (-0.5:t) .<= x1 .<= (-0.1114079999999999931459271351741335820406675338745117187500000000:t)
+  constant nn_1_1: nn = read_neural_network "nets/onnx/ACASXU_1_1.onnx" ONNX
+
+  predicate valid_input (i: vector t) =
+    (-0.1292889999999999872670741751790046691894531250000000000000000000:t) .<= i[0] .<= (0.7004349250000000415283807342348154634237289428710937500000000000:t)
+    /\ ((-0.5:t) .<= i[1] .<= (-0.1114079999999999931459271351741335820406675338745117187500000000:t)
      \/
-     (0.1114079999999999931459271351741335820406675338745117187500000000:t) .<= x1 .<= (0.5:t) ->
-     (-0.5:t) .<= x2 .<= (-0.4992039999999999810853523740661330521106719970703125000000000000:t) ->
-     (-0.5:t) .<= x3 .<= (0.5:t) ->
-     (-0.5:t) .<= x4 .<= (0.5:t) ->
-     let (y0, y1, y2, y3, y4) = nn_apply x0 x1 x2 x3 x4 in
-     y0 .<= y1 /\ y0 .<= y2 /\ y0 .<= y3 /\ y0 .<= y4
+     (0.1114079999999999931459271351741335820406675338745117187500000000:t) .<= i[1] .<= (0.5:t))
+    /\ (-0.5:t) .<= i[2] .<= (-0.4992039999999999810853523740661330521106719970703125000000000000:t)
+    /\ (-0.5:t) .<= i[3] .<= (0.5:t)
+    /\ (-0.5:t) .<= i[4] .<= (0.5:t)
+
+  predicate is_min (o: vector t) (i: int) =
+    forall j: int. 0 <= j < 5 -> i <> j -> o[i] .<= o[j]
+
+  goal P6_1_1:
+    forall i: vector t. has_length i 5 -> valid_input i -> is_min (nn_1_1@@i) 0
 end
diff --git a/examples/acasxu/property_7.why b/examples/acasxu/property_7.why
index f33ea31617d538d777347d2436945ce2d07645b6..9f76b7b6e44f1b74650de9ebd44d4b141e90909b 100644
--- a/examples/acasxu/property_7.why
+++ b/examples/acasxu/property_7.why
@@ -1,13 +1,24 @@
 theory ACASXU_P7
-  use ACASXU_1_9.AsTuple
   use ieee_float.Float64
+  use bool.Bool
+  use int.Int
+  use interpretation.Vector
+  use interpretation.NeuralNetwork
 
-  goal P7_1_9: forall x0 x1 x2 x3 x4.
-     (-0.3284230000000000204707362172484863549470901489257812500000000000:t) .<= x0 .<= (0.6798579999999999623483404320722911506891250610351562500000000000:t) ->
-     (-0.5:t) .<= x1 .<= (0.5:t) ->
-     (-0.5:t) .<= x2 .<= (0.5:t) ->
-     (-0.5:t) .<= x3 .<= (0.5:t) ->
-     (-0.5:t) .<= x4 .<= (0.5:t) ->
-     let (y0, y1, y2, y3, y4) = nn_apply x0 x1 x2 x3 x4 in
-     (y4 .>= y1 \/ y4 .>= y2 \/ y4 .>= y3 \/ y4 .>= y0) /\ (y3 .>= y1 \/ y3 .>= y2 \/ y3 .>= y0 \/ y3 .>= y4)
+  constant nn_1_9: nn = read_neural_network "nets/onnx/ACASXU_1_9.onnx" ONNX
+
+  predicate valid_input (i: vector t) =
+    (-0.3284230000000000204707362172484863549470901489257812500000000000:t) .<= i[0] .<= (0.6798579999999999623483404320722911506891250610351562500000000000:t)
+    /\ (-0.5:t) .<= i[1] .<= (0.5:t)
+    /\ (-0.5:t) .<= i[2] .<= (0.5:t)
+    /\ (-0.5:t) .<= i[3] .<= (0.5:t)
+    /\ (-0.5:t) .<= i[4] .<= (0.5:t)
+
+  predicate is_min (o: vector t) (i: int) =
+    forall j: int. 0 <= j < 5 -> i <> j -> o[i] .<= o[j]
+
+  goal P7_1_9:
+    forall i: vector t.
+      has_length i 5 -> valid_input i ->
+      (not (is_min (nn_1_9@@i) 4)) /\ (not (is_min (nn_1_9@@i) 3))
 end
diff --git a/examples/acasxu/property_8.why b/examples/acasxu/property_8.why
index 1e4703d2c08908ca1dc765486b1826135e55fdec..1bd941a74b4c8eb4aa5edf57f6d1540719650455 100644
--- a/examples/acasxu/property_8.why
+++ b/examples/acasxu/property_8.why
@@ -1,13 +1,24 @@
 theory ACASXU_P8
-  use ACASXU_2_9.AsTuple
   use ieee_float.Float64
+  use bool.Bool
+  use int.Int
+  use interpretation.Vector
+  use interpretation.NeuralNetwork
 
-  goal P8_2_9: forall x0 x1 x2 x3 x4.
-     (-0.3284230000000000204707362172484863549470901489257812500000000000:t) .<= x0 .<= (0.6798579999999999623483404320722911506891250610351562500000000000:t) ->
-     (-0.5:t) .<= x1 .<= (0.375:t) ->
-     (-0.0159149999999999985922372047753015067428350448608398437500000000:t) .<= x2 .<= (0.0159149999999999985922372047753015067428350448608398437500000000:t) ->
-     (-0.0454550000000000023470114740575809264555573463439941406250000000:t) .<= x3 .<= (0.5:t) ->
-     (0.0:t) .<= x4 .<= (0.5:t) ->
-     let (y0, y1, y2, y3, y4) = nn_apply x0 x1 x2 x3 x4 in
-     (y1 .<= y0 /\ y1 .<= y2 /\ y1 .<= y3 /\ y1 .<= y4) \/ (y0 .<= y1 /\ y0 .<= y2 /\ y0 .<= y3 /\ y0 .<= y4)
+  constant nn_2_9: nn = read_neural_network "nets/onnx/ACASXU_2_9.onnx" ONNX
+
+  predicate valid_input (i: vector t) =
+    (-0.3284230000000000204707362172484863549470901489257812500000000000:t) .<= i[0] .<= (0.6798579999999999623483404320722911506891250610351562500000000000:t)
+    /\ (-0.5:t) .<= i[1] .<= (0.375:t)
+    /\ (-0.0159149999999999985922372047753015067428350448608398437500000000:t) .<= i[2] .<= (0.0159149999999999985922372047753015067428350448608398437500000000:t)
+    /\ (-0.0454550000000000023470114740575809264555573463439941406250000000:t) .<= i[3] .<= (0.5:t)
+    /\ (0.0:t) .<= i[4] .<= (0.5:t)
+
+  predicate is_min (o: vector t) (i: int) =
+    forall j: int. 0 <= j < 5 -> i <> j -> o[i] .<= o[j]
+
+  goal P8_2_9:
+    forall i: vector t.
+      has_length i 5 -> valid_input i ->
+      (is_min (nn_2_9@@i) 0) \/ (is_min (nn_2_9@@i) 1)
 end
diff --git a/examples/acasxu/property_9.why b/examples/acasxu/property_9.why
index 34d9b007c38c845df84cea528e9150f12b01d99a..630a527d294c0c041fd9b1e4976a4a63aa3a7b06 100644
--- a/examples/acasxu/property_9.why
+++ b/examples/acasxu/property_9.why
@@ -1,13 +1,22 @@
 theory ACASXU_P9
-  use ACASXU_3_3.AsTuple
   use ieee_float.Float64
+  use bool.Bool
+  use int.Int
+  use interpretation.Vector
+  use interpretation.NeuralNetwork
 
-  goal P9_3_3: forall x0 x1 x2 x3 x4.
-     (-0.2952339158000000240988924815610516816377639770507812500000000000:t) .<= x0 .<= (-0.2122615123999999908743774312824825756251811981201171875000000000:t) ->
-     (-0.0636619771999999972678097037714906036853790283203125000000000000:t) .<= x1 .<= (-0.0222816919999999987767047571196599164977669715881347656250000000:t) ->
-     (-0.4999998959999999992298569395643426105380058288574218750000000000:t) .<= x2 .<= (-0.4984083464999999879552206039079464972019195556640625000000000000:t) ->
-     (-0.5:t) .<= x3 .<= (-0.4545454545000000012855423392466036602854728698730468750000000000:t) ->
-     (-0.5:t) .<= x4 .<= (-0.375:t) ->
-     let (y0, y1, y2, y3, y4) = nn_apply x0 x1 x2 x3 x4 in
-     y3 .<= y1 /\ y3 .<= y2 /\ y3 .<= y0 /\ y3 .<= y4
+  constant nn_3_3: nn = read_neural_network "nets/onnx/ACASXU_3_3.onnx" ONNX
+
+  predicate valid_input (i: vector t) =
+    (-0.2952339158000000240988924815610516816377639770507812500000000000:t) .<= i[0] .<= (-0.2122615123999999908743774312824825756251811981201171875000000000:t)
+    /\ (-0.0636619771999999972678097037714906036853790283203125000000000000:t) .<= i[1] .<= (-0.0222816919999999987767047571196599164977669715881347656250000000:t)
+    /\ (-0.4999998959999999992298569395643426105380058288574218750000000000:t) .<= i[2] .<= (-0.4984083464999999879552206039079464972019195556640625000000000000:t)
+    /\ (-0.5:t) .<= i[3] .<= (-0.4545454545000000012855423392466036602854728698730468750000000000:t)
+    /\ (-0.5:t) .<= i[4] .<= (-0.375:t)
+
+  predicate is_min (o: vector t) (i: int) =
+    forall j: int. 0 <= j < 5 -> i <> j -> o[i] .<= o[j]
+
+  goal P9_3_3:
+    forall i: vector t. has_length i 5 -> valid_input i -> is_min (nn_3_3@@i) 3
 end
diff --git a/src/interpretation.ml b/src/interpretation.ml
index 4d5af8bad9c4fa51374ba4900e9a3b8ae71b1a5c..7e0bddfc54774e270951d169e97df4bbee55f67a 100644
--- a/src/interpretation.ml
+++ b/src/interpretation.ml
@@ -373,6 +373,9 @@ let interpret_task ~cwd env task =
   in
   let g, f = (Task.task_goal task, Task.task_goal_fmla task) in
   let f = CRE.normalize ~limit:Int.max_value engine Term.Mvs.empty f in
+  Logs.debug ~src:Logging.src_interpret_goal (fun m ->
+    m "Interpreted formula for goal '%a':@.%a@.%a" Pretty.print_pr g
+      Pretty.print_term f print_caisar_op_of_ls caisar_env);
   let _, task = Task.task_separate_goal task in
   let task = declare_language_lsymbols caisar_env task in
   let task = Task.(add_prop_decl task Pgoal g f) in
diff --git a/src/logging.ml b/src/logging.ml
index f15df62bc1de0e489d51a0b76536c93762f5895f..049a3d73c25fc6294bc48229be5148714f920110 100644
--- a/src/logging.ml
+++ b/src/logging.ml
@@ -26,10 +26,20 @@ let src_prover_spec =
   Logs.Src.create "ProverSpec" ~doc:"Prover-tailored specification"
 
 let src_prover_call = Logs.Src.create "ProverCall" ~doc:"Prover call"
+
+let src_interpret_goal =
+  Logs.Src.create "InterpretGoal" ~doc:"Goal interpretation"
+
 let src_stack_trace = Logs.Src.create "StackTrace" ~doc:"Print stack trace"
 
 let all_srcs () =
-  [ src_autodetect; src_prover_spec; src_prover_call; src_stack_trace ]
+  [
+    src_autodetect;
+    src_prover_spec;
+    src_prover_call;
+    src_interpret_goal;
+    src_stack_trace;
+  ]
 
 let is_debug_level src =
   match Logs.Src.level src with Some Debug -> true | _ -> false
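For context, a minimal sketch (not CAISAR code; the reporter choice and the assumption that ``--ltag=InterpretGoal`` ultimately calls ``Logs.Src.set_level`` are illustrative) of how one such source is switched to debug level with the ``logs`` library:

  (* Hypothetical sketch: enable a single log source at debug level. *)
  let enable_debug (src : Logs.src) = Logs.Src.set_level src (Some Logs.Debug)

  let () =
    (* Install any reporter; format_reporter prints to the standard formatters. *)
    Logs.set_reporter (Logs.format_reporter ());
    (* Presumably what --ltag=InterpretGoal arranges for this source. *)
    enable_debug Logging.src_interpret_goal;
    (* is_debug_level now holds for the source, so messages such as the one
       added in src/interpretation.ml are emitted. *)
    Logs.debug ~src:Logging.src_interpret_goal (fun m -> m "goal interpretation enabled")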
diff --git a/src/logging.mli b/src/logging.mli
index 48e43c3b9e5caf1c36004dd8d25f6c58dea3d5e6..315bbc7251b96f6e0d95c1ddff9f30b951da6733 100644
--- a/src/logging.mli
+++ b/src/logging.mli
@@ -25,6 +25,7 @@
 val src_autodetect : Logs.src
 val src_prover_spec : Logs.src
 val src_prover_call : Logs.src
+val src_interpret_goal : Logs.src
 val src_stack_trace : Logs.src
 
 val all_srcs : unit -> Logs.src list