diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b2243fb73cbb478acf89b2041a07bc04dab24629..8fe240f9e137d9a95cd9a51302cea8ff7042f172 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -12,6 +12,7 @@ tests: - if [ ! -d _opam ]; then echo "no local switch in the CI cache, we setup a new switch"; opam switch create --yes --no-install . ocaml-base-compiler.4.11.1; fi - eval $(opam env) - sudo apt-get update + - sudo apt install -y protobuf-compiler - opam repository add remote https://opam.ocaml.org - opam depext --yes ocplib-endian base fmt alt-ergo.2.4.0 - opam install . --deps-only --with-test --yes diff --git a/Makefile b/Makefile index b9f6d170932ea621b1b18f44a6cf2747023130fd..88393974dd9593a87f098410a54d45a4879f8bcf 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ all: - dune build --root=. @install caisar.opam nnet.opam + dune build --root=. @install caisar.opam nnet.opam onnx.opam test: dune runtest --root=. diff --git a/caisar.opam b/caisar.opam index bd71b86a1a5be649e93339a6afa88260b58dfd71..b2a9fd775a6f73d2d6edca3561298f9f1f4f7236 100644 --- a/caisar.opam +++ b/caisar.opam @@ -4,10 +4,14 @@ version: "0.1" synopsis: "Framework for neural network verification" depends: [ "ocaml" {>= "4.10"} - "dune" {>= "2.9" & >= "2.9.0"} "dune-site" {>= "2.9.0"} - "why3" + "piqi" {>= "0.7.6"} + "piqilib" {>= "0.6.14"} + "zarith" {>= "1.7"} + "ocplib-endian" {>= "1.0"} + "dune" {>= "2.9" & >= "2.7.1"} "base" {>= "v0.14.0"} + "stdio" {>= "v0.14.0"} "cmdliner" {>= "1.0.4"} "fmt" {>= "0.8.9"} "logs" {>= "0.7.0"} @@ -18,10 +22,11 @@ depends: [ "csv" {>= "2.4"} "why3" {>= "1.4"} "re" + "onnx" "odoc" {with-doc} ] build: [ - ["dune" "subst" "--root" "."] {dev} + ["dune" "subst"] {dev} [ "dune" "build" @@ -29,8 +34,7 @@ build: [ name "-j" jobs - "--promote-install-files" - "false" + "--promote-install-files=false" "@install" "@runtest" {with-test} "@doc" {with-doc} diff --git a/dune-project b/dune-project index a601dce70a850343279eecbf445d1ffe3d596429..c0e7bd02c1cfce445db33902de2f142a99a6afde 100644 --- a/dune-project +++ b/dune-project @@ -14,10 +14,14 @@ (synopsis "Framework for neural network verification") (depends (ocaml (>= 4.10)) - (dune (>= 2.9.0)) (dune-site (>= 2.9.0)) - why3 + (piqi (>= 0.7.6)) + (piqilib (>= 0.6.14)) + (zarith (>= 1.7)) + (ocplib-endian (>= 1.0)) + (dune (>= 2.7.1)) (base (>= v0.14.0)) + (stdio (>= v0.14.0)) (cmdliner (>= 1.0.4)) (fmt (>= 0.8.9)) (logs (>= 0.7.0)) @@ -28,6 +32,7 @@ (csv (>= 2.4)) (why3 (>= 1.4)) re + onnx ) (sites (share stdlib) @@ -40,7 +45,18 @@ (synopsis "NNet parser") (depends (ocaml (>= 4.10)) - (dune (>= 2.7.1)) + (dune (>= 2.9.1)) + (base (>= v0.14.0)) + ) +) + +(package + (name onnx) + (synopsis "ONNX parser") + (depends + (ocaml (>= 4.10)) + (dune (>= 2.9.1)) (base (>= v0.14.0)) + (ocaml-protoc-plugin (= 4.2.0)) ) ) diff --git a/lib/onnx/dune b/lib/onnx/dune new file mode 100644 index 0000000000000000000000000000000000000000..65ba5412c35cefcd13c2ea542342c4e022282fc5 --- /dev/null +++ b/lib/onnx/dune @@ -0,0 +1,14 @@ +(library + (name onnx) + (public_name onnx) + (libraries base stdio piqirun.pb ocaml-protoc-plugin) + (synopsis "ONNX parser")) +(rule + (targets onnx_protoc.ml) + (action + (run ./generate_onnx_interface.sh) + ) + (deps + onnx_protoc.proto + generate_onnx_interface.sh) + ) diff --git a/lib/onnx/generate_onnx_interface.sh b/lib/onnx/generate_onnx_interface.sh new file mode 100755 index 0000000000000000000000000000000000000000..5ba8d64d663c92f7e2e1026f4f8065a087777bc8 --- /dev/null +++ b/lib/onnx/generate_onnx_interface.sh @@ -0,0 +1,2 
@@
+#!/bin/sh
+protoc --ocaml_out=. onnx_protoc.proto
diff --git a/lib/onnx/onnx.ml b/lib/onnx/onnx.ml
new file mode 100644
index 0000000000000000000000000000000000000000..9e61f4cac120e01f0cf0c8a22587ad84b2f5401f
--- /dev/null
+++ b/lib/onnx/onnx.ml
@@ -0,0 +1,70 @@
+(**************************************************************************)
+(*                                                                        *)
+(*  This file is part of CAISAR.                                          *)
+(*                                                                        *)
+(**************************************************************************)
+
+open Base
+module Format = Caml.Format
+module Fun = Caml.Fun
+module Oproto = Onnx_protoc (* Autogenerated during compilation *)
+
+module Oprotom = Oproto.Onnx.ModelProto
+
+type t = {
+  n_inputs : int; (* Number of inputs. *)
+  n_outputs : int; (* Number of outputs. *)
+}
+
+(* ONNX format handling. *)
+
+let get_nested_dims (s : Oproto.Onnx.ValueInfoProto.t list) =
+  match List.nth s 0 with
+  | Some { type' = Some { value = `Tensor_type { shape = Some v; _ }; _ }; _ }
+    ->
+    v
+  | _ -> []
+
+let flattened_dim (dim : Oproto.Onnx.TensorShapeProto.Dimension.t list) =
+  List.fold ~init:1 dim ~f:(fun acc x ->
+      match x.value with
+      | `Dim_value v -> acc * v
+      | `Dim_param _ -> acc
+      | `not_set -> acc)
+
+let get_input_output_dim (model : Oprotom.t) =
+  let ins, outs =
+    match model.graph with
+    | Some g -> (Some g.input, Some g.output)
+    | None -> (None, None)
+  in
+  let input_shape, output_shape =
+    match (ins, outs) with
+    | Some i, Some o -> (get_nested_dims i, get_nested_dims o)
+    | _ -> ([], [])
+  in
+  (* TODO: here we only get the flattened dimension of inputs and outputs, but
+     more interesting parsing could be done later on. *)
+  let input_flat_dim = flattened_dim input_shape in
+  let output_flat_dim = flattened_dim output_shape in
+  (input_flat_dim, output_flat_dim)
+
+let parse_in_channel in_channel =
+  let open Result in
+  try
+    let buf = Stdio.In_channel.input_all in_channel in
+    let reader = Ocaml_protoc_plugin.Reader.create buf in
+    match Oprotom.from_proto reader with
+    | Ok r ->
+      let n_inputs, n_outputs = get_input_output_dim r in
+      Ok { n_inputs; n_outputs }
+    | _ -> Error "Error parsing protobuf"
+  with
+  | Sys_error s -> Error s
+  | Failure msg -> Error (Format.sprintf "Unexpected error: %s." msg)
+
+let parse filename =
+  let in_channel = Stdlib.open_in_bin filename in
+  Fun.protect
+    ~finally:(fun () -> Stdlib.close_in in_channel)
+    (fun () -> parse_in_channel in_channel)
diff --git a/lib/onnx/onnx.mli b/lib/onnx/onnx.mli
new file mode 100644
index 0000000000000000000000000000000000000000..b017810b076de7a704598079545f57d161a5344e
--- /dev/null
+++ b/lib/onnx/onnx.mli
@@ -0,0 +1,14 @@
+(**************************************************************************)
+(*                                                                        *)
+(*  This file is part of CAISAR.                                          *)
+(*                                                                        *)
+(**************************************************************************)
+
+type t = private {
+  n_inputs : int;  (** Number of inputs. *)
+  n_outputs : int;  (** Number of outputs. *)
+}
+(** ONNX model metadata. *)
+
+val parse : string -> (t, string) Result.t
+(** Parse an ONNX file. *)
diff --git a/lib/onnx/onnx_protoc.proto b/lib/onnx/onnx_protoc.proto
new file mode 100644
index 0000000000000000000000000000000000000000..8a11511842b7c85876e10a099da2ca103d825db0
--- /dev/null
+++ b/lib/onnx/onnx_protoc.proto
@@ -0,0 +1,817 @@
+//
+// WARNING: This file is automatically generated! Please edit onnx.in.proto.
+//
+
+
+// SPDX-License-Identifier: Apache-2.0
+
+
+syntax = "proto2";
+
+package onnx;
+
+// Overview
+//
+// ONNX is an open specification that is comprised of the following components:
+//
+// 1) A definition of an extensible computation graph model.
+// 2) Definitions of standard data types.
+// 3) Definitions of built-in operators.
+//
+// This document describes the syntax of models and their computation graphs,
+// as well as the standard data types. Together, they are referred to as the ONNX
+// Intermediate Representation, or 'IR' for short.
+//
+// The normative semantic specification of the ONNX IR is found in docs/IR.md.
+// Definitions of the built-in neural network operators may be found in docs/Operators.md.

+// Notes
+//
+// Protobuf compatibility
+//
+// To simplify framework compatibility, ONNX is defined using the subset of protobuf
+// that is compatible with both protobuf v2 and v3. This means that we do not use any
+// protobuf features that are only available in one of the two versions.
+//
+// Here are the most notable contortions we have to carry out to work around
+// these limitations:
+//
+//   - No 'map' (added protobuf 3.0). We instead represent mappings as lists
+//     of key-value pairs, where order does not matter and duplicates
+//     are not allowed.


+// Versioning
+//
+// ONNX versioning is specified in docs/IR.md and elaborated on in docs/Versioning.md
+//
+// To be compatible with both proto2 and proto3, we will use a version number
+// that is not defined by the default value but an explicit enum number.
+enum Version {
+  // proto3 requires the first enum value to be zero.
+  // We add this just to appease the compiler.
+  _START_VERSION = 0;
+  // The version field is always serialized and we will use it to store the
+  // version that the graph is generated from. This helps us set up version
+  // control.
+  // For the IR, we are using simple numbers starting with 0x00000001,
+  // which was the version we published on Oct 10, 2017.
+  IR_VERSION_2017_10_10 = 0x0000000000000001;
+
+  // IR_VERSION 2 published on Oct 30, 2017
+  // - Added type discriminator to AttributeProto to support proto3 users
+  IR_VERSION_2017_10_30 = 0x0000000000000002;
+
+  // IR VERSION 3 published on Nov 3, 2017
+  // - For operator versioning:
+  //    - Added new message OperatorSetIdProto
+  //    - Added opset_import in ModelProto
+  // - For vendor extensions, added domain in NodeProto
+  IR_VERSION_2017_11_3 = 0x0000000000000003;
+
+  // IR VERSION 4 published on Jan 22, 2019
+  // - Relax constraint that initializers should be a subset of graph inputs
+  // - Add type BFLOAT16
+  IR_VERSION_2019_1_22 = 0x0000000000000004;
+
+  // IR VERSION 5 published on March 18, 2019
+  // - Add message TensorAnnotation.
+  // - Add quantization annotation in GraphProto to map tensor with its scale and zero point quantization parameters.
+  IR_VERSION_2019_3_18 = 0x0000000000000005;
+
+  // IR VERSION 6 published on Sep 19, 2019
+  // - Add support for sparse tensor constants stored in model.
+  //   - Add message SparseTensorProto
+  //   - Add sparse initializers
+  IR_VERSION_2019_9_19 = 0x0000000000000006;
+
+  // IR VERSION 7 published on May 8, 2020
+  // - Add support to allow function body graph to rely on multiple external operator sets.
+  // - Add a list to promote inference graph's initializers to global and
+  //   mutable variables. Global variables are visible in all graphs of the
+  //   stored models.
+  // - Add message TrainingInfoProto to store initialization
+  //   method and training algorithm. The execution of TrainingInfoProto
+  //   can modify the values of mutable variables.
+  // - Implicitly add inference graph into each TrainingInfoProto's algorithm.
+  IR_VERSION_2020_5_8 = 0x0000000000000007;
+
+  // IR VERSION 8 published on <TBD>
+  // Introduce TypeProto.SparseTensor
+  // Introduce TypeProto.Optional
+  // Added a list of FunctionProtos local to the model
+  // Deprecated since_version and operator status from FunctionProto
+  IR_VERSION = 0x0000000000000008;
+
+}

+// Attributes
+//
+// A named attribute containing either singular float, integer, string, graph,
+// and tensor values, or repeated float, integer, string, graph, and tensor values.
+// An AttributeProto MUST contain the name field, and *only one* of the
+// following content fields, effectively enforcing a C/C++ union equivalent.
+message AttributeProto {
+
+  // Note: this enum is structurally identical to the OpSchema::AttrType
+  // enum defined in schema.h. If you rev one, you likely need to rev the other.
+  enum AttributeType {
+    UNDEFINED = 0;
+    FLOAT = 1;
+    INT = 2;
+    STRING = 3;
+    TENSOR = 4;
+    GRAPH = 5;
+    SPARSE_TENSOR = 11;
+    TYPE_PROTO = 13;
+
+    FLOATS = 6;
+    INTS = 7;
+    STRINGS = 8;
+    TENSORS = 9;
+    GRAPHS = 10;
+    SPARSE_TENSORS = 12;
+    TYPE_PROTOS = 14;
+  }
+
+  // The name field MUST be present for this version of the IR.
+  optional string name = 1; // namespace Attribute
+
+  // if ref_attr_name is not empty, ref_attr_name is the attribute name in the parent function.
+  // In this case, this AttributeProto does not contain data, and it's a reference to an attribute
+  // in the parent scope.
+  // NOTE: This should ONLY be used in a function (sub-graph). It's invalid to be used in the main graph.
+  optional string ref_attr_name = 21;
+
+  // A human-readable documentation for this attribute. Markdown is allowed.
+  optional string doc_string = 13;
+
+  // The type field MUST be present for this version of the IR.
+  // For 0.0.1 versions of the IR, this field was not defined, and
+  // implementations needed to use has_field heuristics to determine
+  // which value field was in use. For IR_VERSION 0.0.2 or later, this
+  // field MUST be set and match the f|i|s|t|... field in use. This
+  // change was made to accommodate proto3 implementations.
+  optional AttributeType type = 20; // discriminator that indicates which field below is in use
+
+  // Exactly ONE of the following fields must be present for this version of the IR
+  optional float f = 2; // float
+  optional int64 i = 3; // int
+  optional bytes s = 4; // UTF-8 string
+  optional TensorProto t = 5; // tensor value
+  optional GraphProto g = 6; // graph
+  optional SparseTensorProto sparse_tensor = 22; // sparse tensor value
+  // Do not use field below, it's deprecated.
+  // optional ValueProto v = 12; // value - subsumes everything but graph
+  optional TypeProto tp = 14; // type proto
+
+  repeated float floats = 7; // list of floats
+  repeated int64 ints = 8; // list of ints
+  repeated bytes strings = 9; // list of UTF-8 strings
+  repeated TensorProto tensors = 10; // list of tensors
+  repeated GraphProto graphs = 11; // list of graphs
+  repeated SparseTensorProto sparse_tensors = 23; // list of sparse tensors
+  repeated TypeProto type_protos = 15; // list of type protos
+}

+// Defines information on a value, including the name, the type, and
+// the shape of the value.
+message ValueInfoProto {
+  // This field MUST be present in this version of the IR.
+  optional string name = 1; // namespace Value
+  // This field MUST be present in this version of the IR for
+  // inputs and outputs of the top-level graph.
+  optional TypeProto type = 2;
+  // A human-readable documentation for this value. Markdown is allowed.
+  optional string doc_string = 3;
+}

+// Nodes
+//
+// Computation graphs are made up of a DAG of nodes, which represent what is
+// commonly called a "layer" or "pipeline stage" in machine learning frameworks.
+//
+// For example, it can be a node of type "Conv" that takes in an image, a filter
+// tensor and a bias tensor, and produces the convolved output.
+message NodeProto {
+  repeated string input = 1; // namespace Value
+  repeated string output = 2; // namespace Value
+
+  // An optional identifier for this node in a graph.
+  // This field MAY be absent in this version of the IR.
+  optional string name = 3; // namespace Node
+
+  // The symbolic identifier of the Operator to execute.
+  optional string op_type = 4; // namespace Operator
+  // The domain of the OperatorSet that specifies the operator named by op_type.
+  optional string domain = 7; // namespace Domain
+
+  // Additional named attributes.
+  repeated AttributeProto attribute = 5;
+
+  // A human-readable documentation for this node. Markdown is allowed.
+  optional string doc_string = 6;
+}

+// Training information
+// TrainingInfoProto stores information for training a model.
+// In particular, this defines two functionalities: an initialization-step
+// and a training-algorithm-step. Initialization resets the model
+// back to its original state as if no training has been performed.
+// The training algorithm improves the model based on input data.
+//
+// The semantics of the initialization-step is that the initializers
+// in ModelProto.graph and in TrainingInfoProto.algorithm are first
+// initialized as specified by the initializers in the graph, and then
+// updated by the "initialization_binding" in every instance in
+// ModelProto.training_info.
+//
+// The field "algorithm" defines a computation graph which represents a
+// training algorithm's step. After the execution of a
+// TrainingInfoProto.algorithm, the initializers specified by "update_binding"
+// may be immediately updated. If the targeted training algorithm contains
+// consecutive update steps (such as block coordinate descent methods),
+// the user needs to create a TrainingInfoProto for each step.
+message TrainingInfoProto {
+  // This field describes a graph to compute the initial tensors
+  // upon starting the training process. The initialization graph has no input
+  // and can have multiple outputs. Usually, trainable tensors in neural
+  // networks are randomly initialized. To achieve that, for each tensor,
+  // the user can put a random number operator such as RandomNormal or
+  // RandomUniform in TrainingInfoProto.initialization.node and assign its
+  // random output to the specific tensor using "initialization_binding".
+  // This graph can also set the initializers in "algorithm" in the same
+  // TrainingInfoProto; a use case is resetting the number of training
+  // iterations to zero.
+  //
+  // By default, this field is an empty graph and its evaluation does not
+  // produce any output. Thus, no initializer would be changed by default.
+  optional GraphProto initialization = 1;
+
+  // This field represents a training algorithm step. Given required inputs,
+  // it computes outputs to update initializers in its own or the inference graph's
+  // initializer lists. In general, this field contains the loss node, gradient node,
+  // optimizer node, and an increment of the iteration count.
+  //
+  // An execution of the training algorithm step is performed by executing the
+  // graph obtained by combining the inference graph (namely "ModelProto.graph")
+  // and the "algorithm" graph. That is, the actual
+  // input/initializer/output/node/value_info/sparse_initializer list of
+  // the training graph is the concatenation of
+  // "ModelProto.graph.input/initializer/output/node/value_info/sparse_initializer"
+  // and "algorithm.input/initializer/output/node/value_info/sparse_initializer"
+  // in that order. This combined graph must satisfy the normal ONNX conditions.
+  // Now, let's provide a visualization of graph combination for clarity.
+  // Let the inference graph (i.e., "ModelProto.graph") be
+  //    tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d
+  // and the "algorithm" graph be
+  //    tensor_d -> Add -> tensor_e
+  // The combination process results in
+  //    tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d -> Add -> tensor_e
+  //
+  // Notice that an input of a node in the "algorithm" graph may reference the
+  // output of a node in the inference graph (but not the other way round). Also, an inference
+  // node cannot reference inputs of "algorithm". With these restrictions, the inference graph
+  // can always be run independently without training information.
+  //
+  // By default, this field is an empty graph and its evaluation does not
+  // produce any output. Evaluating the default training step never
+  // updates any initializers.
+  optional GraphProto algorithm = 2;
+
+  // This field specifies the bindings from the outputs of "initialization" to
+  // some initializers in "ModelProto.graph.initializer" and
+  // the "algorithm.initializer" in the same TrainingInfoProto.
+  // See "update_binding" below for details.
+  //
+  // By default, this field is empty and no initializer would be changed
+  // by the execution of "initialization".
+  repeated StringStringEntryProto initialization_binding = 3;
+
+  // Gradient-based training is usually an iterative procedure. In one gradient
+  // descent iteration, we apply
+  //
+  //    x = x - r * g
+  //
+  // where "x" is the optimized tensor, "r" stands for learning rate, and "g" is
+  // the gradient of "x" with respect to a chosen loss. To avoid adding assignments
+  // into the training graph, we split the update equation into
+  //
+  //    y = x - r * g
+  //    x = y
+  //
+  // The user needs to save "y = x - r * g" into TrainingInfoProto.algorithm. To
+  // tell that "y" should be assigned to "x", the field "update_binding" may
+  // contain a key-value pair of strings, "x" (key of StringStringEntryProto)
+  // and "y" (value of StringStringEntryProto).
+  // For a neural network with multiple trainable (mutable) tensors, there can
+  // be multiple key-value pairs in "update_binding".
+  //
+  // The initializers appearing as keys in "update_binding" are considered
+  // mutable variables. This implies some behaviors
+  // as described below.
+  //
+  //  1. We have only unique keys in all "update_binding"s so that two
+  //     variables may not have the same name. This ensures that one
+  //     variable is assigned at most once.
+  //  2. The keys must appear in names of "ModelProto.graph.initializer" or
+  //     "TrainingInfoProto.algorithm.initializer".
+  //  3. The values must be output names of "algorithm" or "ModelProto.graph.output".
+  //  4. Mutable variables are initialized to the value specified by the
+  //     corresponding initializer, and then potentially updated by
+  //     "initialization_binding"s and "update_binding"s in "TrainingInfoProto"s.
+  //
+  // This field usually contains names of trainable tensors
+  // (in ModelProto.graph), optimizer states such as momentums in advanced
+  // stochastic gradient methods (in TrainingInfoProto.graph),
+  // and number of training iterations (in TrainingInfoProto.graph).
+  //
+  // By default, this field is empty and no initializer would be changed
+  // by the execution of "algorithm".
+  repeated StringStringEntryProto update_binding = 4;
+}

+// Models
+//
+// ModelProto is a top-level file/container format for bundling an ML model and
+// associating its computation graph with metadata.
+//
+// The semantics of the model are described by the associated GraphProto's.
+message ModelProto {
+  // The version of the IR this model targets. See Version enum above.
+  // This field MUST be present.
+  optional int64 ir_version = 1;
+
+  // The OperatorSets this model relies on.
+  // All ModelProtos MUST have at least one entry that
+  // specifies which version of the ONNX OperatorSet is
+  // being imported.
+  //
+  // All nodes in the ModelProto's graph will bind against the operator
+  // with the same-domain/same-op_type operator with the HIGHEST version
+  // in the referenced operator sets.
+  repeated OperatorSetIdProto opset_import = 8;
+
+  // The name of the framework or tool used to generate this model.
+  // This field SHOULD be present to indicate which implementation/tool/framework
+  // emitted the model.
+  optional string producer_name = 2;
+
+  // The version of the framework or tool used to generate this model.
+  // This field SHOULD be present to indicate which implementation/tool/framework
+  // emitted the model.
+  optional string producer_version = 3;
+
+  // Domain name of the model.
+  // We use reverse domain names as name space indicators. For example:
+  // `com.facebook.fair` or `com.microsoft.cognitiveservices`
+  //
+  // Together with `model_version` and GraphProto.name, this forms the unique identity of
+  // the graph.
+  optional string domain = 4;
+
+  // The version of the graph encoded. See Version enum above.
+  optional int64 model_version = 5;
+
+  // A human-readable documentation for this model. Markdown is allowed.
+  optional string doc_string = 6;
+
+  // The parameterized graph that is evaluated to execute the model.
+  optional GraphProto graph = 7;
+
+  // Named metadata values; keys should be distinct.
+  repeated StringStringEntryProto metadata_props = 14;
+
+  // Training-specific information. Sequentially executing all stored
+  // `TrainingInfoProto.algorithm`s and assigning their outputs following
+  // the corresponding `TrainingInfoProto.update_binding`s is one training
+  // iteration. Similarly, to initialize the model
+  // (as if training hasn't happened), the user should sequentially execute
+  // all stored `TrainingInfoProto.initialization`s and assign their outputs
+  // using `TrainingInfoProto.initialization_binding`s.
+  //
+  // If this field is empty, the training behavior of the model is undefined.
+  repeated TrainingInfoProto training_info = 20;
+
+  // A list of function protos local to the model.
+  //
+  // The name of the function "FunctionProto.name" should be unique within the domain "FunctionProto.domain".
+  // In case of any conflicts the behavior (whether the model local functions are given higher priority,
+  // or standard operator sets are given higher priority, or this is treated as an error) is defined by
+  // the runtimes.
+  //
+  // The operator sets imported by FunctionProto should be compatible with the ones
+  // imported by ModelProto and other model local FunctionProtos.
+  // For example, if the same operator set, say 'A', is imported by a FunctionProto and ModelProto,
+  // or by 2 FunctionProtos, then the versions for the operator set may be different, but
+  // the operator schema returned for the op_type, domain, version combination
+  // should be the same for both versions, for every node in the function body.
+  //
+  // One FunctionProto can reference other FunctionProtos in the model; however, recursive references
+  // are not allowed.
+  repeated FunctionProto functions = 25;
+};

+// StringStringEntryProto follows the pattern for cross-proto-version maps.
+// See https://developers.google.com/protocol-buffers/docs/proto3#maps
+message StringStringEntryProto {
+  optional string key = 1;
+  optional string value = 2;
+};

+message TensorAnnotation {
+  optional string tensor_name = 1;
+  // <key, value> pairs to annotate the tensor specified by <tensor_name> above.
+  // The keys used in the mapping below must be pre-defined in the ONNX spec.
+  // For example, for the 8-bit linear quantization case, 'SCALE_TENSOR' and 'ZERO_POINT_TENSOR' will be pre-defined as
+  // quantization parameter keys.
+  repeated StringStringEntryProto quant_parameter_tensor_names = 2;
+}



+// Graphs
+//
+// A graph defines the computational logic of a model and is comprised of a parameterized
+// list of nodes that form a directed acyclic graph based on their inputs and outputs.
+// This is the equivalent of the "network" or "graph" in many deep learning
+// frameworks.
+message GraphProto {
+  // The nodes in the graph, sorted topologically.
+  repeated NodeProto node = 1;
+
+  // The name of the graph.
+  optional string name = 2; // namespace Graph
+
+  // A list of named tensor values, used to specify constant inputs of the graph.
+  // Each initializer (both TensorProto as well as SparseTensorProto) MUST have a name.
+  // The name MUST be unique across both initializer and sparse_initializer,
+  // but the name MAY also appear in the input list.
+  repeated TensorProto initializer = 5;
+
+  // Initializers (see above) stored in sparse format.
+  repeated SparseTensorProto sparse_initializer = 15;
+
+  // A human-readable documentation for this graph. Markdown is allowed.
+  optional string doc_string = 10;
+
+  // The inputs and outputs of the graph.
+  repeated ValueInfoProto input = 11;
+  repeated ValueInfoProto output = 12;
+
+  // Information for the values in the graph. The ValueInfoProto.name's
+  // must be distinct. It is optional for a value to appear in the value_info list.
+  repeated ValueInfoProto value_info = 13;
+
+  // This field carries information to indicate the mapping between a tensor and its
+  // quantization parameter tensors. For example:
+  // For tensor 'a', it may have {'SCALE_TENSOR', 'a_scale'} and {'ZERO_POINT_TENSOR', 'a_zero_point'} annotated,
+  // which means tensor 'a_scale' and tensor 'a_zero_point' are the scale and zero point of tensor 'a' in the model.
+  repeated TensorAnnotation quantization_annotation = 14;
+
+  // DO NOT USE the following fields, they were deprecated from earlier versions.
+  // repeated string input = 3;
+  // repeated string output = 4;
+  // optional int64 ir_version = 6;
+  // optional int64 producer_version = 7;
+  // optional string producer_tag = 8;
+  // optional string domain = 9;
+}

+// Tensors
+//
+// A serialized tensor value.
+message TensorProto {
+  enum DataType {
+    UNDEFINED = 0;
+    // Basic types.
+    FLOAT = 1; // float
+    UINT8 = 2; // uint8_t
+    INT8 = 3; // int8_t
+    UINT16 = 4; // uint16_t
+    INT16 = 5; // int16_t
+    INT32 = 6; // int32_t
+    INT64 = 7; // int64_t
+    STRING = 8; // string
+    BOOL = 9; // bool
+
+    // IEEE754 half-precision floating-point format (16 bits wide).
+    // This format has 1 sign bit, 5 exponent bits, and 10 mantissa bits.
+    FLOAT16 = 10;
+
+    DOUBLE = 11;
+    UINT32 = 12;
+    UINT64 = 13;
+    COMPLEX64 = 14; // complex with float32 real and imaginary components
+    COMPLEX128 = 15; // complex with float64 real and imaginary components
+
+    // Non-IEEE floating-point format based on IEEE754 single-precision
+    // floating-point number truncated to 16 bits.
+    // This format has 1 sign bit, 8 exponent bits, and 7 mantissa bits.
+    BFLOAT16 = 16;
+
+    // Future extensions go here.
+  }
+
+  // The shape of the tensor.
+  repeated int64 dims = 1;
+
+  // The data type of the tensor.
+  // This field MUST have a valid TensorProto.DataType value
+  optional int32 data_type = 2;
+
+  // For very large tensors, we may want to store them in chunks, in which
+  // case the following fields will specify the segment that is stored in
+  // the current TensorProto.
+  message Segment {
+    optional int64 begin = 1;
+    optional int64 end = 2;
+  }
+  optional Segment segment = 3;
+
+  // Tensor content must be organized in row-major order.
+  //
+  // Depending on the data_type field, exactly one of the fields below with
+  // name ending in _data is used to store the elements of the tensor.

+  // For float and complex64 values
+  // Complex64 tensors are encoded as a single array of floats,
+  // with the real components appearing in odd numbered positions,
+  // and the corresponding imaginary component appearing in the
+  // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
+  // is encoded as [1.0, 2.0, 3.0, 4.0])
+  // When this field is present, the data_type field MUST be FLOAT or COMPLEX64.
+  repeated float float_data = 4 [packed = true];
+
+  // For int32, uint8, int8, uint16, int16, bool, and float16 values
+  // float16 values must be bit-wise converted to a uint16_t prior
+  // to writing to the buffer.
+  // When this field is present, the data_type field MUST be
+  // INT32, INT16, INT8, UINT16, UINT8, BOOL, or FLOAT16
+  repeated int32 int32_data = 5 [packed = true];
+
+  // For strings.
+  // Each element of string_data is a UTF-8 encoded Unicode
+  // string. No trailing null, no leading BOM. The protobuf "string"
+  // scalar type is not used to match ML community conventions.
+  // When this field is present, the data_type field MUST be STRING
+  repeated bytes string_data = 6;
+
+  // For int64.
+  // When this field is present, the data_type field MUST be INT64
+  repeated int64 int64_data = 7 [packed = true];
+
+  // Optionally, a name for the tensor.
+  optional string name = 8; // namespace Value
+
+  // A human-readable documentation for this tensor. Markdown is allowed.
+  optional string doc_string = 12;
+
+  // Serializations can either use one of the fields above, or use this
+  // raw bytes field. The only exception is the string case, where one is
+  // required to store the content in the repeated bytes string_data field.
+  //
+  // When this raw_data field is used to store a tensor value, elements MUST
+  // be stored in fixed-width, little-endian order.
+  // Floating-point data types MUST be stored in IEEE 754 format.
+  // Complex64 elements must be written as two consecutive FLOAT values, real component first.
+  // Complex128 elements must be written as two consecutive DOUBLE values, real component first.
+  // Boolean type MUST be written one byte per tensor element (00000001 for true, 00000000 for false).
+  //
+  // Note: the advantage of a specific field rather than the raw_data field is
+  // that in some cases (e.g. int data), protobuf does a better packing via
+  // variable length storage, and may lead to a smaller binary footprint.
+  // When this field is present, the data_type field MUST NOT be STRING or UNDEFINED
+  optional bytes raw_data = 9;
+
+  // Data can be stored inside the protobuf file using type-specific fields or raw_data.
+  // Alternatively, raw bytes data can be stored in an external file, using the external_data field.
+  // external_data stores key-value pairs describing data location. Recognized keys are:
+  // - "location" (required) - POSIX filesystem path relative to the directory where the ONNX
+  //                           protobuf model was stored
+  // - "offset" (optional) - position of the byte at which stored data begins. Integer stored as string.
+  //                         Offset values SHOULD be multiples of 4096 (page size) to enable mmap support.
+  // - "length" (optional) - number of bytes containing data. Integer stored as string.
+  // - "checksum" (optional) - SHA1 digest of the file specified under the 'location' key.
+  repeated StringStringEntryProto external_data = 13;
+
+  // Location of the data for this tensor. MUST be one of:
+  // - DEFAULT - data stored inside the protobuf message. Data is stored in raw_data (if set) otherwise in type-specified field.
+  // - EXTERNAL - data stored in an external location as described by external_data field.
+  enum DataLocation {
+    DEFAULT = 0;
+    EXTERNAL = 1;
+  }
+
+  // If this value is not set, data is stored in raw_data (if set), otherwise in the type-specified field.
+  optional DataLocation data_location = 14;
+
+  // For double
+  // Complex128 tensors are encoded as a single array of doubles,
+  // with the real components appearing in odd numbered positions,
+  // and the corresponding imaginary component appearing in the
+  // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
+  // is encoded as [1.0, 2.0, 3.0, 4.0])
+  // When this field is present, the data_type field MUST be DOUBLE or COMPLEX128
+  repeated double double_data = 10 [packed = true];
+
+  // For uint64 and uint32 values
+  // When this field is present, the data_type field MUST be
+  // UINT32 or UINT64
+  repeated uint64 uint64_data = 11 [packed = true];
+}

+// A serialized sparse-tensor value
+message SparseTensorProto {
+  // The sequence of non-default values is encoded as a tensor of shape [NNZ].
+  // The default-value is zero for numeric tensors, and empty-string for string tensors.
+  // values must have a non-empty name present which serves as a name for SparseTensorProto
+  // when used in the sparse_initializer list.
+  optional TensorProto values = 1;
+
+  // The indices of the non-default values, which may be stored in one of two formats.
+  // (a) Indices can be a tensor of shape [NNZ, rank] with the [i,j]-th value
+  //     corresponding to the j-th index of the i-th value (in the values tensor).
+ // (b) Indices can be a tensor of shape [NNZ], in which case the i-th value + // must be the linearized-index of the i-th value (in the values tensor). + // The linearized-index can be converted into an index tuple (k_1,...,k_rank) + // using the shape provided below. + // The indices must appear in ascending order without duplication. + // In the first format, the ordering is lexicographic-ordering: + // e.g., index-value [1,4] must appear before [2,1] + optional TensorProto indices = 2; + + // The shape of the underlying dense-tensor: [dim_1, dim_2, ... dim_rank] + repeated int64 dims = 3; +} + +// Defines a tensor shape. A dimension can be either an integer value +// or a symbolic variable. A symbolic variable represents an unknown +// dimension. +message TensorShapeProto { + message Dimension { + oneof value { + int64 dim_value = 1; + string dim_param = 2; // namespace Shape + }; + // Standard denotation can optionally be used to denote tensor + // dimensions with standard semantic descriptions to ensure + // that operations are applied to the correct axis of a tensor. + // Refer to https://github.com/onnx/onnx/blob/master/docs/DimensionDenotation.md#denotation-definition + // for pre-defined dimension denotations. + optional string denotation = 3; + }; + repeated Dimension dim = 1; +} + +// Types +// +// The standard ONNX data types. +message TypeProto { + + message Tensor { + // This field MUST NOT have the value of UNDEFINED + // This field MUST have a valid TensorProto.DataType value + // This field MUST be present for this version of the IR. + optional int32 elem_type = 1; + optional TensorShapeProto shape = 2; + } + + // repeated T + message Sequence { + // The type and optional shape of each element of the sequence. + // This field MUST be present for this version of the IR. + optional TypeProto elem_type = 1; + }; + + // map<K,V> + message Map { + // This field MUST have a valid TensorProto.DataType value + // This field MUST be present for this version of the IR. + // This field MUST refer to an integral type ([U]INT{8|16|32|64}) or STRING + optional int32 key_type = 1; + // This field MUST be present for this version of the IR. + optional TypeProto value_type = 2; + }; + + // wrapper for Tensor, Sequence, or Map + message Optional { + // The type and optional shape of the element wrapped. + // This field MUST be present for this version of the IR. + // Possible values correspond to OptionalProto.DataType enum + optional TypeProto elem_type = 1; + }; + + + message SparseTensor { + // This field MUST NOT have the value of UNDEFINED + // This field MUST have a valid TensorProto.DataType value + // This field MUST be present for this version of the IR. + optional int32 elem_type = 1; + optional TensorShapeProto shape = 2; + } + + + oneof value { + // The type of a tensor. + Tensor tensor_type = 1; + + // NOTE: DNN-only implementations of ONNX MAY elect to not support non-tensor values + // as input and output to graphs and nodes. These types are needed to naturally + // support classical ML operators. DNN operators SHOULD restrict their input + // and output types to tensors. + + // The type of a sequence. + Sequence sequence_type = 4; + + // The type of a map. + Map map_type = 5; + + // The type of an optional. + Optional optional_type = 9; + + + // Type of the sparse tensor + SparseTensor sparse_tensor_type = 8; + + } + + // An optional denotation can be used to denote the whole + // type with a standard semantic description as to what is + // stored inside. 
Refer to https://github.com/onnx/onnx/blob/master/docs/TypeDenotation.md#type-denotation-definition
+  // for pre-defined type denotations.
+  optional string denotation = 6;
+}

+// Operator Sets
+//
+// OperatorSets are uniquely identified by a (domain, opset_version) pair.
+message OperatorSetIdProto {
+  // The domain of the operator set being identified.
+  // The empty string ("") or absence of this field implies the operator
+  // set that is defined as part of the ONNX specification.
+  // This field MUST be present in this version of the IR when referring to any other operator set.
+  optional string domain = 1;
+
+  // The version of the operator set being identified.
+  // This field MUST be present in this version of the IR.
+  optional int64 version = 2;
+}

+// Operator/function status.
+enum OperatorStatus {
+  EXPERIMENTAL = 0;
+  STABLE = 1;
+}

+message FunctionProto {
+  // The name of the function, similar in usage to op_type in OperatorProto.
+  // Combined with FunctionProto.domain, this forms the unique identity of
+  // the FunctionProto.
+  optional string name = 1;
+
+  // Deprecated since IR Version 8
+  // optional int64 since_version = 2;
+  reserved 2;
+  reserved "since_version";
+
+  // Deprecated since IR Version 8
+  // optional OperatorStatus status = 3;
+  reserved 3;
+  reserved "status";
+
+  // The inputs and outputs of the function.
+  repeated string input = 4;
+  repeated string output = 5;
+
+  // The attributes of the function.
+  repeated string attribute = 6;
+
+  // The nodes in the function.
+  repeated NodeProto node = 7;
+  // A human-readable documentation for this function. Markdown is allowed.
+  optional string doc_string = 8;
+
+  // The OperatorSets this function body (graph) relies on.
+  //
+  // All nodes in the function body (graph) will bind against the operator
+  // with the same-domain/same-op_type operator with the HIGHEST version
+  // in the referenced operator sets. This means at most one version can be relied
+  // upon for one domain.
+  //
+  // The operator sets imported by FunctionProto should be compatible with the ones
+  // imported by ModelProto. For example, if the same operator set, say 'A', is imported by a FunctionProto
+  // and ModelProto, then the versions for the operator set may be different, but
+  // the operator schema returned for the op_type, domain, version combination
+  // should be the same for both versions.

+  repeated OperatorSetIdProto opset_import = 9;
+
+  // The domain which this function belongs to. Combined with FunctionProto.name, this forms the unique identity of
+  // the FunctionProto.
+ optional string domain = 10; +} + + +// For using protobuf-lite +option optimize_for = LITE_RUNTIME; diff --git a/nnet.opam b/nnet.opam index 4871796a4c568e4a3339f8a81b43c26d01c74ccd..aa2ffd8b08b83e5d09134e0904ee1293c9d9959a 100644 --- a/nnet.opam +++ b/nnet.opam @@ -4,12 +4,12 @@ version: "0.1" synopsis: "NNet parser" depends: [ "ocaml" {>= "4.10"} - "dune" {>= "2.9" & >= "2.7.1"} + "dune" {>= "2.9" & >= "2.9.1"} "base" {>= "v0.14.0"} "odoc" {with-doc} ] build: [ - ["dune" "subst" "--root" "."] {dev} + ["dune" "subst"] {dev} [ "dune" "build" @@ -17,8 +17,7 @@ build: [ name "-j" jobs - "--promote-install-files" - "false" + "--promote-install-files=false" "@install" "@runtest" {with-test} "@doc" {with-doc} diff --git a/onnx.opam b/onnx.opam new file mode 100644 index 0000000000000000000000000000000000000000..1440f206e7b77aecba8249d0490dd381388aa5ce --- /dev/null +++ b/onnx.opam @@ -0,0 +1,27 @@ +# This file is generated by dune, edit dune-project instead +opam-version: "2.0" +version: "0.1" +synopsis: "ONNX parser" +depends: [ + "ocaml" {>= "4.10"} + "dune" {>= "2.9" & >= "2.9.1"} + "base" {>= "v0.14.0"} + "ocaml-protoc-plugin" {= "4.2.0"} + "odoc" {with-doc} +] +build: [ + ["dune" "subst"] {dev} + [ + "dune" + "build" + "-p" + name + "-j" + jobs + "--promote-install-files=false" + "@install" + "@runtest" {with-test} + "@doc" {with-doc} + ] + ["dune" "install" "-p" name "--create-install-files" name] +] diff --git a/src/dune b/src/dune index b86ccd0b11f92429c615a660bdede99612737f67..4531d323730a27318fc5b3317e7cb7253ef892c9 100644 --- a/src/dune +++ b/src/dune @@ -1,7 +1,7 @@ (executable (name main) (public_name caisar) - (libraries menhirLib yojson cmdliner logs logs.cli logs.fmt fmt.tty base unix str ppx_deriving_yojson.runtime nnet why3 dune-site re) + (libraries menhirLib yojson cmdliner logs logs.cli logs.fmt fmt.tty base unix str ppx_deriving_yojson.runtime nnet onnx why3 dune-site re) (preprocess (pps ppx_deriving_yojson ppx_deriving.show ppx_deriving.ord ppx_deriving.eq)) (package caisar) ) diff --git a/src/language.ml b/src/language.ml index 34b21be09adb4860299d9b9d81a7ea55d7dbe409..ed602c42d9b462d96b3b1f623e3705cf910cc4cb 100644 --- a/src/language.ml +++ b/src/language.ml @@ -6,55 +6,63 @@ open Base -(* -- Support for the NNet neural network format. *) +(* -- Support for the NNet and ONNX neural network formats. 
*)
 
-type nnet = {
+type ioshape = {
   nb_inputs : int;
   nb_outputs : int;
   ty_data : Why3.Ty.ty;
   filename : string;
 }
 
-let loaded_nnets = Why3.Term.Hls.create 10
+let loaded_nets = Why3.Term.Hls.create 10
 
-let lookup_loaded_nnets = Why3.Term.Hls.find_opt loaded_nnets
+let lookup_loaded_nets = Why3.Term.Hls.find_opt loaded_nets
+
+let register_astuple nb_inputs nb_outputs filename env =
+  let open Why3 in
+  let net = Pmodule.read_module env [ "caisar" ] "IOShape" in
+  let ioshape_input_type =
+    Ty.ty_app Theory.(ns_find_ts net.mod_theory.th_export [ "input_type" ]) []
+  in
+  let id_as_tuple = Ident.id_fresh "AsTuple" in
+  let th_uc = Pmodule.create_module env id_as_tuple in
+  let th_uc = Pmodule.use_export th_uc net in
+  let ls_net_apply =
+    let f _ = ioshape_input_type in
+    Term.create_fsymbol
+      (Ident.id_fresh "net_apply")
+      (List.init nb_inputs ~f)
+      (Ty.ty_tuple (List.init nb_outputs ~f))
+  in
+  Why3.Term.Hls.add loaded_nets ls_net_apply
+    { filename; nb_inputs; nb_outputs; ty_data = ioshape_input_type };
+  let th_uc =
+    Pmodule.add_pdecl ~vc:false th_uc
+      (Pdecl.create_pure_decl (Decl.create_param_decl ls_net_apply))
+  in
+  Wstdlib.Mstr.singleton "AsTuple" (Pmodule.close_module th_uc)
 
 let nnet_parser env _ filename _ =
   let open Why3 in
-  let header = Nnet.parse filename in
-  match header with
+  let model = Nnet.parse filename in
+  match model with
   | Error s -> Loc.errorm "%s" s
-  | Ok header ->
-    let nnet = Pmodule.read_module env [ "caisar" ] "NNet" in
-    let nnet_input_type =
-      Ty.ty_app
-        Theory.(ns_find_ts nnet.mod_theory.th_export [ "input_type" ])
-        []
-    in
-    let id_as_tuple = Ident.id_fresh "AsTuple" in
-    let th_uc = Pmodule.create_module env id_as_tuple in
-    let th_uc = Pmodule.use_export th_uc nnet in
-    let ls_nnet_apply =
-      let f _ = nnet_input_type in
-      Term.create_fsymbol
-        (Ident.id_fresh "nnet_apply")
-        (List.init header.n_inputs ~f)
-        (Ty.ty_tuple (List.init header.n_outputs ~f))
-    in
-    Why3.Term.Hls.add loaded_nnets ls_nnet_apply
-      {
-        filename;
-        nb_inputs = header.n_inputs;
-        nb_outputs = header.n_outputs;
-        ty_data = nnet_input_type;
-      };
-    let th_uc =
-      Pmodule.add_pdecl ~vc:false th_uc
-        (Pdecl.create_pure_decl (Decl.create_param_decl ls_nnet_apply))
-    in
-    Wstdlib.Mstr.singleton "AsTuple" (Pmodule.close_module th_uc)
+  | Ok model -> register_astuple model.n_inputs model.n_outputs filename env
+
+let onnx_parser env _ filename _ =
+  let open Why3 in
+  let model = Onnx.parse filename in
+  match model with
+  | Error s -> Loc.errorm "%s" s
+  | Ok model -> register_astuple model.n_inputs model.n_outputs filename env
 
 let register_nnet_support () =
   Why3.(
     Env.register_format ~desc:"NNet format (ReLU only)" Pmodule.mlw_language
       "NNet" [ "nnet" ] nnet_parser)
+
+let register_onnx_support () =
+  Why3.(
+    Env.register_format ~desc:"ONNX format" Pmodule.mlw_language "ONNX"
+      [ "onnx" ] onnx_parser)
diff --git a/src/language.mli b/src/language.mli
index 71482631586333d265b079a0901af77bc8545369..f1d1fcabeff3011ebe2235a8ebac2dd91fbf3dd4 100644
--- a/src/language.mli
+++ b/src/language.mli
@@ -4,15 +4,18 @@
 (*                                                                        *)
 (**************************************************************************)
 
-type nnet = {
+type ioshape = {
   nb_inputs : int;
   nb_outputs : int;
   ty_data : Why3.Ty.ty;
   filename : string;
 }
 
-val lookup_loaded_nnets : Why3.Term.lsymbol -> nnet option
-(** @return the filename of a nnet Why3 representation. *)
+val lookup_loaded_nets : Why3.Term.lsymbol -> ioshape option
+(** @return the input/output description of a loaded neural network, if any. *)
 
 val register_nnet_support : unit -> unit
-(** Register nnet parser. *)
+(** Register NNet parser.
*) + +val register_onnx_support : unit -> unit +(** Register ONNX parser. *) diff --git a/src/transformations/native_nn_prover.ml b/src/transformations/native_nn_prover.ml index 95b89b07e1cf969070cf545f9c6d7a26af0ac56b..15e77170ec6e9c4b6ba86803076840f2c217bab4 100644 --- a/src/transformations/native_nn_prover.ml +++ b/src/transformations/native_nn_prover.ml @@ -30,7 +30,7 @@ let get_input_variables = let rec aux acc (term : Term.term) = match term.t_node with | Term.Tapp (ls, args) -> ( - match Language.lookup_loaded_nnets ls with + match Language.lookup_loaded_nets ls with | None -> acc | Some _ -> let add i acc = function @@ -52,7 +52,7 @@ let simplify_goal env input_variables = let rec aux meta hls (term : Term.term) = match term.t_node with | Term.Tapp (ls, _) -> ( - match Language.lookup_loaded_nnets ls with + match Language.lookup_loaded_nets ls with | None -> Term.t_map (aux meta hls) term | Some nnet -> meta := nnet.filename :: !meta; diff --git a/src/verification.ml b/src/verification.ml index b274a68e186bc5e0c4efb84e16e893dfdaf4c565..a0ed2f5a3de229708ffdb9591da41d32124da38d 100644 --- a/src/verification.ml +++ b/src/verification.ml @@ -8,6 +8,7 @@ open Base module Filename = Caml.Filename let () = Language.register_nnet_support () +let () = Language.register_onnx_support () let create_env loadpath = let config = Autodetection.autodetect ~debug:true () in diff --git a/stdlib/caisar.mlw b/stdlib/caisar.mlw index c29586772de968684847674cfe0347514ebc3204..9166a2a923317ff4bd192e7a27bba880dd625628 100644 --- a/stdlib/caisar.mlw +++ b/stdlib/caisar.mlw @@ -1,4 +1,4 @@ -theory NNet +theory IOShape use ieee_float.Float64 type input_type = t end diff --git a/tests/TestNetworkONNX.onnx b/tests/TestNetworkONNX.onnx new file mode 100644 index 0000000000000000000000000000000000000000..9eca2e7a9f30ee1469c220b4ea2c20ef38508553 --- /dev/null +++ b/tests/TestNetworkONNX.onnx @@ -0,0 +1,28 @@ +pytorch1.8:‚ +' +actual_input +126MatMul_0"MatMul + +6 +fc1.bias7Add_1"Add + +78Relu_2"Relu + +8 +1310MatMul_3"MatMul +) +10 +fc2.bias actual_outputAdd_4"Addtorch-jit-export*Bfc1.biasJE²>¿*Bfc2.biasJÂ3?£©¿*$B12Jc×¾ÏF>9µ +>Óv¯¾Ñ"¾9A¾*B13Jty¾¾Ô•ê>D##?aôj>Z& +actual_input + + + + +b' + actual_output + + + + +B \ No newline at end of file diff --git a/tests/dune b/tests/dune index eac4b0ca195dff598a99f13008ccbf1bacf24e70..d62110c4fc1329e713e41fe0b50289b81bd914a8 100644 --- a/tests/dune +++ b/tests/dune @@ -2,6 +2,7 @@ (deps (package caisar) TestNetwork.nnet + TestNetworkONNX.onnx bin/pyrat.py bin/Marabou )) diff --git a/tests/marabout.t b/tests/marabout.t index 16ae72e58d27f9a736685124d708b4e0cff2a60b..8cddc6c1a54399c1e564a06d4f2be55cc0a52f01 100644 --- a/tests/marabout.t +++ b/tests/marabout.t @@ -21,11 +21,11 @@ Test verify > theory T > use TestNetwork.AsTuple > use ieee_float.Float64 - > use caisar.NNet + > use caisar.IOShape > > goal G: forall x1 x2 x3 x4 x5. > (0.0:t) .< x1 .< (0.5:t) -> - > let (y1,_,_,_,_) = nnet_apply x1 x2 x3 x4 x5 in + > let (y1,_,_,_,_) = net_apply x1 x2 x3 x4 x5 in > (0.0:t) .< y1 .< (0.5:t) > > goal H: forall x1 x2 x3 x4 x5. diff --git a/tests/simple.t b/tests/simple.t index 14221315cd8cb35dc83f17a0f7eef5d982ac19a0..ccc1b93a8125046a303d7082379ea0dbd50139e1 100644 --- a/tests/simple.t +++ b/tests/simple.t @@ -21,16 +21,16 @@ Test verify > theory T > use TestNetwork.AsTuple > use ieee_float.Float64 - > use caisar.NNet + > use caisar.IOShape > > goal G: forall x1 x2 x3 x4 x5. 
> (0.0:t) .< x1 .< (0.5:t) -> - > let (y1,_,_,_,_) = nnet_apply x1 x2 x3 x4 x5 in + > let (y1,_,_,_,_) = net_apply x1 x2 x3 x4 x5 in > (0.0:t) .< y1 .< (0.5:t) > > goal H: forall x1 x2 x3 x4 x5. > (0.0:t) .< x1 .< (0.5:t) /\ (0.5:t) .< x2 .< (1.0:t) -> - > let (y1,y2,_,_,_) = nnet_apply x1 x2 x3 x4 x5 in + > let (y1,y2,_,_,_) = net_apply x1 x2 x3 x4 x5 in > ((0.0:t) .< y1 \/ (0.5:t) .< y1) /\ (0.0:t) .< y2 .< (0.5:t) > end > EOF diff --git a/tests/simple_onnx.t b/tests/simple_onnx.t new file mode 100644 index 0000000000000000000000000000000000000000..6a35e67c6761bd7281ea2d0c7c635b47f6417fbd --- /dev/null +++ b/tests/simple_onnx.t @@ -0,0 +1,51 @@ +Test verify + $ cat - > bin/alt-ergo << EOF + > #!/bin/sh + > echo "2.4.0" + > EOF + + $ chmod u+x bin/alt-ergo bin/pyrat.py bin/Marabou + + $ bin/alt-ergo + 2.4.0 + + $ bin/pyrat.py --version + PyRAT 1.1 + + $ bin/Marabou --version + 1.0.+ + + $ PATH=$(pwd)/bin:$PATH + + $ caisar verify -L . --format whyml --prover=PyRAT - 2>&1 <<EOF | sed 's/\/tmp\/[a-z0-9./]*/$TMPFILE/' + > theory T + > use TestNetworkONNX.AsTuple + > use ieee_float.Float64 + > use caisar.IOShape + > + > goal G: forall x1 x2 x3. + > (0.0:t) .< x1 .< (0.5:t) -> + > let (y1,_) = net_apply x1 x2 x3 in + > (0.0:t) .< y1 .< (0.5:t) + > end + > EOF + <autodetect>0 prover(s) added + <autodetect>Generating strategies: + <autodetect>Run: (Marabou --version) > $TMPFILE 2>&1 + <autodetect>Run: (alt-ergo --version) > $TMPFILE 2>&1 + <autodetect>Run: (pyrat.py --version) > $TMPFILE 2>&1 + <autodetect>Found prover Alt-Ergo version 2.4.0, OK. + <autodetect>Found prover Marabou version 1.0.+, OK. + <autodetect>Found prover PyRAT version 1.1, OK. + <autodetect>3 prover(s) added + Goal G: Unknown + () + Output: + NN: ./TestNetworkONNX.onnx + Goal: + 0.0 < x0 + x0 < 0.5 + 0.0 < y0 + y0 < 0.5 + Result = Unknown +
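
A quick sanity check of the new parser entry point declared in lib/onnx/onnx.mli. This is a minimal sketch, not part of the patch: it assumes a standalone executable linked against the onnx library and reuses the test model added under tests/.

(* Parse an ONNX model and print its flattened input/output arity,
   mirroring what src/language.ml does before registering the network
   as a Why3 symbol. For the checked-in test network this should print
   "inputs: 3, outputs: 2", matching the goal in tests/simple_onnx.t. *)
let () =
  match Onnx.parse "tests/TestNetworkONNX.onnx" with
  | Ok { Onnx.n_inputs; n_outputs } ->
    Printf.printf "inputs: %d, outputs: %d\n" n_inputs n_outputs
  | Error msg -> prerr_endline ("parse error: " ^ msg)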
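The dimension flattening in lib/onnx/onnx.ml multiplies out known dimensions and treats symbolic (`Dim_param) and unset dimensions as 1. A self-contained illustration of that rule over plain int options, standing in for the protobuf TensorShapeProto.Dimension.t values:

(* Known dimensions multiply; unknown (symbolic) ones count as 1. *)
let flattened_dim dims =
  List.fold_left
    (fun acc d -> match d with Some v -> acc * v | None -> acc)
    1 dims

let () =
  (* A fully known NCHW shape. *)
  assert (flattened_dim [ Some 1; Some 3; Some 224; Some 224 ] = 150528);
  (* A symbolic batch dimension ("N") does not change the flattened size. *)
  assert (flattened_dim [ None; Some 3; Some 224; Some 224 ] = 150528)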
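The TensorProto.raw_data rules in the vendored schema (fixed-width, little-endian) are presumably the reason this patch adds ocplib-endian to the dependency list. A sketch of decoding a FLOAT (float32) raw_data payload with that library; whether CAISAR ends up reading network weights this way is an assumption:

(* Decode a raw_data payload whose data_type is FLOAT: 4 bytes per
   element, little-endian, IEEE 754 single precision. *)
let floats_of_raw_data (raw : string) : float array =
  Array.init (String.length raw / 4) (fun i ->
      Int32.float_of_bits (EndianString.LittleEndian.get_int32 raw (4 * i)))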
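SparseTensorProto allows [NNZ]-shaped indices that store linearized positions, with the index tuple (k_1, ..., k_rank) recoverable from the dense shape. A hypothetical helper (not part of the patch) making that conversion explicit, using the row-major order mandated for tensor content:

(* Convert a linearized index into per-axis indices for the given
   dense shape, row-major (last axis varies fastest). *)
let unlinearize ~dims idx =
  let rec go acc idx = function
    | [] -> acc
    | d :: rest -> go ((idx mod d) :: acc) (idx / d) rest
  in
  go [] idx (List.rev dims)

let () =
  (* In a dense [2; 3] tensor, linear index 4 is row 1, column 1. *)
  assert (unlinearize ~dims:[ 2; 3 ] 4 = [ 1; 1 ])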
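The update_binding discussion in TrainingInfoProto splits one gradient-descent iteration into a pure computation y = x - r * g inside the "algorithm" graph and a binding x := y applied afterwards. A toy model of those semantics, with all names illustrative only:

(* "algorithm" is a pure function producing y; applying the binding
   copies y back onto the mutable initializer x. *)
let algorithm ~r ~g x = x -. (r *. g)

let () =
  let state = Hashtbl.create 1 in
  Hashtbl.replace state "x" 1.0;
  let y = algorithm ~r:0.1 ~g:0.5 (Hashtbl.find state "x") in
  (* update_binding { key = "x"; value = "y" } *)
  Hashtbl.replace state "x" y;
  assert (abs_float (Hashtbl.find state "x" -. 0.95) < 1e-9)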