Diffstat (limited to 'model-integration/src/test')
17 files changed, 411 insertions, 0 deletions
diff --git a/model-integration/src/test/java/ai/vespa/modelintegration/evaluator/OnnxEvaluatorTest.java b/model-integration/src/test/java/ai/vespa/modelintegration/evaluator/OnnxEvaluatorTest.java
new file mode 100644
index 00000000000..4b42e18d75e
--- /dev/null
+++ b/model-integration/src/test/java/ai/vespa/modelintegration/evaluator/OnnxEvaluatorTest.java
@@ -0,0 +1,93 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package ai.vespa.modelintegration.evaluator;
+
+import com.yahoo.tensor.Tensor;
+import com.yahoo.tensor.TensorType;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * @author lesters
+ */
+public class OnnxEvaluatorTest {
+
+    @Test
+    public void testSimpleModel() {
+        OnnxEvaluator evaluator = new OnnxEvaluator("src/test/models/onnx/simple/simple.onnx");
+
+        // Input types
+        Map<String, TensorType> inputTypes = evaluator.getInputInfo();
+        assertEquals(inputTypes.get("query_tensor"), TensorType.fromSpec("tensor<float>(d0[1],d1[4])"));
+        assertEquals(inputTypes.get("attribute_tensor"), TensorType.fromSpec("tensor<float>(d0[4],d1[1])"));
+        assertEquals(inputTypes.get("bias_tensor"), TensorType.fromSpec("tensor<float>(d0[1],d1[1])"));
+
+        // Output types
+        Map<String, TensorType> outputTypes = evaluator.getOutputInfo();
+        assertEquals(outputTypes.get("output"), TensorType.fromSpec("tensor<float>(d0[1],d1[1])"));
+
+        // Evaluation
+        Map<String, Tensor> inputs = new HashMap<>();
+        inputs.put("query_tensor", Tensor.from("tensor(d0[1],d1[4]):[0.1, 0.2, 0.3, 0.4]"));
+        inputs.put("attribute_tensor", Tensor.from("tensor(d0[4],d1[1]):[0.1, 0.2, 0.3, 0.4]"));
+        inputs.put("bias_tensor", Tensor.from("tensor(d0[1],d1[1]):[1.0]"));
+
+        assertEquals(evaluator.evaluate(inputs).get("output"), Tensor.from("tensor(d0[1],d1[1]):[1.3]"));
+        assertEquals(evaluator.evaluate(inputs, "output"), Tensor.from("tensor(d0[1],d1[1]):[1.3]"));
+    }
+
+    @Test
+    public void testBatchDimension() {
+        OnnxEvaluator evaluator = new OnnxEvaluator("src/test/models/onnx/pytorch/one_layer.onnx");
+
+        // Input types
+        Map<String, TensorType> inputTypes = evaluator.getInputInfo();
+        assertEquals(inputTypes.get("input"), TensorType.fromSpec("tensor<float>(d0[],d1[3])"));
+
+        // Output types
+        Map<String, TensorType> outputTypes = evaluator.getOutputInfo();
+        assertEquals(outputTypes.get("output"), TensorType.fromSpec("tensor<float>(d0[],d1[1])"));
+
+        // Evaluation
+        Map<String, Tensor> inputs = new HashMap<>();
+        inputs.put("input", Tensor.from("tensor<float>(d0[2],d1[3]):[[0.1, 0.2, 0.3],[0.4, 0.5, 0.6]]"));
+        assertEquals(evaluator.evaluate(inputs, "output"), Tensor.from("tensor<float>(d0[2],d1[1]):[0.6393113,0.67574286]"));
+    }
+
+    @Test
+    public void testMatMul() {
+        String expected = "tensor<float>(d0[2],d1[4]):[38,44,50,56,83,98,113,128]";
+        String input1 = "tensor<float>(d0[2],d1[3]):[1,2,3,4,5,6]";
+        String input2 = "tensor<float>(d0[3],d1[4]):[1,2,3,4,5,6,7,8,9,10,11,12]";
+        assertEvaluate("simple/matmul.onnx", expected, input1, input2);
+    }
+
+    @Test
+    public void testTypes() {
+        assertEvaluate("add_double.onnx", "tensor(d0[1]):[3]", "tensor(d0[1]):[1]", "tensor(d0[1]):[2]");
+        assertEvaluate("add_float.onnx", "tensor<float>(d0[1]):[3]", "tensor<float>(d0[1]):[1]", "tensor<float>(d0[1]):[2]");
+        assertEvaluate("add_int64.onnx", "tensor<double>(d0[1]):[3]", "tensor<double>(d0[1]):[1]", "tensor<double>(d0[1]):[2]");
+        assertEvaluate("cast_int8_float.onnx", "tensor<float>(d0[1]):[-128]", "tensor<int8>(d0[1]):[128]");
+        assertEvaluate("cast_float_int8.onnx", "tensor<int8>(d0[1]):[-1]", "tensor<float>(d0[1]):[255]");
+
+        // ONNX Runtime 1.7.0 does not support much of bfloat16 yet
+        // assertEvaluate("cast_bfloat16_float.onnx", "tensor<float>(d0[1]):[1]", "tensor<bfloat16>(d0[1]):[1]");
+    }
+
+    private void assertEvaluate(String model, String output, String... input) {
+        OnnxEvaluator evaluator = new OnnxEvaluator("src/test/models/onnx/" + model);
+        Map<String, Tensor> inputs = new HashMap<>();
+        for (int i = 0; i < input.length; ++i) {
+            inputs.put("input" + (i + 1), Tensor.from(input[i]));
+        }
+        Tensor expected = Tensor.from(output);
+        Tensor result = evaluator.evaluate(inputs, "output");
+        assertEquals(expected, result);
+        assertEquals(expected.type().valueType(), result.type().valueType());
+    }
+
+}
diff --git a/model-integration/src/test/models/onnx/add_double.onnx b/model-integration/src/test/models/onnx/add_double.onnx
new file mode 100644
index 00000000000..9264d1eb9f9
--- /dev/null
+++ b/model-integration/src/test/models/onnx/add_double.onnx
[16 lines of serialized ONNX model data omitted; produced by add_double.py]
diff --git a/model-integration/src/test/models/onnx/add_double.py b/model-integration/src/test/models/onnx/add_double.py
new file mode 100755
index 00000000000..fa9aa48f4b2
--- /dev/null
+++ b/model-integration/src/test/models/onnx/add_double.py
@@ -0,0 +1,27 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import onnx
+from onnx import helper, TensorProto
+
+INPUT_1 = helper.make_tensor_value_info('input1', TensorProto.DOUBLE, [1])
+INPUT_2 = helper.make_tensor_value_info('input2', TensorProto.DOUBLE, [1])
+OUTPUT = helper.make_tensor_value_info('output', TensorProto.DOUBLE, [1])
+
+nodes = [
+    helper.make_node(
+        'Add',
+        ['input1', 'input2'],
+        ['output'],
+    ),
+]
+graph_def = helper.make_graph(
+    nodes,
+    'add',
+    [
+        INPUT_1,
+        INPUT_2
+    ],
+    [OUTPUT],
+)
+model_def = helper.make_model(graph_def, producer_name='add_double.py', opset_imports=[onnx.OperatorSetIdProto(version=12)])
+onnx.save(model_def, 'add_double.onnx')
diff --git a/model-integration/src/test/models/onnx/add_float.onnx b/model-integration/src/test/models/onnx/add_float.onnx
new file mode 100644
index 00000000000..0e3ad8f900c
--- /dev/null
+++ b/model-integration/src/test/models/onnx/add_float.onnx
[16 lines of serialized ONNX model data omitted; produced by add_float.py]
diff --git a/model-integration/src/test/models/onnx/add_float.py b/model-integration/src/test/models/onnx/add_float.py
new file mode 100755
index 00000000000..e18b2c46d9d
--- /dev/null
+++ b/model-integration/src/test/models/onnx/add_float.py
@@ -0,0 +1,27 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import onnx
+from onnx import helper, TensorProto
+
+INPUT_1 = helper.make_tensor_value_info('input1', TensorProto.FLOAT, [1])
+INPUT_2 = helper.make_tensor_value_info('input2', TensorProto.FLOAT, [1])
+OUTPUT = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1])
+
+nodes = [
+    helper.make_node(
+        'Add',
+        ['input1', 'input2'],
+        ['output'],
+    ),
+]
+graph_def = helper.make_graph(
+    nodes,
+    'add',
+    [
+        INPUT_1,
+        INPUT_2
+    ],
+    [OUTPUT],
+)
+model_def = helper.make_model(graph_def, producer_name='add_float.py', opset_imports=[onnx.OperatorSetIdProto(version=12)])
+onnx.save(model_def, 'add_float.onnx')
diff --git a/model-integration/src/test/models/onnx/add_int64.onnx b/model-integration/src/test/models/onnx/add_int64.onnx
new file mode 100644
index 00000000000..7b3a9ec6b95
--- /dev/null
+++ b/model-integration/src/test/models/onnx/add_int64.onnx
[16 lines of serialized ONNX model data omitted; produced by add_int64.py]
diff --git a/model-integration/src/test/models/onnx/add_int64.py b/model-integration/src/test/models/onnx/add_int64.py
new file mode 100755
index 00000000000..87908e292a2
--- /dev/null
+++ b/model-integration/src/test/models/onnx/add_int64.py
@@ -0,0 +1,27 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import onnx
+from onnx import helper, TensorProto
+
+INPUT_1 = helper.make_tensor_value_info('input1', TensorProto.INT64, [1])
+INPUT_2 = helper.make_tensor_value_info('input2', TensorProto.INT64, [1])
+OUTPUT = helper.make_tensor_value_info('output', TensorProto.INT64, [1])
+
+nodes = [
+    helper.make_node(
+        'Add',
+        ['input1', 'input2'],
+        ['output'],
+    ),
+]
+graph_def = helper.make_graph(
+    nodes,
+    'add',
+    [
+        INPUT_1,
+        INPUT_2
+    ],
+    [OUTPUT],
+)
+model_def = helper.make_model(graph_def, producer_name='add_int64.py', opset_imports=[onnx.OperatorSetIdProto(version=12)])
+onnx.save(model_def, 'add_int64.onnx')
diff --git a/model-integration/src/test/models/onnx/cast_bfloat16_float.onnx b/model-integration/src/test/models/onnx/cast_bfloat16_float.onnx
new file mode 100644
index 00000000000..cb19592abf4
--- /dev/null
+++ b/model-integration/src/test/models/onnx/cast_bfloat16_float.onnx
[12 lines of serialized ONNX model data omitted; produced by cast_bfloat16_float.py]
diff --git a/model-integration/src/test/models/onnx/cast_bfloat16_float.py b/model-integration/src/test/models/onnx/cast_bfloat16_float.py
new file mode 100755
index 00000000000..14b05347262
--- /dev/null
+++ b/model-integration/src/test/models/onnx/cast_bfloat16_float.py
@@ -0,0 +1,24 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import onnx
+from onnx import helper, TensorProto
+
+INPUT_1 = helper.make_tensor_value_info('input1', TensorProto.BFLOAT16, [1])
+OUTPUT = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1])
+
+nodes = [
+    helper.make_node(
+        'Cast',
+        ['input1'],
+        ['output'],
+        to=TensorProto.FLOAT
+    ),
+]
+graph_def = helper.make_graph(
+    nodes,
+    'cast',
+    [INPUT_1],
+    [OUTPUT],
+)
+model_def = helper.make_model(graph_def, producer_name='cast_bfloat16_float.py', opset_imports=[onnx.OperatorSetIdProto(version=12)])
+onnx.save(model_def, 'cast_bfloat16_float.onnx')
diff --git a/model-integration/src/test/models/onnx/cast_float_int8.onnx b/model-integration/src/test/models/onnx/cast_float_int8.onnx
new file mode 100644
index 00000000000..c30b023dd68
--- /dev/null
+++ b/model-integration/src/test/models/onnx/cast_float_int8.onnx
[12 lines of serialized ONNX model data omitted; produced by cast_float_int8.py]
diff --git a/model-integration/src/test/models/onnx/cast_float_int8.py b/model-integration/src/test/models/onnx/cast_float_int8.py
new file mode 100755
index 00000000000..bdc0850d033
--- /dev/null
+++ b/model-integration/src/test/models/onnx/cast_float_int8.py
@@ -0,0 +1,24 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import onnx
+from onnx import helper, TensorProto
+
+INPUT_1 = helper.make_tensor_value_info('input1', TensorProto.FLOAT, [1])
+OUTPUT = helper.make_tensor_value_info('output', TensorProto.INT8, [1])
+
+nodes = [
+    helper.make_node(
+        'Cast',
+        ['input1'],
+        ['output'],
+        to=TensorProto.INT8
+    ),
+]
+graph_def = helper.make_graph(
+    nodes,
+    'cast',
+    [INPUT_1],
+    [OUTPUT],
+)
+model_def = helper.make_model(graph_def, producer_name='cast_float_int8.py', opset_imports=[onnx.OperatorSetIdProto(version=12)])
+onnx.save(model_def, 'cast_float_int8.onnx')
diff --git a/model-integration/src/test/models/onnx/cast_int8_float.onnx b/model-integration/src/test/models/onnx/cast_int8_float.onnx
new file mode 100644
index 00000000000..65aea4a36ae
--- /dev/null
+++ b/model-integration/src/test/models/onnx/cast_int8_float.onnx
[12 lines of serialized ONNX model data omitted; produced by cast_int8_float.py]
diff --git a/model-integration/src/test/models/onnx/cast_int8_float.py b/model-integration/src/test/models/onnx/cast_int8_float.py
new file mode 100755
index 00000000000..70bf2cf70ca
--- /dev/null
+++ b/model-integration/src/test/models/onnx/cast_int8_float.py
@@ -0,0 +1,24 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import onnx
+from onnx import helper, TensorProto
+
+INPUT_1 = helper.make_tensor_value_info('input1', TensorProto.INT8, [1])
+OUTPUT = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1])
+
+nodes = [
+    helper.make_node(
+        'Cast',
+        ['input1'],
+        ['output'],
+        to=TensorProto.FLOAT
+    ),
+]
+graph_def = helper.make_graph(
+    nodes,
+    'cast',
+    [INPUT_1],
+    [OUTPUT],
+)
+model_def = helper.make_model(graph_def, producer_name='cast_int8_float.py', opset_imports=[onnx.OperatorSetIdProto(version=12)])
+onnx.save(model_def, 'cast_int8_float.onnx')
diff --git a/model-integration/src/test/models/onnx/pytorch/one_layer.onnx b/model-integration/src/test/models/onnx/pytorch/one_layer.onnx
Binary files differ
new file mode 100644
index 00000000000..dc9f664b943
--- /dev/null
+++ b/model-integration/src/test/models/onnx/pytorch/one_layer.onnx
diff --git a/model-integration/src/test/models/onnx/pytorch/pytorch_one_layer.py b/model-integration/src/test/models/onnx/pytorch/pytorch_one_layer.py
new file mode 100755
index 00000000000..1296d84e180
--- /dev/null
+++ b/model-integration/src/test/models/onnx/pytorch/pytorch_one_layer.py
@@ -0,0 +1,38 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import torch
+import torch.onnx
+
+
+class MyModel(torch.nn.Module):
+    def __init__(self):
+        super(MyModel, self).__init__()
+        self.linear = torch.nn.Linear(in_features=3, out_features=1)
+        self.logistic = torch.nn.Sigmoid()
+
+    def forward(self, vec):
+        return self.logistic(self.linear(vec))
+
+
+def main():
+    model = MyModel()
+
+    # Omit training - just export randomly initialized network
+
+    data = torch.FloatTensor([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]])
+    torch.onnx.export(model,
+                      data,
+                      "one_layer.onnx",
+                      input_names=["input"],
+                      output_names=["output"],
+                      dynamic_axes={
+                          "input": {0: "batch"},
+                          "output": {0: "batch"},
+                      },
+                      opset_version=12)
+
+
+if __name__ == "__main__":
+    main()
+
+
diff --git a/model-integration/src/test/models/onnx/simple/matmul.onnx b/model-integration/src/test/models/onnx/simple/matmul.onnx
new file mode 100644
index 00000000000..9bb88406116
--- /dev/null
+++ b/model-integration/src/test/models/onnx/simple/matmul.onnx
[16 lines of serialized ONNX model data omitted; produced by matmul.py]
diff --git a/model-integration/src/test/models/onnx/simple/matmul.py b/model-integration/src/test/models/onnx/simple/matmul.py
new file mode 100755
index 00000000000..beec55e9f5a
--- /dev/null
+++ b/model-integration/src/test/models/onnx/simple/matmul.py
@@ -0,0 +1,27 @@
+# Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import onnx
+from onnx import helper, TensorProto
+
+INPUT1 = helper.make_tensor_value_info('input1', TensorProto.FLOAT, [2, 3])
+INPUT2 = helper.make_tensor_value_info('input2', TensorProto.FLOAT, [3, 4])
+OUTPUT = helper.make_tensor_value_info('output', TensorProto.FLOAT, [2, 4])
+
+nodes = [
+    helper.make_node(
+        'MatMul',
+        ['input1', 'input2'],
+        ['output'],
+    ),
+]
+graph_def = helper.make_graph(
+    nodes,
+    'matmul',
+    [
+        INPUT1,
+        INPUT2,
+    ],
+    [OUTPUT],
+)
+model_def = helper.make_model(graph_def, producer_name='matmul.py', opset_imports=[onnx.OperatorSetIdProto(version=12)])
+onnx.save(model_def, 'matmul.onnx')
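For context, outside the diff itself: a minimal sketch of how the OnnxEvaluator API exercised by OnnxEvaluatorTest above could be driven from calling code. The model path, input name, and wrapper class here are illustrative assumptions; the constructor, getInputInfo, getOutputInfo, and evaluate calls mirror those used in the test.

// Illustrative sketch only; model path and input name are hypothetical.
import ai.vespa.modelintegration.evaluator.OnnxEvaluator;
import com.yahoo.tensor.Tensor;
import com.yahoo.tensor.TensorType;

import java.util.HashMap;
import java.util.Map;

public class OnnxEvaluatorExample {

    public static void main(String[] args) {
        // Load an ONNX model from disk (hypothetical path)
        OnnxEvaluator evaluator = new OnnxEvaluator("models/my_model.onnx");

        // Inspect the model's declared input and output tensor types
        Map<String, TensorType> inputTypes = evaluator.getInputInfo();
        Map<String, TensorType> outputTypes = evaluator.getOutputInfo();
        inputTypes.forEach((name, type) -> System.out.println("input:  " + name + " " + type));
        outputTypes.forEach((name, type) -> System.out.println("output: " + name + " " + type));

        // Bind one tensor per model input, then evaluate a single named output
        Map<String, Tensor> inputs = new HashMap<>();
        inputs.put("input", Tensor.from("tensor<float>(d0[1],d1[3]):[0.1, 0.2, 0.3]"));
        Tensor output = evaluator.evaluate(inputs, "output");
        System.out.println(output);
    }

}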