author    Håvard Pettersen <havardpe@oath.com>    2020-10-15 15:59:25 +0000
committer Håvard Pettersen <havardpe@oath.com>    2020-10-16 13:08:49 +0000
commit    c8915db8971064410e8f025cb77086254621f07a
tree      576edf1b1a050a2b114365559197ee63915dea51 /eval/src/tests/tensor/instruction_benchmark
parent    cffbfbd659a06970d824dff98070ec2aee9019e1
added tensor lambda benchmark
Diffstat (limited to 'eval/src/tests/tensor/instruction_benchmark')
-rw-r--r--    eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp    59
1 file changed, 58 insertions(+), 1 deletion(-)
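
The benchmark being added measures how fast each implementation can materialize a tensor from a lambda over its cell indices, with one extra bound parameter p0. As a rough, illustrative sketch only (plain C++ with a hypothetical helper name, not the Vespa eval API and not what the commit compiles), the simple case benchmarked below amounts to filling a 64x64 float tensor with (a*64+b)*p0:

#include <cstddef>
#include <vector>

// Conceptual sketch only: the "simple" case fills tensor<float>(a[64],b[64])
// with (a*64+b)*p0 for a bound scalar p0. The benchmark itself runs the
// compiled tensor_function::lambda node, not this loop.
std::vector<float> simple_lambda_cells(double p0) {
    std::vector<float> cells(64 * 64);
    for (size_t a = 0; a < 64; ++a) {
        for (size_t b = 0; b < 64; ++b) {
            cells[a * 64 + b] = static_cast<float>((a * 64 + b) * p0);
        }
    }
    return cells;
}
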
diff --git a/eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp b/eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp
index a340b178c78..e107bd5a492 100644
--- a/eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp
+++ b/eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp
@@ -163,6 +163,14 @@ struct Impl {
         const auto &create_tensor_node = tensor_function::create(proto_type, spec, stash);
         return create_tensor_node.compile_self(engine, stash);
     }
+    Instruction create_tensor_lambda(const ValueType &type, const Function &function, const ValueType &p0_type, Stash &stash) const {
+        std::vector<ValueType> arg_types(type.dimensions().size(), ValueType::double_type());
+        arg_types.push_back(p0_type);
+        NodeTypes types(function, arg_types);
+        EXPECT_EQ(types.errors(), std::vector<vespalib::string>());
+        const auto &tensor_lambda_node = tensor_function::lambda(type, {0}, function, std::move(types), stash);
+        return tensor_lambda_node.compile_self(engine, stash);
+    }
 };
 
 //-----------------------------------------------------------------------------
@@ -254,16 +262,29 @@ std::vector<BenchmarkResult> benchmark_results;
 
 //-----------------------------------------------------------------------------
 
+struct MyParam : LazyParams {
+    Value::UP my_value;
+    MyParam() : my_value() {}
+    MyParam(const TensorSpec &p0, const Impl &impl) : my_value(impl.create_value(p0)) {}
+    const Value &resolve(size_t idx, Stash &) const override {
+        assert(idx == 0);
+        return *my_value;
+    }
+    ~MyParam() override;
+};
+MyParam::~MyParam() = default;
+
 struct EvalOp {
     using UP = std::unique_ptr<EvalOp>;
     const Impl &impl;
+    MyParam my_param;
     std::vector<Value::UP> values;
     std::vector<Value::CREF> stack;
     EvalSingle single;
     EvalOp(const EvalOp &) = delete;
     EvalOp &operator=(const EvalOp &) = delete;
     EvalOp(Instruction op, const std::vector<CREF<TensorSpec>> &stack_spec, const Impl &impl_in)
-        : impl(impl_in), values(), stack(), single(impl.engine, op)
+        : impl(impl_in), my_param(), values(), stack(), single(impl.engine, op)
     {
         for (const TensorSpec &spec: stack_spec) {
             values.push_back(impl.create_value(spec));
@@ -272,6 +293,10 @@ struct EvalOp {
             stack.push_back(*value.get());
         }
     }
+    EvalOp(Instruction op, const TensorSpec &p0, const Impl &impl_in)
+        : impl(impl_in), my_param(p0, impl), values(), stack(), single(impl.engine, op, my_param)
+    {
+    }
     TensorSpec result() { return impl.create_spec(single.eval(stack)); }
     double estimate_cost_us() {
         auto actual = [&](){ single.eval(stack); };
@@ -441,6 +466,20 @@ void benchmark_tensor_create(const vespalib::string &desc, const TensorSpec &pro
 
 //-----------------------------------------------------------------------------
 
+void benchmark_tensor_lambda(const vespalib::string &desc, const ValueType &type, const TensorSpec &p0, const Function &function) {
+    Stash stash;
+    ValueType p0_type = ValueType::from_spec(p0.type());
+    ASSERT_FALSE(p0_type.is_error());
+    std::vector<EvalOp::UP> list;
+    for (const Impl &impl: impl_list) {
+        auto op = impl.create_tensor_lambda(type, function, p0_type, stash);
+        list.push_back(std::make_unique<EvalOp>(op, p0, impl));
+    }
+    benchmark(desc, list);
+}
+
+//-----------------------------------------------------------------------------
+
 TEST(MakeInputTest, print_some_test_input) {
     auto number = make_spec(5.0);
     auto sparse = make_vector(D::map("x", 5, 3), 1.0);
@@ -778,6 +817,24 @@ TEST(TensorCreateBench, create_mixed) {
 
 //-----------------------------------------------------------------------------
 
+TEST(TensorLambdaBench, simple_lambda) {
+    auto type = ValueType::from_spec("tensor<float>(a[64],b[64])");
+    auto p0 = make_spec(3.5);
+    auto function = Function::parse({"a", "b", "p0"}, "(a*64+b)*p0");
+    ASSERT_FALSE(function->has_error());
+    benchmark_tensor_lambda("simple tensor lambda", type, p0, *function);
+}
+
+TEST(TensorLambdaBench, complex_lambda) {
+    auto type = ValueType::from_spec("tensor<float>(a[64],b[64])");
+    auto p0 = make_vector(D::idx("x", 3), 1.0);
+    auto function = Function::parse({"a", "b", "p0"}, "(a*64+b)*reduce(p0,sum)");
+    ASSERT_FALSE(function->has_error());
+    benchmark_tensor_lambda("complex tensor lambda", type, p0, *function);
+}
+
+//-----------------------------------------------------------------------------
+
 void print_results(const vespalib::string &desc, const std::vector<BenchmarkResult> &results) {
     if (results.empty()) {
         return;
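
The two benchmark cases differ in what the lambda captures: the simple lambda binds a scalar p0, while the complex lambda binds a small indexed vector and reduces it with sum inside the cell expression. Since reduce(p0,sum) does not depend on the cell indices a and b, the complex case is semantically equivalent to reducing p0 once and then filling the cells, as in this illustrative sketch (plain C++ with a hypothetical helper name, not the Vespa eval API); whether a given implementation actually hoists the reduction is up to that implementation:

#include <cstddef>
#include <vector>

// Conceptual sketch of the "complex" case: (a*64+b)*reduce(p0,sum) over
// tensor<float>(a[64],b[64]), with p0 a small indexed vector. The sum is
// computed once here because it is the same for every cell.
std::vector<float> complex_lambda_cells(const std::vector<double> &p0) {
    double sum = 0.0;
    for (double v : p0) {
        sum += v;               // reduce(p0,sum)
    }
    std::vector<float> cells(64 * 64);
    for (size_t a = 0; a < 64; ++a) {
        for (size_t b = 0; b < 64; ++b) {
            cells[a * 64 + b] = static_cast<float>((a * 64 + b) * sum);
        }
    }
    return cells;
}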