Diffstat (limited to 'eval/src/tests')
-rw-r--r--   eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp                       | 20
-rw-r--r--   eval/src/tests/instruction/dense_replace_type_function/dense_replace_type_function_test.cpp  |  4
-rw-r--r--   eval/src/tests/instruction/generic_peek/generic_peek_test.cpp                                |  2
-rw-r--r--   eval/src/tests/instruction/universal_dot_product/universal_dot_product_test.cpp              | 50
-rw-r--r--   eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp                        | 20
5 files changed, 52 insertions, 44 deletions
diff --git a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
index 4ba715ea192..ac7e0f6d126 100644
--- a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
+++ b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp
@@ -199,16 +199,18 @@ TEST("require that functions with non-interpretable complex lambdas cannot be in
//-----------------------------------------------------------------------------
TEST("require that compilation meta-data can be collected") {
- Stash stash;
- const auto &x2 = tensor_function::inject(ValueType::from_spec("tensor(x[2])"), 0, stash);
- const auto &x3 = tensor_function::inject(ValueType::from_spec("tensor(x[3])"), 1, stash);
- const auto &concat_x5 = tensor_function::concat(x3, x2, "x", stash);
- const auto &x5 = tensor_function::inject(ValueType::from_spec("tensor(x[5])"), 2, stash);
- const auto &mapped_x5 = tensor_function::map(x5, operation::Relu::f, stash);
- const auto &flag = tensor_function::inject(ValueType::from_spec("double"), 0, stash);
- const auto &root = tensor_function::if_node(flag, concat_x5, mapped_x5, stash);
+ auto fun = Function::parse("if(flag,concat(x2,x3,x),map(x5,f(x)(relu(x))))");
+ fprintf(stderr, "%s\n", fun->dump_as_lambda().c_str());
+ ASSERT_TRUE(fun->dump_as_lambda().starts_with("f(flag,x2,x3,x5)"));
+ std::vector<ValueType> param_types({ValueType::from_spec("double"),
+ ValueType::from_spec("tensor(x[2])"),
+ ValueType::from_spec("tensor(x[3])"),
+ ValueType::from_spec("tensor(x[5])")});
+ NodeTypes types(*fun, param_types);
+ ASSERT_FALSE(types.get_type(fun->root()).is_error());
+ ASSERT_TRUE(types.errors().empty());
CTFMetaData meta;
- InterpretedFunction ifun(FastValueBuilderFactory::get(), root, &meta);
+ auto ifun = InterpretedFunction::opts(FastValueBuilderFactory::get()).meta(&meta).make(fun->root(), types);
fprintf(stderr, "compilation meta-data:\n");
for (const auto &step: meta.steps) {
fprintf(stderr, " %s -> %s\n", step.class_name.c_str(), step.symbol_name.c_str());
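Taken together, the change above replaces a hand-built tensor function tree with a parsed Function plus NodeTypes, and switches InterpretedFunction construction to the builder-style opts() chain. A minimal sketch of that construction pattern, assuming only the opts()/meta()/make() calls visible in this diff (any further options on InterpretedFunction::Options are not shown here and would be assumptions):

    // sketch based on the test above; parses an expression, derives node types,
    // and builds an InterpretedFunction while collecting compilation meta-data
    auto fun = Function::parse("map(x,f(x)(relu(x)))");
    std::vector<ValueType> param_types({ValueType::from_spec("tensor(x[3])")});
    NodeTypes types(*fun, param_types);
    CTFMetaData meta;
    auto ifun = InterpretedFunction::opts(FastValueBuilderFactory::get())
                    .meta(&meta)
                    .make(fun->root(), types);
    // meta.steps now holds one entry per compiled instruction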
diff --git a/eval/src/tests/instruction/dense_replace_type_function/dense_replace_type_function_test.cpp b/eval/src/tests/instruction/dense_replace_type_function/dense_replace_type_function_test.cpp
index 0cb5a821136..748f38a3343 100644
--- a/eval/src/tests/instruction/dense_replace_type_function/dense_replace_type_function_test.cpp
+++ b/eval/src/tests/instruction/dense_replace_type_function/dense_replace_type_function_test.cpp
@@ -22,7 +22,7 @@ struct ChildMock : Leaf {
bool is_mutable;
ChildMock(const ValueType &type) : Leaf(type), is_mutable(true) {}
bool result_is_mutable() const override { return is_mutable; }
- InterpretedFunction::Instruction compile_self(const ValueBuilderFactory &, Stash &) const override { abort(); }
+ InterpretedFunction::Instruction compile_self(const CTFContext &) const override { abort(); }
};
struct Fixture {
@@ -42,7 +42,7 @@ struct Fixture {
{
my_fun.push_children(children);
state.stack.push_back(*my_value);
- my_fun.compile_self(prod_factory, state.stash).perform(state);
+ my_fun.compile_self(CTFContext(prod_factory, state.stash, nullptr)).perform(state);
ASSERT_EQUAL(children.size(), 1u);
ASSERT_EQUAL(state.stack.size(), 1u);
ASSERT_TRUE(!new_type.is_error());
diff --git a/eval/src/tests/instruction/generic_peek/generic_peek_test.cpp b/eval/src/tests/instruction/generic_peek/generic_peek_test.cpp
index 7f25edd50c8..df1661cc57f 100644
--- a/eval/src/tests/instruction/generic_peek/generic_peek_test.cpp
+++ b/eval/src/tests/instruction/generic_peek/generic_peek_test.cpp
@@ -115,7 +115,7 @@ TensorSpec tensor_function_peek(const TensorSpec &a, const ValueType &result_typ
}
const auto &func_param = tensor_function::inject(param->type(), 0, stash);
const auto &peek_node = tensor_function::peek(func_param, func_spec, stash);
- auto my_op = peek_node.compile_self(factory, stash);
+ auto my_op = peek_node.compile_self(CTFContext(factory, stash, nullptr));
InterpretedFunction::EvalSingle single(factory, my_op);
return spec_from_value(single.eval(my_stack));
}
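The common thread in these two test changes is the new compile_self signature: instead of a (factory, stash) pair, each node is compiled against a single CTFContext. A rough call-site sketch, assuming CTFContext simply bundles the factory, the stash, and an optional CTFMetaData pointer (nullptr when no meta-data should be collected):

    // assumption: CTFContext(factory, stash, meta), where meta == nullptr means
    // "do not collect compilation meta-data"
    CTFContext ctx(factory, stash, nullptr);
    InterpretedFunction::Instruction op = node.compile_self(ctx);
    // the resulting instruction is used exactly as before, e.g. via
    // InterpretedFunction::EvalSingle or Instruction::perform(state)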
diff --git a/eval/src/tests/instruction/universal_dot_product/universal_dot_product_test.cpp b/eval/src/tests/instruction/universal_dot_product/universal_dot_product_test.cpp
index 99809601d9a..6b72dd9ca06 100644
--- a/eval/src/tests/instruction/universal_dot_product/universal_dot_product_test.cpp
+++ b/eval/src/tests/instruction/universal_dot_product/universal_dot_product_test.cpp
@@ -75,6 +75,7 @@ class Optimize
private:
struct ctor_tag{};
public:
+ using optimize_fun_t = InterpretedFunction::Options::optimize_fun_t;
enum class With { NONE, CUSTOM, PROD, SPECIFIC };
With with;
vespalib::string name;
@@ -92,6 +93,29 @@ public:
static Optimize specific(const vespalib::string &name_in, tensor_function_optimizer optimizer_in) {
return {With::SPECIFIC, name_in, {}, optimizer_in, {}};
}
+ optimize_fun_t make_optimize_fun() const {
+ switch (with) {
+ case Optimize::With::NONE: return do_not_optimize_tensor_function;
+ case Optimize::With::PROD: return optimize_tensor_function;
+ case Optimize::With::CUSTOM:
+ return [options=options](const ValueBuilderFactory &factory, const TensorFunction &function, Stash &stash)
+ ->const TensorFunction &
+ {
+ return optimize_tensor_function_impl(factory, function, stash, options);
+ };
+ case Optimize::With::SPECIFIC:
+ return [optimizer=optimizer](const ValueBuilderFactory &, const TensorFunction &function, Stash &stash)
+ ->const TensorFunction &
+ {
+ size_t count = 0;
+ const auto &result = apply_tensor_function_optimizer(function, optimizer, stash,
+ [&count](const auto &)noexcept{ ++count; });
+ EXPECT_EQ(count, 1);
+ return result;
+ };
+ }
+ abort();
+ }
~Optimize();
};
Optimize::~Optimize() = default;
@@ -201,29 +225,11 @@ void benchmark(const vespalib::string &expr, std::vector<Optimize> list) {
}
NodeTypes node_types(*fun, param_types);
ASSERT_FALSE(node_types.get_type(fun->root()).is_error());
- Stash stash;
- const TensorFunction &plain_fun = make_tensor_function(prod_factory, fun->root(), node_types, stash);
- const TensorFunction *optimized = nullptr;
- switch (optimize.with) {
- case Optimize::With::NONE:
- optimized = std::addressof(plain_fun);
- break;
- case Optimize::With::PROD:
- optimized = std::addressof(optimize_tensor_function(prod_factory, plain_fun, stash));
- break;
- case Optimize::With::CUSTOM:
- optimized = std::addressof(optimize_tensor_function(prod_factory, plain_fun, stash, optimize.options));
- break;
- case Optimize::With::SPECIFIC:
- size_t count = 0;
- optimized = std::addressof(apply_tensor_function_optimizer(plain_fun, optimize.optimizer, stash,
- [&count](const auto &)noexcept{ ++count; }));
- ASSERT_EQ(count, 1);
- break;
- }
- ASSERT_NE(optimized, nullptr);
CTFMetaData ctf_meta;
- InterpretedFunction ifun(prod_factory, *optimized, &ctf_meta);
+ auto ifun = InterpretedFunction::opts(prod_factory)
+ .optimize(optimize.make_optimize_fun())
+ .meta(&ctf_meta)
+ .make(fun->root(), node_types);
InterpretedFunction::ProfiledContext pctx(ifun);
ASSERT_EQ(ctf_meta.steps.size(), ifun.program_size());
std::vector<duration> prev_time(ctf_meta.steps.size(), duration::zero());
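The benchmark now expresses its four optimization strategies as an optimize_fun_t returned by Optimize::make_optimize_fun(), and hands that function to the builder instead of switching on Optimize::With at the call site. A small sketch of plugging a custom optimize step into the same hook, assuming the (factory, function, stash) -> const TensorFunction & signature used above:

    // sketch: a pass-through optimizer wired into the builder
    auto no_opt = [](const ValueBuilderFactory &, const TensorFunction &function, Stash &)
                      -> const TensorFunction & { return function; };
    auto ifun = InterpretedFunction::opts(prod_factory)
                    .optimize(no_opt)
                    .meta(&ctf_meta)
                    .make(fun->root(), node_types);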
diff --git a/eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp b/eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp
index 733acbc09bf..9fa2ef3d56f 100644
--- a/eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp
+++ b/eval/src/tests/tensor/instruction_benchmark/instruction_benchmark.cpp
@@ -97,7 +97,7 @@ void my_multi_instruction_op(InterpretedFunction::State &state, uint64_t param_i
void collect_op1_chain(const TensorFunction &node, const ValueBuilderFactory &factory, Stash &stash, std::vector<Instruction> &list) {
if (auto op1 = as<tensor_function::Op1>(node)) {
collect_op1_chain(op1->child(), factory, stash, list);
- list.push_back(node.compile_self(factory, stash));
+ list.push_back(node.compile_self(CTFContext(factory, stash, nullptr)));
}
}
@@ -125,7 +125,7 @@ struct Impl {
const auto &rhs_node = tensor_function::inject(rhs, 1, stash);
const auto &join_node = tensor_function::join(lhs_node, rhs_node, function, stash);
const auto &node = optimize ? optimize_tensor_function(factory, join_node, stash) : join_node;
- return node.compile_self(factory, stash);
+ return node.compile_self(CTFContext(factory, stash, nullptr));
}
Instruction create_reduce(const ValueType &lhs, Aggr aggr, const std::vector<vespalib::string> &dims, Stash &stash) const {
// create a complete tensor function, but only compile the relevant instruction
@@ -142,7 +142,7 @@ struct Impl {
const auto &lhs_node = tensor_function::inject(lhs, 0, stash);
const auto &rename_node = tensor_function::rename(lhs_node, from, to, stash);
const auto &node = optimize ? optimize_tensor_function(factory, rename_node, stash) : rename_node;
- return node.compile_self(factory, stash);
+ return node.compile_self(CTFContext(factory, stash, nullptr));
}
Instruction create_merge(const ValueType &lhs, const ValueType &rhs, operation::op2_t function, Stash &stash) const {
// create a complete tensor function, but only compile the relevant instruction
@@ -150,23 +150,23 @@ struct Impl {
const auto &rhs_node = tensor_function::inject(rhs, 1, stash);
const auto &merge_node = tensor_function::merge(lhs_node, rhs_node, function, stash);
const auto &node = optimize ? optimize_tensor_function(factory, merge_node, stash) : merge_node;
- return node.compile_self(factory, stash);
+ return node.compile_self(CTFContext(factory, stash, nullptr));
}
Instruction create_concat(const ValueType &lhs, const ValueType &rhs, const std::string &dimension, Stash &stash) const {
// create a complete tensor function, but only compile the relevant instruction
const auto &lhs_node = tensor_function::inject(lhs, 0, stash);
const auto &rhs_node = tensor_function::inject(rhs, 1, stash);
const auto &concat_node = tensor_function::concat(lhs_node, rhs_node, dimension, stash);
- return concat_node.compile_self(factory, stash);
+ return concat_node.compile_self(CTFContext(factory, stash, nullptr));
const auto &node = optimize ? optimize_tensor_function(factory, concat_node, stash) : concat_node;
- return node.compile_self(factory, stash);
+ return node.compile_self(CTFContext(factory, stash, nullptr));
}
Instruction create_map(const ValueType &lhs, operation::op1_t function, Stash &stash) const {
// create a complete tensor function, but only compile the relevant instruction
const auto &lhs_node = tensor_function::inject(lhs, 0, stash);
const auto &map_node = tensor_function::map(lhs_node, function, stash);
const auto &node = optimize ? optimize_tensor_function(factory, map_node, stash) : map_node;
- return node.compile_self(factory, stash);
+ return node.compile_self(CTFContext(factory, stash, nullptr));
}
Instruction create_tensor_create(const ValueType &proto_type, const TensorSpec &proto, Stash &stash) const {
// create a complete tensor function, but only compile the relevant instruction
@@ -177,7 +177,7 @@ struct Impl {
}
const auto &create_tensor_node = tensor_function::create(proto_type, spec, stash);
const auto &node = optimize ? optimize_tensor_function(factory, create_tensor_node, stash) : create_tensor_node;
- return node.compile_self(factory, stash);
+ return node.compile_self(CTFContext(factory, stash, nullptr));
}
Instruction create_tensor_lambda(const ValueType &type, const Function &function, const ValueType &p0_type, Stash &stash) const {
std::vector<ValueType> arg_types(type.dimensions().size(), ValueType::double_type());
@@ -186,7 +186,7 @@ struct Impl {
EXPECT_EQ(types.errors(), std::vector<vespalib::string>());
const auto &tensor_lambda_node = tensor_function::lambda(type, {0}, function, std::move(types), stash);
const auto &node = optimize ? optimize_tensor_function(factory, tensor_lambda_node, stash) : tensor_lambda_node;
- return node.compile_self(factory, stash);
+ return node.compile_self(CTFContext(factory, stash, nullptr));
}
Instruction create_tensor_peek(const ValueType &type, const MyPeekSpec &my_spec, Stash &stash) const {
// create a complete tensor function, but only compile the relevant instruction
@@ -210,7 +210,7 @@ struct Impl {
}
const auto &peek_node = tensor_function::peek(my_param, spec, stash);
const auto &node = optimize ? optimize_tensor_function(factory, peek_node, stash) : peek_node;
- return node.compile_self(factory, stash);
+ return node.compile_self(CTFContext(factory, stash, nullptr));
}
};
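Every compile site in instruction_benchmark.cpp now spells out CTFContext(factory, stash, nullptr). If that repetition becomes a nuisance, a small local helper (hypothetical, not part of this diff) could capture it:

    // hypothetical convenience wrapper; the name is illustrative only
    InterpretedFunction::Instruction compile_plain(const TensorFunction &node,
                                                   const ValueBuilderFactory &factory,
                                                   Stash &stash)
    {
        return node.compile_self(CTFContext(factory, stash, nullptr));
    }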