author    Håvard Pettersen <havardpe@oath.com>  2018-03-02 11:13:19 +0000
committer Håvard Pettersen <havardpe@oath.com>  2018-03-02 11:13:19 +0000
commit    829e3ddec1e49911f8ffcc5b0f32846efd236e9e (patch)
tree      2cfea2ff14faa8a27beb2f130daf844c7af43de6 /eval
parent    37bdbc489a0b1bac3d158fee55f65d4526853c41 (diff)
minor cleanup
Diffstat (limited to 'eval')
-rw-r--r--  eval/src/tests/tensor/dense_inplace_join_function/dense_inplace_join_function_test.cpp  131
-rw-r--r--  eval/src/vespa/eval/eval/test/eval_fixture.cpp  6
-rw-r--r--  eval/src/vespa/eval/eval/test/eval_fixture.h  1
3 files changed, 76 insertions, 62 deletions
diff --git a/eval/src/tests/tensor/dense_inplace_join_function/dense_inplace_join_function_test.cpp b/eval/src/tests/tensor/dense_inplace_join_function/dense_inplace_join_function_test.cpp
index 28c5ebc7037..c794b81f573 100644
--- a/eval/src/tests/tensor/dense_inplace_join_function/dense_inplace_join_function_test.cpp
+++ b/eval/src/tests/tensor/dense_inplace_join_function/dense_inplace_join_function_test.cpp
@@ -21,49 +21,49 @@ using namespace vespalib::eval::tensor_function;
const TensorEngine &prod_engine = DefaultTensorEngine::ref();
-struct SimpleSequence : public Sequence {
- int _step, _bias;
- SimpleSequence(int step, int bias) : _step(step), _bias(bias) {}
- double operator[](size_t i) const override { return ((_step * i) + _bias) & 0xffff; }
- ~SimpleSequence() {}
-};
-struct SequenceGenerator {
- int step, bias;
- SequenceGenerator() : step(0), bias(0) {}
- SimpleSequence next() {
- step += 17;
- bias += 42;
- return SimpleSequence(step, bias);
+double seq_value = 0.0;
+
+struct GlobalSequence : public Sequence {
+ GlobalSequence() {}
+ double operator[](size_t) const override {
+ seq_value += 1.0;
+ return seq_value;
}
-} seqGen;
+ ~GlobalSequence() {}
+};
+GlobalSequence seq;
EvalFixture::ParamRepo make_params() {
return EvalFixture::ParamRepo()
- .add("con_dbl_A", spec(1.5))
- .add("con_dbl_B", spec(2.5))
- .add("con_dbl_C", spec(3.5))
- .add("con_vec_A", spec({x(5)}, seqGen.next()))
- .add("con_vec_B", spec({x(5)}, seqGen.next()))
- .add("con_vec_C", spec({x(5)}, seqGen.next()))
- .add("con_x5y3_A", spec({x(5),y(3)}, seqGen.next()))
- .add_mutable("mut_dbl_A", spec(4.5))
- .add_mutable("mut_dbl_B", spec(5.5))
- .add_mutable("mut_dbl_C", spec(6.5))
- .add_mutable("mut_vec_A", spec({x(5)}, seqGen.next()))
- .add_mutable("mut_vec_B", spec({x(5)}, seqGen.next()))
- .add_mutable("mut_vec_C", spec({x(5)}, seqGen.next()))
- .add_mutable("mut_x5y3_A", spec({x(5),y(3)}, seqGen.next()))
- .add_mutable("mut_x5y3_B", spec({x(5),y(3)}, seqGen.next()))
- .add_mutable("mut_x5y3_C", spec({x(5),y(3)}, seqGen.next()))
- .add_mutable("mut_vec_unbound", spec({x(5)}, seqGen.next()), "tensor(x[])")
- .add_mutable("mut_vec_sparse", spec({x({"a", "b", "c"})}, seqGen.next()));
+ .add("con_x5_A", spec({x(5)}, seq))
+ .add("con_x5_B", spec({x(5)}, seq))
+ .add("con_x5_C", spec({x(5)}, seq))
+ .add("con_x5y3_A", spec({x(5),y(3)}, seq))
+ .add("con_x5y3_B", spec({x(5),y(3)}, seq))
+ .add_mutable("mut_dbl_A", spec(1.5))
+ .add_mutable("mut_dbl_B", spec(2.5))
+ .add_mutable("mut_x5_A", spec({x(5)}, seq))
+ .add_mutable("mut_x5_B", spec({x(5)}, seq))
+ .add_mutable("mut_x5_C", spec({x(5)}, seq))
+ .add_mutable("mut_x4", spec({x(4)}, seq))
+ .add_mutable("mut_x5y3_A", spec({x(5),y(3)}, seq))
+ .add_mutable("mut_x5y3_B", spec({x(5),y(3)}, seq))
+ .add_mutable("mut_x5_unbound", spec({x(5)}, seq), "tensor(x[])")
+ .add_mutable("mut_x_sparse", spec({x({"a", "b", "c"})}, seq));
}
EvalFixture::ParamRepo param_repo = make_params();
void verify_optimized(const vespalib::string &expr, size_t cnt, size_t param_idx) {
EvalFixture fixture(prod_engine, expr, param_repo, true, true);
EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo));
- EXPECT_EQUAL(fixture.get_param(param_idx), fixture.result());
+ for (size_t i = 0; i < fixture.num_params(); ++i) {
+ TEST_STATE(vespalib::make_string("param %zu", i).c_str());
+ if (i == param_idx) {
+ EXPECT_EQUAL(fixture.get_param(i), fixture.result());
+ } else {
+ EXPECT_NOT_EQUAL(fixture.get_param(i), fixture.result());
+ }
+ }
auto info = fixture.find_all<DenseInplaceJoinFunction>();
ASSERT_EQUAL(info.size(), cnt);
for (size_t i = 0; i < cnt; ++i) {
@@ -71,70 +71,77 @@ void verify_optimized(const vespalib::string &expr, size_t cnt, size_t param_idx
}
}
-void verify_left_optimized(const vespalib::string &expr, size_t cnt) {
+void verify_p0_optimized(const vespalib::string &expr, size_t cnt) {
verify_optimized(expr, cnt, 0);
}
-void verify_right_optimized(const vespalib::string &expr, size_t cnt) {
+void verify_p1_optimized(const vespalib::string &expr, size_t cnt) {
verify_optimized(expr, cnt, 1);
}
-void verify_right2_optimized(const vespalib::string &expr, size_t cnt) {
+void verify_p2_optimized(const vespalib::string &expr, size_t cnt) {
verify_optimized(expr, cnt, 2);
}
void verify_not_optimized(const vespalib::string &expr) {
EvalFixture fixture(prod_engine, expr, param_repo, true, true);
EXPECT_EQUAL(fixture.result(), EvalFixture::ref(expr, param_repo));
- EXPECT_NOT_EQUAL(fixture.get_param(0), fixture.result());
+ for (size_t i = 0; i < fixture.num_params(); ++i) {
+ EXPECT_NOT_EQUAL(fixture.get_param(i), fixture.result());
+ }
auto info = fixture.find_all<DenseInplaceJoinFunction>();
EXPECT_TRUE(info.empty());
}
TEST("require that mutable dense concrete tensors are optimized") {
- TEST_DO(verify_left_optimized("mut_vec_A-mut_vec_B", 1));
- TEST_DO(verify_left_optimized("mut_x5y3_A-mut_x5y3_B", 1));
- TEST_DO(verify_left_optimized("mut_vec_A-mut_vec_B", 1));
- TEST_DO(verify_left_optimized("mut_vec_A-con_vec_A", 1));
- TEST_DO(verify_right_optimized("con_vec_A-mut_vec_A", 1));
- TEST_DO(verify_right_optimized("con_x5y3_A-mut_x5y3_A", 1));
+ TEST_DO(verify_p0_optimized("mut_x5_A-mut_x5_B", 1));
+ TEST_DO(verify_p0_optimized("mut_x5_A-con_x5_B", 1));
+ TEST_DO(verify_p1_optimized("con_x5_A-mut_x5_B", 1));
+ TEST_DO(verify_p0_optimized("mut_x5y3_A-mut_x5y3_B", 1));
+ TEST_DO(verify_p0_optimized("mut_x5y3_A-con_x5y3_B", 1));
+ TEST_DO(verify_p1_optimized("con_x5y3_A-mut_x5y3_B", 1));
+}
+
+TEST("require that self-join operations can be optimized") {
+ TEST_DO(verify_p0_optimized("mut_x5_A+mut_x5_A", 1));
+}
+
+TEST("require that join(tensor,scalar) operations are not optimized") {
+ TEST_DO(verify_not_optimized("mut_x5_A-mut_dbl_B"));
+ TEST_DO(verify_not_optimized("mut_dbl_A-mut_x5_B"));
}
-TEST("require that join(tensor,scalar) operations are not optimized (yet)") {
- TEST_DO(verify_not_optimized("mut_vec_A-mut_dbl_A"));
- TEST_DO(verify_not_optimized("mut_vec_A-con_dbl_A"));
- TEST_DO(verify_not_optimized("mut_dbl_A-mut_vec_A"));
- TEST_DO(verify_not_optimized("con_dbl_A-mut_vec_A"));
- TEST_DO(verify_not_optimized("mut_x5y3_A-mut_dbl_A"));
- TEST_DO(verify_not_optimized("mut_x5y3_A-con_dbl_A"));
- TEST_DO(verify_not_optimized("mut_dbl_A-mut_x5y3_A"));
- TEST_DO(verify_not_optimized("con_dbl_A-mut_x5y3_A"));
+TEST("require that join with different tensor shapes are not optimized") {
+ TEST_DO(verify_not_optimized("mut_x5_A-mut_x4"));
+ TEST_DO(verify_not_optimized("mut_x4-mut_x5_A"));
+ TEST_DO(verify_not_optimized("mut_x5_A*mut_x5y3_B"));
}
TEST("require that inplace join operations can be chained") {
- TEST_DO(verify_left_optimized("mut_vec_A-(mut_vec_B-mut_vec_C)", 2));
- TEST_DO(verify_left_optimized("(mut_vec_A-con_vec_B)-con_vec_C", 2));
- TEST_DO(verify_right_optimized("con_vec_A-(mut_vec_B-con_vec_C)", 2));
- TEST_DO(verify_right2_optimized("con_vec_A-(con_vec_B-mut_vec_C)", 2));
+ TEST_DO(verify_p0_optimized("mut_x5_A-(mut_x5_B-mut_x5_C)", 2));
+ TEST_DO(verify_p0_optimized("(mut_x5_A-con_x5_B)-con_x5_C", 2));
+ TEST_DO(verify_p1_optimized("con_x5_A-(mut_x5_B-con_x5_C)", 2));
+ TEST_DO(verify_p2_optimized("con_x5_A-(con_x5_B-mut_x5_C)", 2));
}
TEST("require that abstract tensors are not optimized") {
- TEST_DO(verify_not_optimized("mut_vec_unbound+mut_vec_A"));
- TEST_DO(verify_not_optimized("mut_vec_A+mut_vec_unbound"));
- TEST_DO(verify_not_optimized("mut_vec_unbound+mut_vec_unbound"));
+ TEST_DO(verify_not_optimized("mut_x5_unbound+mut_x5_A"));
+ TEST_DO(verify_not_optimized("mut_x5_A+mut_x5_unbound"));
+ TEST_DO(verify_not_optimized("mut_x5_unbound+mut_x5_unbound"));
}
TEST("require that non-mutable tensors are not optimized") {
- TEST_DO(verify_not_optimized("con_vec_A+con_vec_B"));
+ TEST_DO(verify_not_optimized("con_x5_A+con_x5_B"));
}
TEST("require that scalar values are not optimized") {
TEST_DO(verify_not_optimized("mut_dbl_A+mut_dbl_B"));
- TEST_DO(verify_not_optimized("join(mut_dbl_A,mut_dbl_B,f(x,y)(x+y))"));
+ TEST_DO(verify_not_optimized("mut_dbl_A+5"));
+ TEST_DO(verify_not_optimized("5+mut_dbl_B"));
}
TEST("require that mapped tensors are not optimized") {
- TEST_DO(verify_not_optimized("mut_vec_sparse+mut_vec_sparse"));
+ TEST_DO(verify_not_optimized("mut_x_sparse+mut_x_sparse"));
}
TEST_MAIN() { TEST_RUN_ALL(); }
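
Note: the refactor above replaces the per-parameter SimpleSequence/SequenceGenerator pair with a single GlobalSequence backed by one global counter, so every generated cell value is unique across all parameters (the old ((step * i) + bias) & 0xffff scheme could in principle produce colliding values between parameters). A standalone sketch of the idea, with simplified types standing in for the real vespalib::eval::test interfaces:

#include <cassert>
#include <cstddef>

// Simplified stand-in for vespalib::eval::test::Sequence
struct Sequence {
    virtual double operator[](size_t i) const = 0;
    virtual ~Sequence() = default;
};

double seq_value = 0.0; // global counter shared by all lookups

struct GlobalSequence : Sequence {
    // 'const' is honest here: the method mutates the global, not the object
    double operator[](size_t) const override {
        seq_value += 1.0;
        return seq_value;
    }
};

int main() {
    GlobalSequence seq;
    double a = seq[0];
    double b = seq[0]; // same index, different value by design
    assert(a != b);
    return 0;
}
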
diff --git a/eval/src/vespa/eval/eval/test/eval_fixture.cpp b/eval/src/vespa/eval/eval/test/eval_fixture.cpp
index 2593b74e300..bd4dcc37e3c 100644
--- a/eval/src/vespa/eval/eval/test/eval_fixture.cpp
+++ b/eval/src/vespa/eval/eval/test/eval_fixture.cpp
@@ -113,4 +113,10 @@ EvalFixture::get_param(size_t idx) const
return _engine.to_spec(*(_param_values[idx]));
}
+size_t
+EvalFixture::num_params() const
+{
+ return _param_values.size();
+}
+
} // namespace vespalib::eval::test
diff --git a/eval/src/vespa/eval/eval/test/eval_fixture.h b/eval/src/vespa/eval/eval/test/eval_fixture.h
index 7d3b95ad144..e20d435f608 100644
--- a/eval/src/vespa/eval/eval/test/eval_fixture.h
+++ b/eval/src/vespa/eval/eval/test/eval_fixture.h
@@ -86,6 +86,7 @@ public:
}
const TensorSpec &result() const { return _result; }
const TensorSpec get_param(size_t idx) const;
+ size_t num_params() const;
static TensorSpec ref(const vespalib::string &expr, const ParamRepo &param_repo) {
return EvalFixture(SimpleTensorEngine::ref(), expr, param_repo, false, false).result();
}
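
Note: the num_params() accessor added above exists to support the exhaustive check in verify_optimized/verify_not_optimized: an in-place join writes its result into exactly one (mutable) parameter, so the result spec should equal that parameter and differ from all others. A hedged sketch of that invariant as a free-standing helper; check_inplace_alias is a hypothetical name, and the include path is assumed from the file locations in this diff:

#include <vespa/eval/eval/test/eval_fixture.h> // path assumed from the diff
#include <cassert>
#include <cstddef>

using vespalib::eval::test::EvalFixture;

// Hypothetical helper (not part of this commit): asserts that the evaluated
// result aliases exactly the parameter expected to be overwritten in place.
// Relies only on EvalFixture members shown in this diff: num_params(),
// get_param(), result().
void check_inplace_alias(const EvalFixture &fixture, size_t expected_idx) {
    for (size_t i = 0; i < fixture.num_params(); ++i) {
        bool aliased = (fixture.get_param(i) == fixture.result());
        assert(aliased == (i == expected_idx));
    }
}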