author     Håvard Pettersen <havardpe@oath.com>  2018-01-10 14:28:17 +0000
committer  Håvard Pettersen <havardpe@oath.com>  2018-01-15 09:56:55 +0000
commit     9e4d9c04f9dad1a99ba33917412702200092b5c6 (patch)
tree       398c84075825d1b4d9ad976cf1705114da52813b /eval
parent     6bca358e80a11455ee7ef387dfe5f50719460000 (diff)
separate optimize and compile concepts
optimize: tensor function -> tensor function
compile: node tree -> tensor function

node trees now contain tensor functions to support direct recursive
mixed-mode evaluation.
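A minimal sketch of the renamed entry point, modelled on the updated unit test below. The function name, using-directives, tensor ids and type specs are illustrative assumptions; inject/join/reduce, Mul::f, Aggr::SUM, the as<> helper and DenseTensorFunctionOptimizer::optimize are the pieces exercised by that test.

// Sketch only (not part of this commit): build the generic node tree
// reduce(join(a, b, mul), sum) and let the optimizer replace it.
#include <vespa/eval/eval/tensor_function.h>
#include <vespa/eval/eval/operation.h>
#include <vespa/eval/tensor/dense/dense_dot_product_function.h>
#include <vespa/eval/tensor/dense/dense_tensor_function_optimizer.h>

using namespace vespalib::eval;
using namespace vespalib::eval::tensor_function;
using namespace vespalib::eval::operation;
using namespace vespalib::tensor;
using vespalib::Stash;

bool dot_product_gets_optimized() {
    Stash stash;
    // intermediate representation: reduce(join(inject(a), inject(b), mul), sum)
    const auto &tree = reduce(join(inject(ValueType::from_spec("tensor(x[5])"), 1, stash),
                                   inject(ValueType::from_spec("tensor(x[5])"), 3, stash),
                                   Mul::f, stash),
                              Aggr::SUM, {}, stash);
    // optimize: tensor function -> tensor function
    const TensorFunction &optimized = DenseTensorFunctionOptimizer::optimize(tree, stash);
    // the recognized pattern is now an explicit dense dot product function
    return (as<DenseDotProductFunction>(optimized) != nullptr);
}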
Diffstat (limited to 'eval')
-rw-r--r--  eval/CMakeLists.txt  |  2
-rw-r--r--  eval/src/tests/tensor/dense_tensor_function_compiler/CMakeLists.txt  |  8
-rw-r--r--  eval/src/tests/tensor/dense_tensor_function_optimizer/CMakeLists.txt  |  8
-rw-r--r--  eval/src/tests/tensor/dense_tensor_function_optimizer/FILES (renamed from eval/src/tests/tensor/dense_tensor_function_compiler/FILES)  |  0
-rw-r--r--  eval/src/tests/tensor/dense_tensor_function_optimizer/dense_tensor_function_optimizer_test.cpp (renamed from eval/src/tests/tensor/dense_tensor_function_compiler/dense_tensor_function_compiler_test.cpp)  |  88
-rw-r--r--  eval/src/vespa/eval/eval/tensor_function.h  |  23
-rw-r--r--  eval/src/vespa/eval/tensor/default_tensor_engine.cpp  |  4
-rw-r--r--  eval/src/vespa/eval/tensor/dense/CMakeLists.txt  |  2
-rw-r--r--  eval/src/vespa/eval/tensor/dense/dense_tensor_function_optimizer.cpp (renamed from eval/src/vespa/eval/tensor/dense/dense_tensor_function_compiler.cpp)  |  21
-rw-r--r--  eval/src/vespa/eval/tensor/dense/dense_tensor_function_optimizer.h (renamed from eval/src/vespa/eval/tensor/dense/dense_tensor_function_compiler.h)  |  6
10 files changed, 85 insertions, 77 deletions
diff --git a/eval/CMakeLists.txt b/eval/CMakeLists.txt
index f063faf19c3..00ab5b347ea 100644
--- a/eval/CMakeLists.txt
+++ b/eval/CMakeLists.txt
@@ -27,7 +27,7 @@ vespa_define_module(
src/tests/tensor/dense_dot_product_function
src/tests/tensor/dense_tensor_address_combiner
src/tests/tensor/dense_tensor_builder
- src/tests/tensor/dense_tensor_function_compiler
+ src/tests/tensor/dense_tensor_function_optimizer
src/tests/tensor/dense_xw_product_function
src/tests/tensor/sparse_tensor_builder
src/tests/tensor/tensor_address
diff --git a/eval/src/tests/tensor/dense_tensor_function_compiler/CMakeLists.txt b/eval/src/tests/tensor/dense_tensor_function_compiler/CMakeLists.txt
deleted file mode 100644
index b49a439b0ab..00000000000
--- a/eval/src/tests/tensor/dense_tensor_function_compiler/CMakeLists.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_executable(eval_dense_tensor_function_compiler_test_app TEST
- SOURCES
- dense_tensor_function_compiler_test.cpp
- DEPENDS
- vespaeval
-)
-vespa_add_test(NAME eval_dense_tensor_function_compiler_test_app COMMAND eval_dense_tensor_function_compiler_test_app)
diff --git a/eval/src/tests/tensor/dense_tensor_function_optimizer/CMakeLists.txt b/eval/src/tests/tensor/dense_tensor_function_optimizer/CMakeLists.txt
new file mode 100644
index 00000000000..3a95ef776d7
--- /dev/null
+++ b/eval/src/tests/tensor/dense_tensor_function_optimizer/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_dense_tensor_function_optimizer_test_app TEST
+ SOURCES
+ dense_tensor_function_optimizer_test.cpp
+ DEPENDS
+ vespaeval
+)
+vespa_add_test(NAME eval_dense_tensor_function_optimizer_test_app COMMAND eval_dense_tensor_function_optimizer_test_app)
diff --git a/eval/src/tests/tensor/dense_tensor_function_compiler/FILES b/eval/src/tests/tensor/dense_tensor_function_optimizer/FILES
index 3c4ec2f1753..3c4ec2f1753 100644
--- a/eval/src/tests/tensor/dense_tensor_function_compiler/FILES
+++ b/eval/src/tests/tensor/dense_tensor_function_optimizer/FILES
diff --git a/eval/src/tests/tensor/dense_tensor_function_compiler/dense_tensor_function_compiler_test.cpp b/eval/src/tests/tensor/dense_tensor_function_optimizer/dense_tensor_function_optimizer_test.cpp
index 7df436d85a1..57d03c09686 100644
--- a/eval/src/tests/tensor/dense_tensor_function_compiler/dense_tensor_function_compiler_test.cpp
+++ b/eval/src/tests/tensor/dense_tensor_function_optimizer/dense_tensor_function_optimizer_test.cpp
@@ -3,7 +3,7 @@
#include <vespa/vespalib/testkit/test_kit.h>
#include <vespa/eval/tensor/dense/dense_dot_product_function.h>
#include <vespa/eval/tensor/dense/dense_xw_product_function.h>
-#include <vespa/eval/tensor/dense/dense_tensor_function_compiler.h>
+#include <vespa/eval/tensor/dense/dense_tensor_function_optimizer.h>
#include <vespa/eval/eval/operation.h>
using namespace vespalib::eval;
@@ -15,7 +15,7 @@ using vespalib::Stash;
//-----------------------------------------------------------------------------
const TensorFunction &
-compileDotProduct(const vespalib::string &lhsType,
+optimizeDotProduct(const vespalib::string &lhsType,
const vespalib::string &rhsType,
Stash &stash)
{
@@ -23,15 +23,15 @@ compileDotProduct(const vespalib::string &lhsType,
inject(ValueType::from_spec(rhsType), 3, stash),
Mul::f, stash),
Aggr::SUM, {}, stash);
- return DenseTensorFunctionCompiler::compile(reduceNode, stash);
+ return DenseTensorFunctionOptimizer::optimize(reduceNode, stash);
}
void
-assertCompiledDotProduct(const vespalib::string &lhsType,
+assertOptimizedDotProduct(const vespalib::string &lhsType,
const vespalib::string &rhsType)
{
Stash stash;
- const TensorFunction &func = compileDotProduct(lhsType, rhsType, stash);
+ const TensorFunction &func = optimizeDotProduct(lhsType, rhsType, stash);
const DenseDotProductFunction *dotProduct = as<DenseDotProductFunction>(func);
ASSERT_TRUE(dotProduct);
EXPECT_EQUAL(1u, dotProduct->lhsTensorId());
@@ -39,11 +39,11 @@ assertCompiledDotProduct(const vespalib::string &lhsType,
}
void
-assertNotCompiledDotProduct(const vespalib::string &lhsType,
+assertNotOptimizedDotProduct(const vespalib::string &lhsType,
const vespalib::string &rhsType)
{
Stash stash;
- const TensorFunction &func = compileDotProduct(lhsType, rhsType, stash);
+ const TensorFunction &func = optimizeDotProduct(lhsType, rhsType, stash);
const Reduce *reduce = as<Reduce>(func);
EXPECT_TRUE(reduce);
}
@@ -51,7 +51,7 @@ assertNotCompiledDotProduct(const vespalib::string &lhsType,
//-----------------------------------------------------------------------------
const TensorFunction &
-compileXWProduct(const vespalib::string &lhsType,
+optimizeXWProduct(const vespalib::string &lhsType,
const vespalib::string &rhsType,
const vespalib::string &dim,
Stash &stash)
@@ -60,17 +60,17 @@ compileXWProduct(const vespalib::string &lhsType,
inject(ValueType::from_spec(rhsType), 3, stash),
Mul::f, stash),
Aggr::SUM, {dim}, stash);
- return DenseTensorFunctionCompiler::compile(reduceNode, stash);
+ return DenseTensorFunctionOptimizer::optimize(reduceNode, stash);
}
void
-assertCompiledXWProduct(const vespalib::string &vecTypeStr,
+assertOptimizedXWProduct(const vespalib::string &vecTypeStr,
const vespalib::string &matTypeStr,
const vespalib::string &dim)
{
Stash stash;
- const TensorFunction &func = compileXWProduct(vecTypeStr, matTypeStr, dim, stash);
- const TensorFunction &inv_func = compileXWProduct(matTypeStr, vecTypeStr, dim, stash);
+ const TensorFunction &func = optimizeXWProduct(vecTypeStr, matTypeStr, dim, stash);
+ const TensorFunction &inv_func = optimizeXWProduct(matTypeStr, vecTypeStr, dim, stash);
const DenseXWProductFunction *xwProduct = as<DenseXWProductFunction>(func);
const DenseXWProductFunction *inv_xwProduct = as<DenseXWProductFunction>(inv_func);
ValueType vecType = ValueType::from_spec(vecTypeStr);
@@ -92,13 +92,13 @@ assertCompiledXWProduct(const vespalib::string &vecTypeStr,
}
void
-assertNotCompiledXWProduct(const vespalib::string &vecType,
+assertNotOptimizedXWProduct(const vespalib::string &vecType,
const vespalib::string &matType,
const vespalib::string &dim)
{
Stash stash;
- const TensorFunction &func = compileXWProduct(vecType, matType, dim, stash);
- const TensorFunction &inv_func = compileXWProduct(matType, vecType, dim, stash);
+ const TensorFunction &func = optimizeXWProduct(vecType, matType, dim, stash);
+ const TensorFunction &inv_func = optimizeXWProduct(matType, vecType, dim, stash);
const Reduce *reduce = as<Reduce>(func);
const Reduce *inv_reduce = as<Reduce>(inv_func);
EXPECT_TRUE(reduce);
@@ -107,45 +107,45 @@ assertNotCompiledXWProduct(const vespalib::string &vecType,
//-----------------------------------------------------------------------------
-TEST("require that dot product with compatible dimensions is compiled")
+TEST("require that dot product with compatible dimensions is optimized")
{
- TEST_DO(assertCompiledDotProduct("tensor(x[5])", "tensor(x[5])"));
- TEST_DO(assertCompiledDotProduct("tensor(x[3])", "tensor(x[5])"));
- TEST_DO(assertCompiledDotProduct("tensor(x[5])", "tensor(x[3])"));
- TEST_DO(assertCompiledDotProduct("tensor(x[])", "tensor(x[5])"));
- TEST_DO(assertCompiledDotProduct("tensor(x[5])", "tensor(x[])"));
- TEST_DO(assertCompiledDotProduct("tensor(x[])", "tensor(x[])"));
+ TEST_DO(assertOptimizedDotProduct("tensor(x[5])", "tensor(x[5])"));
+ TEST_DO(assertOptimizedDotProduct("tensor(x[3])", "tensor(x[5])"));
+ TEST_DO(assertOptimizedDotProduct("tensor(x[5])", "tensor(x[3])"));
+ TEST_DO(assertOptimizedDotProduct("tensor(x[])", "tensor(x[5])"));
+ TEST_DO(assertOptimizedDotProduct("tensor(x[5])", "tensor(x[])"));
+ TEST_DO(assertOptimizedDotProduct("tensor(x[])", "tensor(x[])"));
}
-TEST("require that dot product with incompatible dimensions is NOT compiled")
+TEST("require that dot product with incompatible dimensions is NOT optimized")
{
- TEST_DO(assertNotCompiledDotProduct("tensor(x[5])", "tensor(y[5])"));
- TEST_DO(assertNotCompiledDotProduct("tensor(y[5])", "tensor(x[5])"));
- TEST_DO(assertNotCompiledDotProduct("tensor(y[])", "tensor(x[])"));
- TEST_DO(assertNotCompiledDotProduct("tensor(x[5])", "tensor(x[5],y[7])"));
- TEST_DO(assertNotCompiledDotProduct("tensor(x[5],y[7])", "tensor(x[5],y[7])"));
+ TEST_DO(assertNotOptimizedDotProduct("tensor(x[5])", "tensor(y[5])"));
+ TEST_DO(assertNotOptimizedDotProduct("tensor(y[5])", "tensor(x[5])"));
+ TEST_DO(assertNotOptimizedDotProduct("tensor(y[])", "tensor(x[])"));
+ TEST_DO(assertNotOptimizedDotProduct("tensor(x[5])", "tensor(x[5],y[7])"));
+ TEST_DO(assertNotOptimizedDotProduct("tensor(x[5],y[7])", "tensor(x[5],y[7])"));
}
//-----------------------------------------------------------------------------
-TEST("require that xw products with compatible dimensions are compiled") {
- TEST_DO(assertCompiledXWProduct("tensor(x[3])", "tensor(x[3],y[4])", "x"));
- TEST_DO(assertCompiledXWProduct("tensor(y[4])", "tensor(x[3],y[4])", "y"));
+TEST("require that xw products with compatible dimensions are optimized") {
+ TEST_DO(assertOptimizedXWProduct("tensor(x[3])", "tensor(x[3],y[4])", "x"));
+ TEST_DO(assertOptimizedXWProduct("tensor(y[4])", "tensor(x[3],y[4])", "y"));
}
-TEST("require that xw products with incompatible dimensions are not compiled") {
- TEST_DO(assertNotCompiledXWProduct("tensor(x[3])", "tensor(x[3],y[4])", "y"));
- TEST_DO(assertNotCompiledXWProduct("tensor(x[])", "tensor(x[3],y[4])", "x"));
- TEST_DO(assertNotCompiledXWProduct("tensor(x[3])", "tensor(x[],y[4])", "x"));
- TEST_DO(assertNotCompiledXWProduct("tensor(x[3])", "tensor(x[3],y[])", "x"));
- TEST_DO(assertNotCompiledXWProduct("tensor(x[2])", "tensor(x[3],y[4])", "x"));
- TEST_DO(assertNotCompiledXWProduct("tensor(x[4])", "tensor(x[3],y[4])", "x"));
- TEST_DO(assertNotCompiledXWProduct("tensor(x[3])", "tensor(y[3],z[4])", "x"));
- TEST_DO(assertNotCompiledXWProduct("tensor(x[3])", "tensor(y[3],z[4])", "y"));
- TEST_DO(assertNotCompiledXWProduct("tensor(x[3])", "tensor(y[3],z[4])", "z"));
- TEST_DO(assertNotCompiledXWProduct("tensor(y[4])", "tensor(x[3],y[4])", "x"));
- TEST_DO(assertNotCompiledXWProduct("tensor(y[3])", "tensor(x[3],y[4])", "y"));
- TEST_DO(assertNotCompiledXWProduct("tensor(y[5])", "tensor(x[3],y[4])", "y"));
+TEST("require that xw products with incompatible dimensions are not optimized") {
+ TEST_DO(assertNotOptimizedXWProduct("tensor(x[3])", "tensor(x[3],y[4])", "y"));
+ TEST_DO(assertNotOptimizedXWProduct("tensor(x[])", "tensor(x[3],y[4])", "x"));
+ TEST_DO(assertNotOptimizedXWProduct("tensor(x[3])", "tensor(x[],y[4])", "x"));
+ TEST_DO(assertNotOptimizedXWProduct("tensor(x[3])", "tensor(x[3],y[])", "x"));
+ TEST_DO(assertNotOptimizedXWProduct("tensor(x[2])", "tensor(x[3],y[4])", "x"));
+ TEST_DO(assertNotOptimizedXWProduct("tensor(x[4])", "tensor(x[3],y[4])", "x"));
+ TEST_DO(assertNotOptimizedXWProduct("tensor(x[3])", "tensor(y[3],z[4])", "x"));
+ TEST_DO(assertNotOptimizedXWProduct("tensor(x[3])", "tensor(y[3],z[4])", "y"));
+ TEST_DO(assertNotOptimizedXWProduct("tensor(x[3])", "tensor(y[3],z[4])", "z"));
+ TEST_DO(assertNotOptimizedXWProduct("tensor(y[4])", "tensor(x[3],y[4])", "x"));
+ TEST_DO(assertNotOptimizedXWProduct("tensor(y[3])", "tensor(x[3],y[4])", "y"));
+ TEST_DO(assertNotOptimizedXWProduct("tensor(y[5])", "tensor(x[3],y[4])", "y"));
}
//-----------------------------------------------------------------------------
diff --git a/eval/src/vespa/eval/eval/tensor_function.h b/eval/src/vespa/eval/eval/tensor_function.h
index e23dc8c6fc0..5ca00ca4b53 100644
--- a/eval/src/vespa/eval/eval/tensor_function.h
+++ b/eval/src/vespa/eval/eval/tensor_function.h
@@ -70,6 +70,13 @@ using join_fun_t = double (*)(double, double);
* will invoke the immediate API on the tensor engine associated with
* the input tensors. In other words, the intermediate representation
* 'compiles to itself'.
+ *
+ * The reason for using the top-level TensorFunction interface when
+ * referencing downwards in the tree is to enable mixed-mode execution
+ * resulting from partial optimization where the intermediate
+ * representation is partially replaced by implementation-specific
+ * tensor functions, which may or may not rely on lower-level tensor
+ * functions that may in turn be mixed-mode.
**/
struct Node : public TensorFunction
{
@@ -90,11 +97,11 @@ struct Inject : Node {
};
struct Reduce : Node {
- const Node &tensor;
+ const TensorFunction &tensor;
const Aggr aggr;
const std::vector<vespalib::string> dimensions;
Reduce(const ValueType &result_type_in,
- const Node &tensor_in,
+ const TensorFunction &tensor_in,
Aggr aggr_in,
const std::vector<vespalib::string> &dimensions_in)
: Node(result_type_in), tensor(tensor_in), aggr(aggr_in), dimensions(dimensions_in) {}
@@ -102,22 +109,22 @@ struct Reduce : Node {
};
struct Map : Node {
- const Node &tensor;
+ const TensorFunction &tensor;
const map_fun_t function;
Map(const ValueType &result_type_in,
- const Node &tensor_in,
+ const TensorFunction &tensor_in,
map_fun_t function_in)
: Node(result_type_in), tensor(tensor_in), function(function_in) {}
const Value &eval(ConstArrayRef<Value::CREF> params, Stash &stash) const override;
};
struct Join : Node {
- const Node &lhs_tensor;
- const Node &rhs_tensor;
+ const TensorFunction &lhs_tensor;
+ const TensorFunction &rhs_tensor;
const join_fun_t function;
Join(const ValueType &result_type_in,
- const Node &lhs_tensor_in,
- const Node &rhs_tensor_in,
+ const TensorFunction &lhs_tensor_in,
+ const TensorFunction &rhs_tensor_in,
join_fun_t function_in)
: Node(result_type_in), lhs_tensor(lhs_tensor_in),
rhs_tensor(rhs_tensor_in), function(function_in) {}
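To illustrate the mixed-mode point made in the doc comment added above (Reduce/Map/Join now reference their children through the top-level TensorFunction interface), here is a hedged sketch; the helper name optimize_below_map is hypothetical and DenseTensorFunctionOptimizer is just one possible optimizer to plug in.

// Sketch only: keep the generic Map on top, but let its child be whatever
// the optimizer produced for the subtree (generic node tree or dense function).
#include <vespa/eval/eval/tensor_function.h>
#include <vespa/eval/tensor/dense/dense_tensor_function_optimizer.h>

using namespace vespalib::eval;
using namespace vespalib::eval::tensor_function;
using vespalib::Stash;

const TensorFunction &
optimize_below_map(const Node &root, Stash &stash)
{
    if (const Map *map = as<Map>(root)) {
        const TensorFunction &child =
            vespalib::tensor::DenseTensorFunctionOptimizer::optimize(map->tensor, stash);
        // legal after this change: a generic Map node holding a TensorFunction child,
        // which may be an implementation-specific function or still a plain node tree
        return stash.create<Map>(map->result_type, child, map->function);
    }
    return root; // nothing recognized; keep the tree unchanged
}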
diff --git a/eval/src/vespa/eval/tensor/default_tensor_engine.cpp b/eval/src/vespa/eval/tensor/default_tensor_engine.cpp
index 773d2364b7d..88e441c486a 100644
--- a/eval/src/vespa/eval/tensor/default_tensor_engine.cpp
+++ b/eval/src/vespa/eval/tensor/default_tensor_engine.cpp
@@ -7,7 +7,7 @@
#include "serialization/typed_binary_format.h"
#include "dense/dense_tensor.h"
#include "dense/dense_tensor_builder.h"
-#include "dense/dense_tensor_function_compiler.h"
+#include "dense/dense_tensor_function_optimizer.h"
#include <vespa/eval/eval/value.h>
#include <vespa/eval/eval/tensor_spec.h>
#include <vespa/eval/eval/simple_tensor_engine.h>
@@ -208,7 +208,7 @@ DefaultTensorEngine::decode(nbostream &input) const
const TensorFunction &
DefaultTensorEngine::compile(const eval::tensor_function::Node &expr, Stash &stash) const
{
- return DenseTensorFunctionCompiler::compile(expr, stash);
+ return DenseTensorFunctionOptimizer::optimize(expr, stash);
}
//-----------------------------------------------------------------------------
diff --git a/eval/src/vespa/eval/tensor/dense/CMakeLists.txt b/eval/src/vespa/eval/tensor/dense/CMakeLists.txt
index 97343ffd380..1fa839ca4b2 100644
--- a/eval/src/vespa/eval/tensor/dense/CMakeLists.txt
+++ b/eval/src/vespa/eval/tensor/dense/CMakeLists.txt
@@ -8,7 +8,7 @@ vespa_add_library(eval_tensor_dense OBJECT
dense_tensor_address_combiner.cpp
dense_tensor_builder.cpp
dense_tensor_cells_iterator.cpp
- dense_tensor_function_compiler.cpp
+ dense_tensor_function_optimizer.cpp
dense_tensor_view.cpp
dense_tensor_reduce.cpp
mutable_dense_tensor_view.cpp
diff --git a/eval/src/vespa/eval/tensor/dense/dense_tensor_function_compiler.cpp b/eval/src/vespa/eval/tensor/dense/dense_tensor_function_optimizer.cpp
index 22e2a3fb78c..23a382baf5c 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_tensor_function_compiler.cpp
+++ b/eval/src/vespa/eval/tensor/dense/dense_tensor_function_optimizer.cpp
@@ -2,7 +2,7 @@
#include "dense_dot_product_function.h"
#include "dense_xw_product_function.h"
-#include "dense_tensor_function_compiler.h"
+#include "dense_tensor_function_optimizer.h"
#include <vespa/eval/eval/operation.h>
#include <vespa/vespalib/test/insertion_operators.h>
#include <iostream>
@@ -54,24 +54,25 @@ const TensorFunction &createDenseXWProduct(const ValueType &res, const Inject &v
common_is_inner);
}
-struct InnerProductFunctionCompiler
+struct InnerProductFunctionOptimizer
{
- static const TensorFunction &compile(const Node &expr, Stash &stash) {
+ static const TensorFunction &optimize(const TensorFunction &expr, Stash &stash) {
const Reduce *reduce = as<Reduce>(expr);
if (reduce && (reduce->aggr == Aggr::SUM)) {
+ const ValueType &result_type = reduce->result_type;
const Join *join = as<Join>(reduce->tensor);
if (join && (join->function == Mul::f)) {
const Inject *lhs = as<Inject>(join->lhs_tensor);
const Inject *rhs = as<Inject>(join->rhs_tensor);
if (lhs && rhs) {
- if (isDenseDotProduct(expr.result_type, lhs->result_type, rhs->result_type)) {
+ if (isDenseDotProduct(result_type, lhs->result_type, rhs->result_type)) {
return stash.create<DenseDotProductFunction>(lhs->tensor_id, rhs->tensor_id);
}
- if (isDenseXWProduct(expr.result_type, lhs->result_type, rhs->result_type)) {
- return createDenseXWProduct(expr.result_type, *lhs, *rhs, stash);
+ if (isDenseXWProduct(result_type, lhs->result_type, rhs->result_type)) {
+ return createDenseXWProduct(result_type, *lhs, *rhs, stash);
}
- if (isDenseXWProduct(expr.result_type, rhs->result_type, lhs->result_type)) {
- return createDenseXWProduct(expr.result_type, *rhs, *lhs, stash);
+ if (isDenseXWProduct(result_type, rhs->result_type, lhs->result_type)) {
+ return createDenseXWProduct(result_type, *rhs, *lhs, stash);
}
}
}
@@ -83,9 +84,9 @@ struct InnerProductFunctionCompiler
}
const TensorFunction &
-DenseTensorFunctionCompiler::compile(const eval::tensor_function::Node &expr, Stash &stash)
+DenseTensorFunctionOptimizer::optimize(const eval::TensorFunction &expr, Stash &stash)
{
- return InnerProductFunctionCompiler::compile(expr, stash);
+ return InnerProductFunctionOptimizer::optimize(expr, stash);
}
}
diff --git a/eval/src/vespa/eval/tensor/dense/dense_tensor_function_compiler.h b/eval/src/vespa/eval/tensor/dense/dense_tensor_function_optimizer.h
index 61c3af079e3..2478447ca48 100644
--- a/eval/src/vespa/eval/tensor/dense/dense_tensor_function_compiler.h
+++ b/eval/src/vespa/eval/tensor/dense/dense_tensor_function_optimizer.h
@@ -10,11 +10,11 @@ namespace vespalib::tensor {
/**
* Class that recognizes calculations over dense tensors (in tensor function intermediate representation)
- * and compiles this into an explicit tensor function.
+ * and optimizes this into an explicit tensor function.
*/
-struct DenseTensorFunctionCompiler
+struct DenseTensorFunctionOptimizer
{
- static const eval::TensorFunction &compile(const eval::tensor_function::Node &expr, Stash &stash);
+ static const eval::TensorFunction &optimize(const eval::TensorFunction &expr, Stash &stash);
};
}
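A usage note for the class comment above, again a sketch following the unit test: when no dense dot/xw product pattern is recognized, optimize() hands back the original intermediate representation, so callers can evaluate the result either way. Includes, using-directives and the function name are assumed as in the first sketch near the top.

// Sketch only: mismatched dimensions, so no dense optimization applies.
#include <cassert>
#include <vespa/eval/eval/tensor_function.h>
#include <vespa/eval/eval/operation.h>
#include <vespa/eval/tensor/dense/dense_tensor_function_optimizer.h>

using namespace vespalib::eval;
using namespace vespalib::eval::tensor_function;
using namespace vespalib::eval::operation;
using namespace vespalib::tensor;
using vespalib::Stash;

void no_dense_optimization() {
    Stash stash;
    const auto &tree = reduce(join(inject(ValueType::from_spec("tensor(x[5])"), 1, stash),
                                   inject(ValueType::from_spec("tensor(y[5])"), 3, stash),
                                   Mul::f, stash),
                              Aggr::SUM, {}, stash);
    const TensorFunction &result = DenseTensorFunctionOptimizer::optimize(tree, stash);
    assert(as<Reduce>(result)); // still the generic reduce node, evaluated as before
}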