diff options
author    Henning Baldersheim <balder@oath.com>    2018-06-06 18:31:30 +0200
committer Henning Baldersheim <balder@oath.com>    2018-06-06 18:31:30 +0200
commit    c484b7c1b0f244483a727ff94a5660ed7f9e3494 (patch)
tree      297f492eb43150d925640096c0231bab8f1bbbe9 /streamingvisitors
parent    a0a4c9b7c743c46d9068619e1344f16c46c61610 (diff)
C++11 for loops
Diffstat (limited to 'streamingvisitors')
-rw-r--r--  streamingvisitors/src/tests/searchvisitor/searchvisitor.cpp |  5
-rw-r--r--  streamingvisitors/src/vespa/searchvisitor/rankprocessor.cpp | 40
2 files changed, 20 insertions, 25 deletions
diff --git a/streamingvisitors/src/tests/searchvisitor/searchvisitor.cpp b/streamingvisitors/src/tests/searchvisitor/searchvisitor.cpp
index b419206fede..67b662fb16b 100644
--- a/streamingvisitors/src/tests/searchvisitor/searchvisitor.cpp
+++ b/streamingvisitors/src/tests/searchvisitor/searchvisitor.cpp
@@ -46,7 +46,7 @@ SearchVisitorTest::SearchVisitorTest() :
     _component.reset(new StorageComponent(_componentRegister, "storage"));
 }
 
-SearchVisitorTest::~SearchVisitorTest() {}
+SearchVisitorTest::~SearchVisitorTest() = default;
 
 std::vector<spi::DocEntry::UP>
 createDocuments(const vespalib::string & dir)
@@ -105,8 +105,7 @@ SearchVisitorTest::testOnlyRequireWeakReadConsistency()
     SearchVisitorFactory factory("dir:" + TEST_PATH("cfg"));
     VisitorFactory& factoryBase(factory);
     vdslib::Parameters params;
-    std::unique_ptr<Visitor> sv(
-            factoryBase.makeVisitor(*_component, _env, params));
+    std::unique_ptr<Visitor> sv(factoryBase.makeVisitor(*_component, _env, params));
     EXPECT_TRUE(sv->getRequiredReadConsistency() == spi::ReadConsistency::WEAK);
 }
diff --git a/streamingvisitors/src/vespa/searchvisitor/rankprocessor.cpp b/streamingvisitors/src/vespa/searchvisitor/rankprocessor.cpp
index b38ce7abfea..a80fa9123ed 100644
--- a/streamingvisitors/src/vespa/searchvisitor/rankprocessor.cpp
+++ b/streamingvisitors/src/vespa/searchvisitor/rankprocessor.cpp
@@ -188,7 +188,7 @@ private:
 public:
     RankProgramWrapper(MatchData &match_data) : _match_data(match_data) {}
 
-    virtual void run(uint32_t docid, const std::vector<search::fef::TermFieldMatchData> &matchData) override {
+    void run(uint32_t docid, const std::vector<search::fef::TermFieldMatchData> &matchData) override {
         // Prepare the match data object used by the rank program with earlier unpacked match data.
         copyTermFieldMatchData(matchData, _match_data);
         (void) docid;
@@ -226,38 +226,36 @@ RankProcessor::unpackMatchData(uint32_t docId)
 void
 RankProcessor::unpackMatchData(MatchData &matchData)
 {
-    QueryWrapper::TermList & terms = _query.getTermList();
-    for (uint32_t i = 0; i < terms.size(); ++i) {
-        if (!terms[i].isPhraseTerm() || terms[i].isFirstPhraseTerm()) { // consider 1 term data per phrase
-            bool isPhrase = terms[i].isFirstPhraseTerm();
-            QueryTermData & qtd = static_cast<QueryTermData &>(terms[i].getTerm()->getQueryItem());
+    for (QueryWrapper::Term & term: _query.getTermList()) {
+        if (!term.isPhraseTerm() || term.isFirstPhraseTerm()) { // consider 1 term data per phrase
+            bool isPhrase = term.isFirstPhraseTerm();
+            QueryTermData & qtd = static_cast<QueryTermData &>(term.getTerm()->getQueryItem());
             const ITermData &td = qtd.getTermData();
             HitList list;
-            const HitList & hitList = isPhrase ?
-                terms[i].getParent()->evaluateHits(list) : terms[i].getTerm()->evaluateHits(list);
+            const HitList & hitList = isPhrase
+                                      ? term.getParent()->evaluateHits(list)
+                                      : term.getTerm()->evaluateHits(list);
             if (hitList.size() > 0) { // only unpack if we have a hit
                 LOG(debug, "Unpack match data for query term '%s:%s' (%s)",
-                    terms[i].getTerm()->index().c_str(), terms[i].getTerm()->getTerm(),
-                    isPhrase ? "phrase" : "term");
+                    term.getTerm()->index().c_str(), term.getTerm()->getTerm(), isPhrase ? "phrase" : "term");
                 uint32_t lastFieldId = -1;
-                TermFieldMatchData *tmd = 0;
+                TermFieldMatchData *tmd = nullptr;
                 uint32_t fieldLen = search::fef::FieldPositionsIterator::UNKNOWN_LENGTH;
                 // optimize for hitlist giving all hits for a single field in one chunk
                 for (const search::Hit & hit : hitList) {
                     uint32_t fieldId = hit.context();
                     if (fieldId != lastFieldId) { // reset to notfound/unknown values
-                        tmd = 0;
+                        tmd = nullptr;
                         fieldLen = search::fef::FieldPositionsIterator::UNKNOWN_LENGTH;
 
                         // setup for new field that had a hit
                         const ITermFieldData *tfd = td.lookupField(fieldId);
-                        if (tfd != 0) {
+                        if (tfd != nullptr) {
                             tmd = matchData.resolveTermField(tfd->getHandle());
                             tmd->setFieldId(fieldId);
                             // reset field match data, but only once per docId
@@ -267,21 +265,19 @@ RankProcessor::unpackMatchData(MatchData &matchData)
                         }
                         // find fieldLen for new field
                         if (isPhrase) {
-                            if (fieldId < terms[i].getParent()->getFieldInfoSize()) {
-                                const QueryTerm::FieldInfo & fi = terms[i].getParent()->getFieldInfo(fieldId);
-                                fieldLen = fi.getFieldLength();
+                            if (fieldId < term.getParent()->getFieldInfoSize()) {
+                                fieldLen = term.getParent()->getFieldInfo(fieldId).getFieldLength();
                             }
                         } else {
-                            if (fieldId < terms[i].getTerm()->getFieldInfoSize()) {
-                                const QueryTerm::FieldInfo & fi = terms[i].getTerm()->getFieldInfo(fieldId);
-                                fieldLen = fi.getFieldLength();
+                            if (fieldId < term.getTerm()->getFieldInfoSize()) {
+                                fieldLen = term.getTerm()->getFieldInfo(fieldId).getFieldLength();
                             }
                         }
                         lastFieldId = fieldId;
                     }
-                    if (tmd != 0) {
+                    if (tmd != nullptr) {
                         // adjust so that the position for phrase terms equals the match for the first term
-                        TermFieldMatchDataPosition pos(hit.elemId(), hit.wordpos() - terms[i].getPosAdjust(),
+                        TermFieldMatchDataPosition pos(hit.elemId(), hit.wordpos() - term.getPosAdjust(),
                                                        hit.weight(), fieldLen);
                         tmd->appendPosition(pos);
                         LOG(debug, "Append elemId(%u),position(%u), weight(%d), tfmd.weight(%d)",