LUCENE-4307: rename IR.getTopReaderContext to IR.getContext
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/branch_4x@1373086 13f79535-47bb-0310-9956-ffa450edef68
rmuir committed Aug 14, 2012
1 parent 273e631 commit 25f9f13
Showing 29 changed files with 58 additions and 53 deletions.
3 changes: 3 additions & 0 deletions lucene/CHANGES.txt
@@ -47,6 +47,9 @@ API Changes
leaves() (LUCENE-4152), which lists AtomicReaderContexts including
the doc base of each leaf. (Uwe Schindler, Robert Muir)

* LUCENE-4307: Renamed IndexReader.getTopReaderContext to
IndexReader.getContext. (Robert Muir)

Bug Fixes

* LUCENE-4297: BooleanScorer2 would multiply the coord() factor
2 changes: 1 addition & 1 deletion lucene/MIGRATE.txt
@@ -322,7 +322,7 @@ CompositeReader itself to build its reader tree. To get all atomic leaves
of a reader, use IndexReader#leaves(), which also provides the doc base
of each leaf. Readers that are already atomic return themselves as a leaf with
doc base 0. To emulate Lucene 3.x getSequentialSubReaders(),
use getTopReaderContext().children().
use getContext().children().
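
A minimal migration sketch (not part of this commit; the class name is illustrative): only the accessor name changes, and the returned IndexReaderContext is the same object.

    import java.util.List;

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexReaderContext;

    final class GetContextMigration {
      // Emulates the old getSequentialSubReaders(): children() lists the direct
      // sub-reader contexts, or returns null when the reader is atomic.
      static List<IndexReaderContext> childrenOf(IndexReader reader) {
        // before this commit: reader.getTopReaderContext().children()
        IndexReaderContext root = reader.getContext();
        return root.children();
      }
    }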

## LUCENE-2413,LUCENE-3396: Analyzer package changes

@@ -54,7 +54,7 @@ protected AtomicReader() {
}

@Override
public final AtomicReaderContext getTopReaderContext() {
public final AtomicReaderContext getContext() {
ensureOpen();
return readerContext;
}
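
A small hedged sketch of what the narrowed return type buys (class name illustrative): when the static type is already AtomicReader, no cast is needed; the test changes further down cast only because their variable is typed IndexReader.

    import org.apache.lucene.index.AtomicReader;
    import org.apache.lucene.index.AtomicReaderContext;

    final class AtomicContextExample {
      // AtomicReader.getContext() covariantly overrides IndexReader.getContext(),
      // returning an AtomicReaderContext whose doc base is 0.
      static int docBaseOf(AtomicReader atomic) {
        AtomicReaderContext ctx = atomic.getContext();
        return ctx.docBase;
      }
    }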
@@ -439,7 +439,7 @@ public QueryAndLimit(Query query, int limit) {
// Delete by query
private static long applyQueryDeletes(Iterable<QueryAndLimit> queriesIter, ReadersAndLiveDocs rld, final SegmentReader reader) throws IOException {
long delCount = 0;
final AtomicReaderContext readerContext = reader.getTopReaderContext();
final AtomicReaderContext readerContext = reader.getContext();
boolean any = false;
for (QueryAndLimit ent : queriesIter) {
Query query = ent.query;
@@ -90,7 +90,7 @@ public String toString() {
protected abstract List<? extends IndexReader> getSequentialSubReaders();

@Override
public final CompositeReaderContext getTopReaderContext() {
public final CompositeReaderContext getContext() {
ensureOpen();
// lazy init without thread safety for perf reasons: Building the readerContext twice does not hurt!
if (readerContext == null) {
12 changes: 7 additions & 5 deletions lucene/core/src/java/org/apache/lucene/index/IndexReader.java
@@ -467,9 +467,11 @@ public final synchronized void close() throws IOException {
protected abstract void doClose() throws IOException;

/**
* Expert: Returns a the root {@link IndexReaderContext} for this
* {@link IndexReader}'s sub-reader tree. Iff this reader is composed of sub
* readers ,ie. this reader being a composite reader, this method returns a
* Expert: Returns the root {@link IndexReaderContext} for this
* {@link IndexReader}'s sub-reader tree.
* <p>
* Iff this reader is composed of sub
* readers, i.e. this reader being a composite reader, this method returns a
* {@link CompositeReaderContext} holding the reader's direct children as well as a
* view of the reader tree's atomic leaf contexts. All sub-
* {@link IndexReaderContext} instances referenced from this readers top-level
@@ -485,13 +487,13 @@ public final synchronized void close() throws IOException {
*
* @lucene.experimental
*/
public abstract IndexReaderContext getTopReaderContext();
public abstract IndexReaderContext getContext();

/**
* Returns the reader's leaves, or itself if this reader is Atomic.
*/
public final List<AtomicReaderContext> leaves() {
return getTopReaderContext().leaves();
return getContext().leaves();
}
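
A hedged usage sketch of the renamed entry point (class name illustrative): getContext() returns the root of the reader tree, and reader.leaves() remains the shortcut for its atomic leaf contexts.

    import java.util.List;

    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexReaderContext;

    final class LeafWalk {
      // Walks the atomic leaves of the top-level context and prints each doc base.
      static void printLeaves(IndexReader reader) {
        IndexReaderContext root = reader.getContext();     // formerly getTopReaderContext()
        List<AtomicReaderContext> leaves = root.leaves();  // same list as reader.leaves()
        for (AtomicReaderContext leaf : leaves) {
          System.out.println("docBase=" + leaf.docBase + " maxDoc=" + leaf.reader().maxDoc());
        }
      }
    }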

/** Expert: Returns a key for this IndexReader, so FieldCache/CachingWrapperFilter can find
@@ -36,7 +36,7 @@
*
* <p><b>NOTE</b>: for multi readers, you'll get better
* performance by gathering the sub readers using
* {@link IndexReader#getTopReaderContext()} to get the
* {@link IndexReader#getContext()} to get the
* atomic leaves and then operate per-AtomicReader,
* instead of using this class.
*
@@ -38,7 +38,7 @@
*
* <p><b>NOTE</b>: for composite readers, you'll get better
* performance by gathering the sub readers using
* {@link IndexReader#getTopReaderContext()} to get the
* {@link IndexReader#getContext()} to get the
* atomic leaves and then operate per-AtomicReader,
* instead of using this class.
*
@@ -37,7 +37,7 @@
* <p><b>NOTE</b>: this class almost always results in a
* performance hit. If this is important to your use case,
* you'll get better performance by gathering the sub readers using
* {@link IndexReader#getTopReaderContext()} to get the
* {@link IndexReader#getContext()} to get the
* atomic leaves and then operate per-AtomicReader,
* instead of using this class.
*/
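
The NOTEs above all point at the same pattern; here is a hedged sketch (class name illustrative, assuming a terms-enabled field) of operating per AtomicReader instead of going through a slow composite view:

    import java.io.IOException;

    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.Terms;

    final class PerLeafTerms {
      // Gathers per-segment statistics directly from each leaf, with no merging.
      static long sumTermCounts(IndexReader reader, String field) throws IOException {
        long total = 0;
        for (AtomicReaderContext ctx : reader.getContext().leaves()) {
          Terms terms = ctx.reader().terms(field);
          if (terms != null && terms.size() >= 0) {  // size() reports -1 when unknown
            total += terms.size();
          }
        }
        return total;
      }
    }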
@@ -122,7 +122,7 @@ public IndexSearcher(IndexReader r) {
*
* @lucene.experimental */
public IndexSearcher(IndexReader r, ExecutorService executor) {
this(r.getTopReaderContext(), executor);
this(r.getContext(), executor);
}
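
A hedged sketch (class name illustrative) of the equivalence this constructor now spells out: passing the reader is sugar for passing reader.getContext().

    import java.util.concurrent.ExecutorService;

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexReaderContext;
    import org.apache.lucene.search.IndexSearcher;

    final class SearcherFromContext {
      // Builds the searcher from the top-level context explicitly; equivalent to
      // new IndexSearcher(reader, pool) after this change.
      static IndexSearcher create(IndexReader reader, ExecutorService pool) {
        IndexReaderContext top = reader.getContext();   // was reader.getTopReaderContext()
        return new IndexSearcher(top, pool);
      }
    }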

/**
@@ -138,7 +138,7 @@ public IndexSearcher(IndexReader r, ExecutorService executor) {
* href="https://issues.apache.org/jira/browse/LUCENE-2239">LUCENE-2239</a>).
*
* @see IndexReaderContext
* @see IndexReader#getTopReaderContext()
* @see IndexReader#getContext()
* @lucene.experimental
*/
public IndexSearcher(IndexReaderContext context, ExecutorService executor) {
@@ -154,7 +154,7 @@ public IndexSearcher(IndexReaderContext context, ExecutorService executor) {
* Creates a searcher searching the provided top-level {@link IndexReaderContext}.
*
* @see IndexReaderContext
* @see IndexReader#getTopReaderContext()
* @see IndexReader#getContext()
* @lucene.experimental
*/
public IndexSearcher(IndexReaderContext context) {
@@ -639,7 +639,7 @@ public Weight createNormalizedWeight(Query query) throws IOException {

/**
* Returns this searcher's top-level {@link IndexReaderContext}.
* @see IndexReader#getTopReaderContext()
* @see IndexReader#getContext()
*/
/* sugar for #getReader().getTopReaderContext() */
public IndexReaderContext getTopReaderContext() {
@@ -50,7 +50,7 @@ public final Query getQuery() {
@Override
public DocIdSet getDocIdSet(final AtomicReaderContext context, final Bits acceptDocs) throws IOException {
// get a private context that is used to rewrite, createWeight and score eventually
final AtomicReaderContext privateContext = context.reader().getTopReaderContext();
final AtomicReaderContext privateContext = context.reader().getContext();
final Weight weight = new IndexSearcher(privateContext).createNormalizedWeight(query);
return new DocIdSet() {
@Override
@@ -46,7 +46,7 @@ protected final void addClause(Q topLevel, Term term, int docCount, float boost)


final void collectTerms(IndexReader reader, MultiTermQuery query, TermCollector collector) throws IOException {
IndexReaderContext topReaderContext = reader.getTopReaderContext();
IndexReaderContext topReaderContext = reader.getContext();
Comparator<BytesRef> lastTermComp = null;
for (AtomicReaderContext context : topReaderContext.leaves()) {
final Fields fields = context.reader().fields();
@@ -60,7 +60,7 @@ public class PayloadSpanUtil {
* @param context
* that contains doc with payloads to extract
*
* @see IndexReader#getTopReaderContext()
* @see IndexReader#getContext()
*/
public PayloadSpanUtil(IndexReaderContext context) {
this.context = context;
@@ -286,7 +286,7 @@ private List<Object> getAllDescendantReaderKeys(Object seed) {
if (obj instanceof IndexReader) {
try {
final List<IndexReaderContext> childs =
((IndexReader) obj).getTopReaderContext().children();
((IndexReader) obj).getContext().children();
if (childs != null) { // it is composite reader
for (final IndexReaderContext ctx : childs) {
all.add(ctx.reader().getCoreCacheKey());
@@ -145,7 +145,7 @@ public void testIllegalCustomEncoder() throws Exception {
IndexReader reader = writer.getReader();
writer.close();
assertEquals(numAdded, reader.numDocs());
IndexReaderContext topReaderContext = reader.getTopReaderContext();
IndexReaderContext topReaderContext = reader.getContext();
for (final AtomicReaderContext ctx : topReaderContext.leaves()) {
AtomicReader atomicReader = ctx.reader();
Source source = random().nextBoolean() ? atomicReader.normValues("foo").getSource() : atomicReader.normValues("foo").getDirectSource();
@@ -63,7 +63,7 @@ public void testPositionsSimple() throws IOException {
int num = atLeast(13);
for (int i = 0; i < num; i++) {
BytesRef bytes = new BytesRef("1");
IndexReaderContext topReaderContext = reader.getTopReaderContext();
IndexReaderContext topReaderContext = reader.getContext();
for (AtomicReaderContext atomicReaderContext : topReaderContext.leaves()) {
DocsAndPositionsEnum docsAndPosEnum = getDocsAndPositions(
atomicReaderContext.reader(), bytes, null);
@@ -138,7 +138,7 @@ public void testRandomPositions() throws IOException {
int num = atLeast(13);
for (int i = 0; i < num; i++) {
BytesRef bytes = new BytesRef("" + term);
IndexReaderContext topReaderContext = reader.getTopReaderContext();
IndexReaderContext topReaderContext = reader.getContext();
for (AtomicReaderContext atomicReaderContext : topReaderContext.leaves()) {
DocsAndPositionsEnum docsAndPosEnum = getDocsAndPositions(
atomicReaderContext.reader(), bytes, null);
@@ -214,7 +214,7 @@ public void testRandomDocs() throws IOException {
int num = atLeast(13);
for (int i = 0; i < num; i++) {
BytesRef bytes = new BytesRef("" + term);
IndexReaderContext topReaderContext = reader.getTopReaderContext();
IndexReaderContext topReaderContext = reader.getContext();
for (AtomicReaderContext context : topReaderContext.leaves()) {
int maxDoc = context.reader().maxDoc();
DocsEnum docsEnum = _TestUtil.docs(random(), context.reader(), fieldName, bytes, null, null, DocsEnum.FLAG_FREQS);
@@ -292,7 +292,7 @@ public void testLargeNumberOfPositions() throws IOException {
for (int i = 0; i < num; i++) {
BytesRef bytes = new BytesRef("even");

IndexReaderContext topReaderContext = reader.getTopReaderContext();
IndexReaderContext topReaderContext = reader.getContext();
for (AtomicReaderContext atomicReaderContext : topReaderContext.leaves()) {
DocsAndPositionsEnum docsAndPosEnum = getDocsAndPositions(
atomicReaderContext.reader(), bytes, null);
@@ -339,13 +339,13 @@ private IndexSearcher parallel(Random random, boolean compositeComposite) throws
if (compositeComposite) {
rd1 = new MultiReader(DirectoryReader.open(dir1), DirectoryReader.open(dir1));
rd2 = new MultiReader(DirectoryReader.open(dir2), DirectoryReader.open(dir2));
assertEquals(2, rd1.getTopReaderContext().children().size());
assertEquals(2, rd2.getTopReaderContext().children().size());
assertEquals(2, rd1.getContext().children().size());
assertEquals(2, rd2.getContext().children().size());
} else {
rd1 = DirectoryReader.open(dir1);
rd2 = DirectoryReader.open(dir2);
assertEquals(3, rd1.getTopReaderContext().children().size());
assertEquals(3, rd2.getTopReaderContext().children().size());
assertEquals(3, rd1.getContext().children().size());
assertEquals(3, rd2.getContext().children().size());
}
ParallelCompositeReader pr = new ParallelCompositeReader(rd1, rd2);
return newSearcher(pr);
@@ -123,7 +123,7 @@ private void assertValues(TestType type, Directory dir, long[] values, Type[] so
throws IOException {
DirectoryReader reader = DirectoryReader.open(dir);
assertEquals(1, reader.leaves().size());
IndexReaderContext topReaderContext = reader.getTopReaderContext();
IndexReaderContext topReaderContext = reader.getContext();
List<AtomicReaderContext> leaves = topReaderContext.leaves();
assertEquals(1, leaves.size());
DocValues docValues = leaves.get(0).reader().docValues("promote");
@@ -374,7 +374,7 @@ public void testMergeIncompatibleTypes() throws IOException {
writer.close();
DirectoryReader reader = DirectoryReader.open(dir);
assertEquals(1, reader.leaves().size());
IndexReaderContext topReaderContext = reader.getTopReaderContext();
IndexReaderContext topReaderContext = reader.getContext();
List<AtomicReaderContext> leaves = topReaderContext.leaves();
DocValues docValues = leaves.get(0).reader().docValues("promote");
assertNotNull(docValues);
@@ -43,7 +43,7 @@ public void testCachingWorks() throws Exception {
writer.close();

IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
AtomicReaderContext context = (AtomicReaderContext) reader.getTopReaderContext();
AtomicReaderContext context = (AtomicReaderContext) reader.getContext();
MockFilter filter = new MockFilter();
CachingWrapperFilter cacher = new CachingWrapperFilter(filter);

@@ -69,7 +69,7 @@ public void testNullDocIdSet() throws Exception {
writer.close();

IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
AtomicReaderContext context = (AtomicReaderContext) reader.getTopReaderContext();
AtomicReaderContext context = (AtomicReaderContext) reader.getContext();

final Filter filter = new Filter() {
@Override
@@ -92,7 +92,7 @@ public void testNullDocIdSetIterator() throws Exception {
writer.close();

IndexReader reader = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir));
AtomicReaderContext context = (AtomicReaderContext) reader.getTopReaderContext();
AtomicReaderContext context = (AtomicReaderContext) reader.getContext();

final Filter filter = new Filter() {
@Override
@@ -115,8 +115,8 @@ public DocIdSetIterator iterator() {
}

private static void assertDocIdSetCacheable(IndexReader reader, Filter filter, boolean shouldCacheable) throws IOException {
assertTrue(reader.getTopReaderContext() instanceof AtomicReaderContext);
AtomicReaderContext context = (AtomicReaderContext) reader.getTopReaderContext();
assertTrue(reader.getContext() instanceof AtomicReaderContext);
AtomicReaderContext context = (AtomicReaderContext) reader.getContext();
final CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
final DocIdSet originalSet = filter.getDocIdSet(context, context.reader().getLiveDocs());
final DocIdSet cachedSet = cacher.getDocIdSet(context, context.reader().getLiveDocs());
@@ -197,7 +197,7 @@ public void testRange_2bit() throws Exception {

@Test
public void testInverseRange() throws Exception {
AtomicReaderContext context = SlowCompositeReaderWrapper.wrap(reader).getTopReaderContext();
AtomicReaderContext context = SlowCompositeReaderWrapper.wrap(reader).getContext();
NumericRangeFilter<Integer> f = NumericRangeFilter.newIntRange("field8", 8, 1000, -1000, true, true);
assertSame("A inverse range should return the EMPTY_DOCIDSET instance", DocIdSet.EMPTY_DOCIDSET, f.getDocIdSet(context, context.reader().getLiveDocs()));
f = NumericRangeFilter.newIntRange("field8", 8, Integer.MAX_VALUE, null, false, false);
@@ -211,7 +211,7 @@ public void testRange_2bit() throws Exception {

@Test
public void testInverseRange() throws Exception {
AtomicReaderContext context = SlowCompositeReaderWrapper.wrap(searcher.getIndexReader()).getTopReaderContext();
AtomicReaderContext context = SlowCompositeReaderWrapper.wrap(searcher.getIndexReader()).getContext();
NumericRangeFilter<Long> f = NumericRangeFilter.newLongRange("field8", 8, 1000L, -1000L, true, true);
assertSame("A inverse range should return the EMPTY_DOCIDSET instance", DocIdSet.EMPTY_DOCIDSET,
f.getDocIdSet(context, context.reader().getLiveDocs()));
@@ -67,12 +67,12 @@ public void testSpanTermQuery() throws Exception {
SpanTermQuery stq;
Spans spans;
stq = new SpanTermQuery(new Term(PayloadHelper.FIELD, "seventy"));
spans = MultiSpansWrapper.wrap(indexReader.getTopReaderContext(), stq);
spans = MultiSpansWrapper.wrap(indexReader.getContext(), stq);
assertTrue("spans is null and it shouldn't be", spans != null);
checkSpans(spans, 100, 1, 1, 1);

stq = new SpanTermQuery(new Term(PayloadHelper.NO_PAYLOAD_FIELD, "seventy"));
spans = MultiSpansWrapper.wrap(indexReader.getTopReaderContext(), stq);
spans = MultiSpansWrapper.wrap(indexReader.getContext(), stq);
assertTrue("spans is null and it shouldn't be", spans != null);
checkSpans(spans, 100, 0, 0, 0);
}
@@ -83,19 +83,19 @@ public void testSpanFirst() throws IOException {
SpanFirstQuery sfq;
match = new SpanTermQuery(new Term(PayloadHelper.FIELD, "one"));
sfq = new SpanFirstQuery(match, 2);
Spans spans = MultiSpansWrapper.wrap(indexReader.getTopReaderContext(), sfq);
Spans spans = MultiSpansWrapper.wrap(indexReader.getContext(), sfq);
checkSpans(spans, 109, 1, 1, 1);
//Test more complicated subclause
SpanQuery[] clauses = new SpanQuery[2];
clauses[0] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "one"));
clauses[1] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "hundred"));
match = new SpanNearQuery(clauses, 0, true);
sfq = new SpanFirstQuery(match, 2);
checkSpans(MultiSpansWrapper.wrap(indexReader.getTopReaderContext(), sfq), 100, 2, 1, 1);
checkSpans(MultiSpansWrapper.wrap(indexReader.getContext(), sfq), 100, 2, 1, 1);

match = new SpanNearQuery(clauses, 0, false);
sfq = new SpanFirstQuery(match, 2);
checkSpans(MultiSpansWrapper.wrap(indexReader.getTopReaderContext(), sfq), 100, 2, 1, 1);
checkSpans(MultiSpansWrapper.wrap(indexReader.getContext(), sfq), 100, 2, 1, 1);

}

@@ -119,7 +119,7 @@ public void testSpanNot() throws Exception {
writer.close();


checkSpans(MultiSpansWrapper.wrap(reader.getTopReaderContext(), snq), 1,new int[]{2});
checkSpans(MultiSpansWrapper.wrap(reader.getContext(), snq), 1,new int[]{2});
reader.close();
directory.close();
}
@@ -132,7 +132,7 @@ public boolean isCacheable() {
private void tstFilterCard(String mes, int expected, Filter filt)
throws Exception {
// BooleanFilter never returns null DIS or null DISI!
DocIdSetIterator disi = filt.getDocIdSet(reader.getTopReaderContext(), reader.getLiveDocs()).iterator();
DocIdSetIterator disi = filt.getDocIdSet(reader.getContext(), reader.getLiveDocs()).iterator();
int actual = 0;
while (disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
actual++;
@@ -62,8 +62,8 @@ public void testMissingTerms() throws Exception {
w.addDocument(doc);
}
IndexReader reader = new SlowCompositeReaderWrapper(w.getReader());
assertTrue(reader.getTopReaderContext() instanceof AtomicReaderContext);
AtomicReaderContext context = (AtomicReaderContext) reader.getTopReaderContext();
assertTrue(reader.getContext() instanceof AtomicReaderContext);
AtomicReaderContext context = (AtomicReaderContext) reader.getContext();
w.close();

TermsFilter tf = new TermsFilter();
@@ -66,7 +66,7 @@ public void testGetFilterHandleNumericParseError() throws Exception {
try {
AtomicReader reader = new SlowCompositeReaderWrapper(DirectoryReader.open(ramDir));
try {
assertNull(filter.getDocIdSet(reader.getTopReaderContext(), reader.getLiveDocs()));
assertNull(filter.getDocIdSet(reader.getContext(), reader.getLiveDocs()));
}
finally {
reader.close();
@@ -353,7 +353,7 @@ public boolean acceptsDocsOutOfOrder() {
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader, false);
indexSearcher.setSimilarity(s.getSimilarity());
Weight w = indexSearcher.createNormalizedWeight(q);
AtomicReaderContext ctx = previousReader.getTopReaderContext();
AtomicReaderContext ctx = previousReader.getContext();
Scorer scorer = w.scorer(ctx, true, false, ctx.reader().getLiveDocs());
if (scorer != null) {
boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;