package org.apache.lucene.index.memory;
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
import java.io.IOException;
import java.io.Serializable;
import java.io.StringReader;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.index.TermEnum;
import org.apache.lucene.index.TermFreqVector;
import org.apache.lucene.index.TermPositionVector;
import org.apache.lucene.index.TermPositions;
import org.apache.lucene.index.TermVectorMapper;
import org.apache.lucene.index.FieldInvertState;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.RAMDirectory; // for javadocs
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.Constants; // for javadocs
* High-performance single-document main memory Apache Lucene fulltext search index.
* This class is a replacement/substitute for a large subset of
* {@link RAMDirectory} functionality. It is designed to
* enable maximum efficiency for on-the-fly matchmaking combining structured and
* fuzzy fulltext search in realtime streaming applications such as Nux XQuery based XML
* message queues, publish-subscribe systems for Blogs/newsfeeds, text chat, data acquisition and
* distribution systems, application level routers, firewalls, classifiers, etc.
* Rather than targeting fulltext search of infrequent queries over huge persistent
* data archives (historic search), this class targets fulltext search of huge
* numbers of queries over comparatively small transient realtime data (prospective
* float score = search(String text, Query query)
* Each instance can hold at most one Lucene "document", with a document containing
* zero or more "fields", each field having a name and a fulltext value. The
* fulltext value is tokenized (split and transformed) into zero or more index terms
* (aka words) on <code>addField()</code>, according to the policy implemented by an
* Analyzer. For example, Lucene analyzers can split on whitespace, normalize to lower case
* for case insensitivity, ignore common terms with little discriminatory value such as "he", "in", "and" (stop
* words), reduce the terms to their natural linguistic root form such as "fishing"
* being reduced to "fish" (stemming), resolve synonyms/inflexions/thesauri
* (upon indexing and/or querying), etc. For details, see
* <a target="_blank" href="http://today.java.net/pub/a/today/2003/07/30/LuceneIntro.html">Lucene Analyzer Intro</a>.
* Arbitrary Lucene queries can be run against this class - see <a target="_blank"
* href="../../../../../../../queryparsersyntax.html">Lucene Query Syntax</a>
* as well as <a target="_blank"
* href="http://today.java.net/pub/a/today/2003/11/07/QueryParserRules.html">Query Parser Rules</a>.
* Note that a Lucene query selects on the field names and associated (indexed)
* tokenized terms, not on the original fulltext(s) - the latter are not stored
* but rather thrown away immediately after tokenization.
* For some interesting background information on search technology, see Bob Wyman's
* href="http://bobwyman.pubsub.com/main/2005/05/mary_hodder_poi.html">Prospective Search</a>,
* <a target="_blank" href="http://www.acmqueue.org/modules.php?name=Content&pa=showpage&pid=293&page=4">
* A Call to Arms - Custom subscriptions</a>, and Tim Bray's
* href="http://www.tbray.org/ongoing/When/200x/2003/07/30/OnSearchTOC">On Search, the Series</a>.
* <h4>Example Usage</h4>
* Analyzer analyzer = PatternAnalyzer.DEFAULT_ANALYZER;
* //Analyzer analyzer = new SimpleAnalyzer();
* MemoryIndex index = new MemoryIndex();
* index.addField("content", "Readings about Salmons and other select Alaska fishing Manuals", analyzer);
* index.addField("author", "Tales of James", analyzer);
* QueryParser parser = new QueryParser("content", analyzer);
* float score = index.search(parser.parse("+author:james +salmon~ +fish* manual~"));
* if (score > 0.0f) {
* System.out.println("it's a match");
* System.out.println("no match found");
* System.out.println("indexData=" + index.toString());
* <h4>Example XQuery Usage</h4>
* (: An XQuery that finds all books authored by James that have something to do with "salmon fishing manuals", sorted by relevance :)
* declare namespace lucene = "java:nux.xom.pool.FullTextUtil";
* declare variable $query := "+salmon~ +fish* manual~"; (: any arbitrary Lucene query can go here :)
* for $book in /books/book[author="James" and lucene:match(abstract, $query) > 0.0]
* let $score := lucene:match($book/abstract, $query)
* order by $score descending
* <h4>No thread safety guarantees</h4>
* An instance can be queried multiple times with the same or different queries,
* but an instance is not thread-safe. If desired use idioms such as:
* MemoryIndex index = ...
* synchronized (index) {
* // read and/or write index (i.e. add fields and/or query)
* <h4>Performance Notes</h4>
* Internally there's a new data structure geared towards efficient indexing
* and searching, plus the necessary support code to seamlessly plug into the Lucene
* This class performs very well for very small texts (e.g. 10 chars)
* as well as for large texts (e.g. 10 MB) and everything in between.
* Typically, it is about 10-100 times faster than <code>RAMDirectory</code>.
* Note that <code>RAMDirectory</code> has particularly
* large efficiency overheads for small to medium sized texts, both in time and space.
* Indexing a field with N tokens takes O(N) in the best case, and O(N logN) in the worst
* case. Memory consumption is probably larger than for <code>RAMDirectory</code>.
* Example throughput of many simple term queries over a single MemoryIndex:
* ~500000 queries/sec on a MacBook Pro, jdk 1.5.0_06, server VM.
* As always, your mileage may vary.
* If you're curious about
* the whereabouts of bottlenecks, run java 1.5 with the non-perturbing '-server
* -agentlib:hprof=cpu=samples,depth=10' flags, then study the trace log and
* correlate its hotspot trailer with its call stack headers (see <a
* href="http://java.sun.com/developer/technicalArticles/Programming/HPROF.html">
* hprof tracing </a>).
public class MemoryIndex implements Serializable {
/** info for each field: Map<String fieldName, Info field> */
private final HashMap<String,Info> fields = new HashMap<String,Info>();
/** fields sorted ascending by fieldName; lazily computed on demand */
private transient Map.Entry<String,Info>[] sortedFields;
/** pos: positions[3*i], startOffset: positions[3*i +1], endOffset: positions[3*i +2] */
private final int stride;
/** Could be made configurable; See {@link Document#setBoost(float)} */
private static final float docBoost = 1.0f;
private static final long serialVersionUID = 2782195016849084649L;
private static final boolean DEBUG = false;
* Sorts term entries into ascending order; also works for
* Arrays.binarySearch() and Arrays.sort()
private static final Comparator<Object> termComparator = new Comparator<Object>() {
@SuppressWarnings("unchecked")
public int compare(Object o1, Object o2) {
if (o1 instanceof Map.Entry<?,?>) o1 = ((Map.Entry<?,?>) o1).getKey();
if (o2 instanceof Map.Entry<?,?>) o2 = ((Map.Entry<?,?>) o2).getKey();
if (o1 == o2) return 0;
return ((String) o1).compareTo((String) o2);
* Constructs an empty instance.
public MemoryIndex() {
* Constructs an empty instance that can optionally store the start and end
* character offset of each token term in the text. This can be useful for
* highlighting of hit locations with the Lucene highlighter package.
* Private until the highlighter package matures, so that this can actually
* be meaningfully integrated.
* @param storeOffsets
* whether or not to store the start and end character offset of
* each token term in the text
private MemoryIndex(boolean storeOffsets) {
this.stride = storeOffsets ? 3 : 1;
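// Illustrative note (not part of the original source): with storeOffsets == true the
// stride is 3, so each occurrence of a term contributes three consecutive ints to its
// ArrayIntList. For the text "foo foo" the term "foo" would plausibly store
// [0, 0, 3, 1, 4, 7], i.e. position 0 with offsets [0,3) followed by position 1 with
// offsets [4,7); with storeOffsets == false (stride 1) only the positions [0, 1] are kept.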
* Convenience method; Tokenizes the given field text and adds the resulting
* terms to the index; Equivalent to adding an indexed non-keyword Lucene
* {@link org.apache.lucene.document.Field} that is
* {@link org.apache.lucene.document.Field.Index#ANALYZED tokenized},
* {@link org.apache.lucene.document.Field.Store#NO not stored},
* {@link org.apache.lucene.document.Field.TermVector#WITH_POSITIONS termVectorStored with positions} (or
* {@link org.apache.lucene.document.Field.TermVector#WITH_POSITIONS termVectorStored with positions and offsets}),
* a name to be associated with the text
* the text to tokenize and index.
* the analyzer to use for tokenization
public void addField(String fieldName, String text, Analyzer analyzer) {
if (fieldName == null)
throw new IllegalArgumentException("fieldName must not be null");
throw new IllegalArgumentException("text must not be null");
if (analyzer == null)
throw new IllegalArgumentException("analyzer must not be null");
stream = analyzer.reusableTokenStream(fieldName, new StringReader(text));
} catch (IOException ex) {
throw new RuntimeException(ex);
addField(fieldName, stream);
* Convenience method; Creates and returns a token stream that generates a
* token for each keyword in the given collection, "as is", without any
* transforming text analysis. The resulting token stream can be fed into
* {@link #addField(String, TokenStream)}, perhaps wrapped into another
* {@link org.apache.lucene.analysis.TokenFilter}, as desired.
* the keywords to generate tokens for
* @return the corresponding token stream
public <T> TokenStream keywordTokenStream(final Collection<T> keywords) {
// TODO: deprecate & move this method into AnalyzerUtil?
if (keywords == null)
throw new IllegalArgumentException("keywords must not be null");
return new TokenStream() {
private Iterator<T> iter = keywords.iterator();
private int start = 0;
private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
public boolean incrementToken() {
if (!iter.hasNext()) return false;
throw new IllegalArgumentException("keyword must not be null");
String term = obj.toString();
termAtt.setEmpty().append(term);
offsetAtt.setOffset(start, start+termAtt.length());
start += term.length() + 1; // separate words by 1 (blank) character
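// Usage sketch (illustrative, not part of the original source): untokenized keywords can
// be indexed verbatim by feeding this stream into addField, for example:
//
//   MemoryIndex index = new MemoryIndex();
//   index.addField("tags", index.keywordTokenStream(Arrays.asList("lucene", "memory")));
//   float score = index.search(new TermQuery(new Term("tags", "lucene")));
//
// TermQuery is assumed to be imported from org.apache.lucene.search; any exact-match
// query against the "tags" field would work the same way.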
* Equivalent to <code>addField(fieldName, stream, 1.0f)</code>.
* a name to be associated with the text
* the token stream to retrieve tokens from
public void addField(String fieldName, TokenStream stream) {
addField(fieldName, stream, 1.0f);
* Iterates over the given token stream and adds the resulting terms to the index;
* Equivalent to adding a tokenized, indexed, termVectorStored, unstored,
* Lucene {@link org.apache.lucene.document.Field}.
* Finally closes the token stream. Note that untokenized keywords can be added with this method via
* {@link #keywordTokenStream(Collection)}, the Lucene contrib <code>KeywordTokenizer</code> or similar utilities.
* a name to be associated with the text
* the token stream to retrieve tokens from.
* the boost factor for hits for this field
* @see org.apache.lucene.document.Field#setBoost(float)
public void addField(String fieldName, TokenStream stream, float boost) {
if (fieldName == null)
throw new IllegalArgumentException("fieldName must not be null");
throw new IllegalArgumentException("token stream must not be null");
throw new IllegalArgumentException("boost factor must be greater than 0.0");
if (fields.get(fieldName) != null)
throw new IllegalArgumentException("field must not be added more than once");
HashMap<String,ArrayIntList> terms = new HashMap<String,ArrayIntList>();
int numOverlapTokens = 0;
CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
PositionIncrementAttribute posIncrAttribute = stream.addAttribute(PositionIncrementAttribute.class);
OffsetAttribute offsetAtt = stream.addAttribute(OffsetAttribute.class);
while (stream.incrementToken()) {
String term = termAtt.toString();
if (term.length() == 0) continue; // nothing to do
// if (DEBUG) System.err.println("token='" + term + "'");
final int posIncr = posIncrAttribute.getPositionIncrement();
ArrayIntList positions = terms.get(term);
if (positions == null) { // term not seen before
positions = new ArrayIntList(stride);
terms.put(term, positions);
positions.add(pos, offsetAtt.startOffset(), offsetAtt.endOffset());
// ensure infos.numTokens > 0 invariant; needed for correct operation of terms()
boost = boost * docBoost; // see DocumentWriter.addDocument(...)
fields.put(fieldName, new Info(terms, numTokens, numOverlapTokens, boost));
sortedFields = null; // invalidate sorted view, if any
} catch (IOException e) { // can never happen
throw new RuntimeException(e);
if (stream != null) stream.close();
} catch (IOException e2) {
throw new RuntimeException(e2);
* Creates and returns a searcher that can be used to execute arbitrary
* Lucene queries and to collect the resulting query results as hits.
public IndexSearcher createSearcher() {
MemoryIndexReader reader = new MemoryIndexReader();
IndexSearcher searcher = new IndexSearcher(reader); // ensures no auto-close !!
reader.setSearcher(searcher); // to later get hold of searcher.getSimilarity()
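// Usage sketch (illustrative, not part of the original source): the returned searcher can
// run any query against the single in-memory document, e.g. to obtain the top hit directly:
//
//   IndexSearcher searcher = index.createSearcher();
//   TopDocs docs = searcher.search(query, 1); // at most one document can match
//   float score = docs.totalHits > 0 ? docs.scoreDocs[0].score : 0.0f;
//
// TopDocs is assumed to be imported from org.apache.lucene.search; closing the searcher
// is not required here, as the comments further below explain.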
* Convenience method that efficiently returns the relevance score by
* matching this index against the given Lucene query expression.
* an arbitrary Lucene query to run against this index
* @return the relevance score of the matchmaking; A number in the range
* [0.0 .. 1.0], with 0.0 indicating no match. The higher the number
* the better the match.
public float search(Query query) {
throw new IllegalArgumentException("query must not be null");
Searcher searcher = createSearcher();
final float[] scores = new float[1]; // inits to 0.0f (no match)
searcher.search(query, new Collector() {
private Scorer scorer;
public void collect(int doc) throws IOException {
scores[0] = scorer.score();
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
public boolean acceptsDocsOutOfOrder() {
public void setNextReader(IndexReader reader, int docBase) { }
float score = scores[0];
} catch (IOException e) { // can never happen (RAMDirectory)
throw new RuntimeException(e);
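// Usage sketch (illustrative, not part of the original source): a typical prospective
// search loop builds one MemoryIndex per incoming message and matches it against a set
// of standing queries:
//
//   MemoryIndex index = new MemoryIndex();
//   index.addField("content", messageText, analyzer); // one index per message
//   for (Query standingQuery : standingQueries) {      // hypothetical collection
//     if (index.search(standingQuery) > 0.0f) notifySubscriber(standingQuery);
//   }
//
// messageText, standingQueries and notifySubscriber are placeholders, not part of this class.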
* Note that it is harmless and important for good performance to
* NOT close the index reader!!! This avoids all sorts of
* unnecessary baggage and locking in the Lucene IndexReader
* superclass, all of which is completely unnecessary for this main
* memory index data structure without thread-safety claims.
* Wishing IndexReader would be an interface...
* Actually with the new tight createSearcher() API auto-closing is now
* made impossible, hence searcher.close() would be harmless and also
* would not degrade performance...
* Returns a reasonable approximation of the main memory [bytes] consumed by
* this instance. Useful for smart memory sensitive caches/pools. Assumes
* fieldNames are interned, whereas tokenized terms are memory-overlaid.
* @return the main memory consumption
public int getMemorySize() {
// for example usage in a smart cache see nux.xom.pool.Pool
size += VM.sizeOfObject(2*PTR + INT); // memory index
if (sortedFields != null) size += VM.sizeOfObjectArray(sortedFields.length);
size += VM.sizeOfHashMap(fields.size());
for (Map.Entry<String, Info> entry : fields.entrySet()) { // for each Field Info
Info info = entry.getValue();
size += VM.sizeOfObject(2*INT + 3*PTR); // Info instance vars
if (info.sortedTerms != null) size += VM.sizeOfObjectArray(info.sortedTerms.length);
int len = info.terms.size();
size += VM.sizeOfHashMap(len);
Iterator<Map.Entry<String,ArrayIntList>> iter2 = info.terms.entrySet().iterator();
while (--len >= 0) { // for each term
Map.Entry<String,ArrayIntList> e = iter2.next();
size += VM.sizeOfObject(PTR + 3*INT); // assumes substring() memory overlay
// size += STR + 2 * ((String) e.getKey()).length();
ArrayIntList positions = e.getValue();
size += VM.sizeOfArrayIntList(positions.size());
private int numPositions(ArrayIntList positions) {
return positions.size() / stride;
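// Illustrative note (not part of the original source): with offsets stored (stride 3) an
// ArrayIntList holding 9 ints describes 9/3 = 3 occurrences of the term; with stride 1
// the same 9 ints would describe 9 occurrences.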
/** sorts into ascending order (on demand), reusing memory along the way */
private void sortFields() {
if (sortedFields == null) sortedFields = sort(fields);
/** returns a view of the given map's entries, sorted ascending by key */
private static <K,V> Map.Entry<K,V>[] sort(HashMap<K,V> map) {
int size = map.size();
@SuppressWarnings("unchecked")
Map.Entry<K,V>[] entries = new Map.Entry[size];
Iterator<Map.Entry<K,V>> iter = map.entrySet().iterator();
for (int i=0; i < size; i++) {
entries[i] = iter.next();
if (size > 1) ArrayUtil.quickSort(entries, termComparator);
* Returns a String representation of the index data for debugging purposes.
* @return the string representation
public String toString() {
StringBuilder result = new StringBuilder(256);
int sumPositions = 0;
for (int i=0; i < sortedFields.length; i++) {
Map.Entry<String,Info> entry = sortedFields[i];
String fieldName = entry.getKey();
Info info = entry.getValue();
result.append(fieldName + ":\n");
int numPositions = 0;
for (int j=0; j < info.sortedTerms.length; j++) {
Map.Entry<String,ArrayIntList> e = info.sortedTerms[j];
String term = e.getKey();
ArrayIntList positions = e.getValue();
result.append("\t'" + term + "':" + numPositions(positions) + ":");
result.append(positions.toString(stride)); // ignore offsets
numPositions += numPositions(positions);
numChars += term.length();
result.append("\tterms=" + info.sortedTerms.length);
result.append(", positions=" + numPositions);
result.append(", Kchars=" + (numChars/1000.0f));
sumPositions += numPositions;
sumChars += numChars;
sumTerms += info.sortedTerms.length;
result.append("\nfields=" + sortedFields.length);
result.append(", terms=" + sumTerms);
result.append(", positions=" + sumPositions);
result.append(", Kchars=" + (sumChars/1000.0f));
return result.toString();
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
* Index data structure for a field; Contains the tokenized term texts and
private static final class Info implements Serializable {
* Term strings and their positions for this field: Map <String
* termText, ArrayIntList positions>
private final HashMap<String,ArrayIntList> terms;
/** Terms sorted ascending by term text; computed on demand */
private transient Map.Entry<String,ArrayIntList>[] sortedTerms;
/** Number of added tokens for this field */
private final int numTokens;
/** Number of overlapping tokens for this field */
private final int numOverlapTokens;
/** Boost factor for hits for this field */
private final float boost;
/** Term for this field's fieldName, lazily computed on demand */
public transient Term template;
private static final long serialVersionUID = 2882195016849084649L;
public Info(HashMap<String,ArrayIntList> terms, int numTokens, int numOverlapTokens, float boost) {
this.numTokens = numTokens;
this.numOverlapTokens = numOverlapTokens;
* Sorts hashed terms into ascending order, reusing memory along the
* way. Note that sorting is lazily delayed until required (often it's
* not required at all). If a sorted view is required then hashing +
* sort + binary search is still faster and smaller than TreeMap usage
* (which would be an alternative and somewhat more elegant approach,
* apart from more sophisticated Tries / prefix trees).
public void sortTerms() {
if (sortedTerms == null) sortedTerms = sort(terms);
/** note that the frequency can be calculated as numPosition(getPositions(x)) */
public ArrayIntList getPositions(String term) {
return terms.get(term);
/** note that the frequency can be calculated as numPosition(getPositions(x)) */
public ArrayIntList getPositions(int pos) {
return sortedTerms[pos].getValue();
public float getBoost() {
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
* Efficient resizable auto-expanding list holding <code>int</code> elements;
* implemented with arrays.
private static final class ArrayIntList implements Serializable {
private int[] elements;
private int size = 0;
private static final long serialVersionUID = 2282195016849084649L;
public ArrayIntList() {
public ArrayIntList(int initialCapacity) {
elements = new int[initialCapacity];
public void add(int elem) {
if (size == elements.length) ensureCapacity(size + 1);
elements[size++] = elem;
public void add(int pos, int start, int end) {
if (size + 3 > elements.length) ensureCapacity(size + 3);
elements[size] = pos;
elements[size+1] = start;
elements[size+2] = end;
public int get(int index) {
if (index >= size) throwIndex(index);
return elements[index];
public int[] toArray(int stride) {
int[] arr = new int[size() / stride];
System.arraycopy(elements, 0, arr, 0, size); // fast path
for (int i=0, j=0; j < size; i++, j += stride) arr[i] = elements[j];
private void ensureCapacity(int minCapacity) {
int newCapacity = Math.max(minCapacity, (elements.length * 3) / 2 + 1);
int[] newElements = new int[newCapacity];
System.arraycopy(elements, 0, newElements, 0, size);
elements = newElements;
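// Illustrative note (not part of the original source): the capacity grows by roughly
// 1.5x, e.g. a full array of length 10 grows to max(11, 10*3/2 + 1) = 16, giving
// amortized O(1) cost per added element.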
private void throwIndex(int index) {
throw new IndexOutOfBoundsException("index: " + index
+ ", size: " + size);
/** returns the first few positions (without offsets); debug only */
public String toString(int stride) {
int s = size() / stride;
int len = Math.min(10, s); // avoid printing huge lists
StringBuilder buf = new StringBuilder(4*len);
for (int i = 0; i < len; i++) {
buf.append(get(i*stride));
if (i < len-1) buf.append(", ");
if (len != s) buf.append(", ..."); // and some more...
return buf.toString();
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
private static final Term MATCH_ALL_TERM = new Term("");
* Search support for Lucene framework integration; implements all methods
* required by the Lucene IndexReader contracts.
private final class MemoryIndexReader extends IndexReader {
private Searcher searcher; // needed to find searcher.getSimilarity()
private MemoryIndexReader() {
super(); // avoid as much superclass baggage as possible
readerFinishedListeners = Collections.synchronizedSet(new HashSet<ReaderFinishedListener>());
private Info getInfo(String fieldName) {
return fields.get(fieldName);
private Info getInfo(int pos) {
return sortedFields[pos].getValue();
public int docFreq(Term term) {
Info info = getInfo(term.field());
if (info != null) freq = info.getPositions(term.text()) != null ? 1 : 0;
if (DEBUG) System.err.println("MemoryIndexReader.docFreq: " + term + ", freq:" + freq);
public TermEnum terms() {
if (DEBUG) System.err.println("MemoryIndexReader.terms()");
return terms(MATCH_ALL_TERM);
public TermEnum terms(Term term) {
if (DEBUG) System.err.println("MemoryIndexReader.terms: " + term);
int i; // index into info.sortedTerms
int j; // index into sortedFields
if (sortedFields.length == 1 && sortedFields[0].getKey() == term.field()) {
j = Arrays.binarySearch(sortedFields, term.field(), termComparator);
if (j < 0) { // not found; choose successor
if (j < sortedFields.length) getInfo(j).sortTerms();
Info info = getInfo(j);
i = Arrays.binarySearch(info.sortedTerms, term.text(), termComparator);
if (i < 0) { // not found; choose successor
if (i >= info.sortedTerms.length) { // move to next successor
if (j < sortedFields.length) getInfo(j).sortTerms();
return new TermEnum() {
private int srtTermsIdx = ix; // index into info.sortedTerms
private int srtFldsIdx = jx; // index into sortedFields
public boolean next() {
if (DEBUG) System.err.println("TermEnum.next");
if (srtFldsIdx >= sortedFields.length) return false;
Info info = getInfo(srtFldsIdx);
if (++srtTermsIdx < info.sortedTerms.length) return true;
if (srtFldsIdx >= sortedFields.length) return false;
getInfo(srtFldsIdx).sortTerms();
if (DEBUG) System.err.println("TermEnum.term: " + srtTermsIdx);
if (srtFldsIdx >= sortedFields.length) return null;
Info info = getInfo(srtFldsIdx);
if (srtTermsIdx >= info.sortedTerms.length) return null;
// if (DEBUG) System.err.println("TermEnum.term: " + i + ", " + info.sortedTerms[i].getKey());
return createTerm(info, srtFldsIdx, info.sortedTerms[srtTermsIdx].getKey());
public int docFreq() {
if (DEBUG) System.err.println("TermEnum.docFreq");
if (srtFldsIdx >= sortedFields.length) return 0;
Info info = getInfo(srtFldsIdx);
if (srtTermsIdx >= info.sortedTerms.length) return 0;
return numPositions(info.getPositions(srtTermsIdx));
public void close() {
if (DEBUG) System.err.println("TermEnum.close");
/** Returns a new Term object, minimizing String.intern() overheads. */
private Term createTerm(Info info, int pos, String text) {
// Assertion: sortFields has already been called before
Term template = info.template;
if (template == null) { // not yet cached?
String fieldName = sortedFields[pos].getKey();
template = new Term(fieldName);
info.template = template;
return template.createTerm(text);
public TermPositions termPositions() {
if (DEBUG) System.err.println("MemoryIndexReader.termPositions");
return new TermPositions() {
private boolean hasNext;
private int cursor = 0;
private ArrayIntList current;
public void seek(Term term) {
if (DEBUG) System.err.println(".seek: " + term);
hasNext = true; // term==null means match all docs
Info info = getInfo(term.field());
current = info == null ? null : info.getPositions(term.text());
hasNext = (current != null);
public void seek(TermEnum termEnum) {
if (DEBUG) System.err.println(".seekEnum");
seek(termEnum.term());
if (DEBUG) System.err.println(".doc");
int freq = current != null ? numPositions(current) : (term == null ? 1 : 0);
if (DEBUG) System.err.println(".freq: " + freq);
public boolean next() {
if (DEBUG) System.err.println(".next: " + current + ", oldHasNext=" + hasNext);
boolean next = hasNext;
public int read(int[] docs, int[] freqs) {
if (DEBUG) System.err.println(".read: " + docs.length);
if (!hasNext) return 0;
public boolean skipTo(int target) {
if (DEBUG) System.err.println(".skipTo: " + target);
public void close() {
if (DEBUG) System.err.println(".close");
public int nextPosition() { // implements TermPositions
int pos = current.get(cursor);
if (DEBUG) System.err.println(".nextPosition: " + pos);
* @throws UnsupportedOperationException
public int getPayloadLength() {
throw new UnsupportedOperationException();
* @throws UnsupportedOperationException
public byte[] getPayload(byte[] data, int offset) throws IOException {
throw new UnsupportedOperationException();
public boolean isPayloadAvailable() {
public TermDocs termDocs() {
if (DEBUG) System.err.println("MemoryIndexReader.termDocs");
return termPositions();
public TermFreqVector[] getTermFreqVectors(int docNumber) {
if (DEBUG) System.err.println("MemoryIndexReader.getTermFreqVectors");
TermFreqVector[] vectors = new TermFreqVector[fields.size()];
// if (vectors.length == 0) return null;
Iterator<String> iter = fields.keySet().iterator();
for (int i=0; i < vectors.length; i++) {
vectors[i] = getTermFreqVector(docNumber, iter.next());
public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException
if (DEBUG) System.err.println("MemoryIndexReader.getTermFreqVectors");
// if (vectors.length == 0) return null;
for (final String fieldName : fields.keySet())
getTermFreqVector(docNumber, fieldName, mapper);
public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException
if (DEBUG) System.err.println("MemoryIndexReader.getTermFreqVector");
final Info info = getInfo(field);
mapper.setExpectations(field, info.sortedTerms.length, stride != 1, true);
for (int i = info.sortedTerms.length; --i >=0;){
ArrayIntList positions = info.sortedTerms[i].getValue();
int size = positions.size();
org.apache.lucene.index.TermVectorOffsetInfo[] offsets =
new org.apache.lucene.index.TermVectorOffsetInfo[size / stride];
for (int k=0, j=1; j < size; k++, j += stride) {
int start = positions.get(j);
int end = positions.get(j+1);
offsets[k] = new org.apache.lucene.index.TermVectorOffsetInfo(start, end);
mapper.map(info.sortedTerms[i].getKey(),
numPositions(info.sortedTerms[i].getValue()),
offsets, (info.sortedTerms[i].getValue()).toArray(stride));
public TermFreqVector getTermFreqVector(int docNumber, final String fieldName) {
if (DEBUG) System.err.println("MemoryIndexReader.getTermFreqVector");
final Info info = getInfo(fieldName);
if (info == null) return null; // TODO: or return empty vector impl???
return new TermPositionVector() {
private final Map.Entry<String,ArrayIntList>[] sortedTerms = info.sortedTerms;
public String getField() {
return sortedTerms.length;
public String[] getTerms() {
String[] terms = new String[sortedTerms.length];
for (int i=sortedTerms.length; --i >= 0; ) {
terms[i] = sortedTerms[i].getKey();
public int[] getTermFrequencies() {
int[] freqs = new int[sortedTerms.length];
for (int i=sortedTerms.length; --i >= 0; ) {
freqs[i] = numPositions(sortedTerms[i].getValue());
public int indexOf(String term) {
int i = Arrays.binarySearch(sortedTerms, term, termComparator);
return i >= 0 ? i : -1;
public int[] indexesOf(String[] terms, int start, int len) {
int[] indexes = new int[len];
for (int i=0; i < len; i++) {
indexes[i] = indexOf(terms[start++]);
public int[] getTermPositions(int index) {
return sortedTerms[index].getValue().toArray(stride);
// lucene >= 1.9 (remove this method for lucene-1.4.3)
public org.apache.lucene.index.TermVectorOffsetInfo[] getOffsets(int index) {
if (stride == 1) return null; // no offsets stored
ArrayIntList positions = sortedTerms[index].getValue();
int size = positions.size();
org.apache.lucene.index.TermVectorOffsetInfo[] offsets =
new org.apache.lucene.index.TermVectorOffsetInfo[size / stride];
for (int i=0, j=1; j < size; i++, j += stride) {
int start = positions.get(j);
int end = positions.get(j+1);
offsets[i] = new org.apache.lucene.index.TermVectorOffsetInfo(start, end);
private Similarity getSimilarity() {
if (searcher != null) return searcher.getSimilarity();
return Similarity.getDefault();
private void setSearcher(Searcher searcher) {
this.searcher = searcher;
/** performance hack: cache norms to avoid repeated expensive calculations */
private byte[] cachedNorms;
private String cachedFieldName;
private Similarity cachedSimilarity;
public byte[] norms(String fieldName) {
byte[] norms = cachedNorms;
Similarity sim = getSimilarity();
if (fieldName != cachedFieldName || sim != cachedSimilarity) { // not cached?
Info info = getInfo(fieldName);
int numTokens = info != null ? info.numTokens : 0;
int numOverlapTokens = info != null ? info.numOverlapTokens : 0;
float boost = info != null ? info.getBoost() : 1.0f;
FieldInvertState invertState = new FieldInvertState(0, numTokens, numOverlapTokens, 0, boost);
float n = sim.computeNorm(fieldName, invertState);
byte norm = sim.encodeNormValue(n);
norms = new byte[] {norm};
// cache it for future reuse
cachedNorms = norms;
cachedFieldName = fieldName;
cachedSimilarity = sim;
if (DEBUG) System.err.println("MemoryIndexReader.norms: " + fieldName + ":" + n + ":" + norm + ":" + numTokens);
public void norms(String fieldName, byte[] bytes, int offset) {
if (DEBUG) System.err.println("MemoryIndexReader.norms*: " + fieldName);
byte[] norms = norms(fieldName);
System.arraycopy(norms, 0, bytes, offset, norms.length);
1136
throw new UnsupportedOperationException();
1140
public int numDocs() {
1141
if (DEBUG) System.err.println("MemoryIndexReader.numDocs");
1142
return fields.size() > 0 ? 1 : 0;
1146
public int maxDoc() {
1147
if (DEBUG) System.err.println("MemoryIndexReader.maxDoc");
1152
public Document document(int n) {
1153
if (DEBUG) System.err.println("MemoryIndexReader.document");
1154
return new Document(); // there are no stored fields
1157
//When we convert to JDK 1.5 make this Set<String>
1159
public Document document(int n, FieldSelector fieldSelector) throws IOException {
1160
if (DEBUG) System.err.println("MemoryIndexReader.document");
1161
return new Document(); // there are no stored fields
1165
public boolean isDeleted(int n) {
1166
if (DEBUG) System.err.println("MemoryIndexReader.isDeleted");
1171
public boolean hasDeletions() {
1172
if (DEBUG) System.err.println("MemoryIndexReader.hasDeletions");
1177
protected void doDelete(int docNum) {
1178
throw new UnsupportedOperationException();
1182
protected void doUndeleteAll() {
1183
throw new UnsupportedOperationException();
1187
protected void doCommit(Map<String,String> commitUserData) {
1188
if (DEBUG) System.err.println("MemoryIndexReader.doCommit");
1192
protected void doClose() {
1193
if (DEBUG) System.err.println("MemoryIndexReader.doClose");
1196
// lucene >= 1.9 (remove this method for lucene-1.4.3)
1198
public Collection<String> getFieldNames(FieldOption fieldOption) {
1199
if (DEBUG) System.err.println("MemoryIndexReader.getFieldNamesOption");
1200
if (fieldOption == FieldOption.UNINDEXED)
1201
return Collections.<String>emptySet();
1202
if (fieldOption == FieldOption.INDEXED_NO_TERMVECTOR)
1203
return Collections.<String>emptySet();
1204
if (fieldOption == FieldOption.TERMVECTOR_WITH_OFFSET && stride == 1)
1205
return Collections.<String>emptySet();
1206
if (fieldOption == FieldOption.TERMVECTOR_WITH_POSITION_OFFSET && stride == 1)
1207
return Collections.<String>emptySet();
1209
return Collections.unmodifiableSet(fields.keySet());
1214
///////////////////////////////////////////////////////////////////////////////
1216
///////////////////////////////////////////////////////////////////////////////
1217
private static final class VM {
1219
public static final int PTR = Constants.JRE_IS_64BIT ? 8 : 4;
1221
// bytes occupied by primitive data types
1222
public static final int BOOLEAN = 1;
1223
public static final int BYTE = 1;
1224
public static final int CHAR = 2;
1225
public static final int SHORT = 2;
1226
public static final int INT = 4;
1227
public static final int LONG = 8;
1228
public static final int FLOAT = 4;
1229
public static final int DOUBLE = 8;
1231
private static final int LOG_PTR = (int) Math.round(log2(PTR));
1234
* Object header of any heap allocated Java object.
1235
* ptr to class, info for monitor, gc, hash, etc.
1237
private static final int OBJECT_HEADER = 2*PTR;
1239
private VM() {} // not instantiable
1246
private static int sizeOf(int n) {
1247
return (((n-1) >> LOG_PTR) + 1) << LOG_PTR;
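// Illustrative note (not part of the original source): this rounds n up to the next
// multiple of the pointer size, e.g. on a 64-bit JRE (PTR=8, LOG_PTR=3)
// sizeOf(13) = ((12 >> 3) + 1) << 3 = 16 and sizeOf(16) = ((15 >> 3) + 1) << 3 = 16.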
public static int sizeOfObject(int n) {
return sizeOf(OBJECT_HEADER + n);
public static int sizeOfObjectArray(int len) {
return sizeOfObject(INT + PTR*len);
public static int sizeOfCharArray(int len) {
return sizeOfObject(INT + CHAR*len);
public static int sizeOfIntArray(int len) {
return sizeOfObject(INT + INT*len);
public static int sizeOfString(int len) {
return sizeOfObject(3*INT + PTR) + sizeOfCharArray(len);
public static int sizeOfHashMap(int len) {
return sizeOfObject(4*PTR + 4*INT) + sizeOfObjectArray(len)
+ len * sizeOfObject(3*PTR + INT); // entries
// note: does not include referenced objects
public static int sizeOfArrayList(int len) {
return sizeOfObject(PTR + 2*INT) + sizeOfObjectArray(len);
public static int sizeOfArrayIntList(int len) {
return sizeOfObject(PTR + INT) + sizeOfIntArray(len);
/** logarithm to the base 2. Example: log2(4) == 2, log2(8) == 3 */
private static double log2(double value) {
return Math.log(value) / Math.log(2);