~slub.team/goobi-indexserver/3.x

Viewing changes to lucene/src/java/org/apache/lucene/index/TermsHash.java

  • Committer: Sebastian Meyer
  • Date: 2012-08-03 09:12:40 UTC
  • Revision ID: sebastian.meyer@slub-dresden.de-20120803091240-x6861b0vabq1xror
Remove Lucene and Solr source code and add patches instead
Fix Bug #985487: Auto-suggestion for the search interface

package org.apache.lucene.index;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;

/** This class implements {@link InvertedDocConsumer}, which
 *  is passed each token produced by the analyzer on each
 *  field.  It stores these tokens in a hash table, and
 *  allocates separate byte streams per token.  Consumers of
 *  this class, e.g. {@link FreqProxTermsWriter} and {@link
 *  TermVectorsTermsWriter}, write their own byte streams
 *  under each term.
 */
final class TermsHash extends InvertedDocConsumer {

  final TermsHashConsumer consumer;
  final TermsHash nextTermsHash;
  final DocumentsWriter docWriter;

  boolean trackAllocations;

  public TermsHash(final DocumentsWriter docWriter, boolean trackAllocations, final TermsHashConsumer consumer, final TermsHash nextTermsHash) {
    this.docWriter = docWriter;
    this.consumer = consumer;
    this.nextTermsHash = nextTermsHash;
    this.trackAllocations = trackAllocations;
  }

  @Override
  InvertedDocConsumerPerThread addThread(DocInverterPerThread docInverterPerThread) {
    return new TermsHashPerThread(docInverterPerThread, this, nextTermsHash, null);
  }

  // Variant used when this TermsHash is the secondary in a chain: the new
  // per-thread is linked to the primary's per-thread.
  TermsHashPerThread addThread(DocInverterPerThread docInverterPerThread, TermsHashPerThread primaryPerThread) {
    return new TermsHashPerThread(docInverterPerThread, this, nextTermsHash, primaryPerThread);
  }

  @Override
  void setFieldInfos(FieldInfos fieldInfos) {
    this.fieldInfos = fieldInfos;
    consumer.setFieldInfos(fieldInfos);
  }

  /** Aborts the primary consumer first; the finally block guarantees the
   *  chained TermsHash is aborted even if the consumer's abort throws. */
  @Override
  public void abort() {
    try {
      consumer.abort();
    } finally {
      if (nextTermsHash != null) {
        nextTermsHash.abort();
      }
    }
  }

  /** Regroups each thread's per-field writers under the child consumer and
   *  flushes it, then forwards the corresponding next-level fields to the
   *  chained TermsHash, if any. */
  @Override
  synchronized void flush(Map<InvertedDocConsumerPerThread,Collection<InvertedDocConsumerPerField>> threadsAndFields, final SegmentWriteState state) throws IOException {
    Map<TermsHashConsumerPerThread,Collection<TermsHashConsumerPerField>> childThreadsAndFields = new HashMap<TermsHashConsumerPerThread,Collection<TermsHashConsumerPerField>>();
    Map<InvertedDocConsumerPerThread,Collection<InvertedDocConsumerPerField>> nextThreadsAndFields;

    if (nextTermsHash != null)
      nextThreadsAndFields = new HashMap<InvertedDocConsumerPerThread,Collection<InvertedDocConsumerPerField>>();
    else
      nextThreadsAndFields = null;

    for (final Map.Entry<InvertedDocConsumerPerThread,Collection<InvertedDocConsumerPerField>> entry : threadsAndFields.entrySet()) {

      TermsHashPerThread perThread = (TermsHashPerThread) entry.getKey();

      Collection<InvertedDocConsumerPerField> fields = entry.getValue();

      Iterator<InvertedDocConsumerPerField> fieldsIt = fields.iterator();
      Collection<TermsHashConsumerPerField> childFields = new HashSet<TermsHashConsumerPerField>();
      Collection<InvertedDocConsumerPerField> nextChildFields;

      if (nextTermsHash != null)
        nextChildFields = new HashSet<InvertedDocConsumerPerField>();
      else
        nextChildFields = null;

      while(fieldsIt.hasNext()) {
        TermsHashPerField perField = (TermsHashPerField) fieldsIt.next();
        childFields.add(perField.consumer);
        if (nextTermsHash != null)
          nextChildFields.add(perField.nextPerField);
      }

      childThreadsAndFields.put(perThread.consumer, childFields);
      if (nextTermsHash != null)
        nextThreadsAndFields.put(perThread.nextPerThread, nextChildFields);
    }

    consumer.flush(childThreadsAndFields, state);

    if (nextTermsHash != null)
      nextTermsHash.flush(nextThreadsAndFields, state);
  }

  @Override
  synchronized public boolean freeRAM() {
    return false;
  }
}
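
For context, the chain described in the class comment is assembled by DocumentsWriter's default indexing chain. The sketch below is an assumption modeled on the stock Lucene 3.x sources, not taken from this revision's patches: the primary TermsHash drives FreqProxTermsWriter for postings and chains to a secondary TermsHash driving TermVectorsTermsWriter for term vectors. The variable documentsWriter is assumed to be in scope.

    // Hypothetical wiring, modeled on Lucene 3.x DocumentsWriter's
    // default indexing chain.
    final TermsHashConsumer termVectorsWriter = new TermVectorsTermsWriter(documentsWriter);
    final TermsHashConsumer freqProxWriter = new FreqProxTermsWriter();

    // Only the primary tracks allocations; the secondary sits at the end of
    // the chain, so it passes null as its own nextTermsHash.
    final InvertedDocConsumer termsHash =
        new TermsHash(documentsWriter, true, freqProxWriter,
            new TermsHash(documentsWriter, false, termVectorsWriter, null));

This wiring is why nearly every method above guards on nextTermsHash != null: the secondary hash at the end of the chain has no successor to forward to.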