1
package org.apache.lucene.index;
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
import org.apache.lucene.util.LuceneTestCase;
21
import org.apache.lucene.analysis.Analyzer;
22
import org.apache.lucene.analysis.SimpleAnalyzer;
23
import org.apache.lucene.analysis.standard.StandardAnalyzer;
24
import org.apache.lucene.document.Document;
25
import org.apache.lucene.document.Field;
26
import org.apache.lucene.document.Field.Index;
27
import org.apache.lucene.document.Field.Store;
28
import org.apache.lucene.store.Directory;
29
import org.apache.lucene.store.FSDirectory;
30
import org.apache.lucene.store.RAMDirectory;
33
import java.io.IOException;
34
import java.util.EmptyStackException;
35
import java.util.Random;
36
import java.util.Stack;
39
* Tests for the "IndexModifier" class, including accesses from two threads at the
42
* @author Daniel Naber
45
// Unit tests for the (legacy) IndexModifier class, which wraps IndexWriter and
// IndexReader behind a single add/delete API. Per the comment at the top of the
// file, two-thread access is also exercised (see IndexThread below).
// NOTE(review): this file is corrupted — original line numbers are interleaved as
// stray lines and several source lines are missing. Restore from the upstream
// Lucene repository before compiling; comments below flag the visible gaps.
public class TestIndexModifier extends LuceneTestCase {
47
// Counter used to give each generated test document distinct "body" content.
private int docCount = 0;
49
// Every test document also carries all="x", so this term matches every
// document in the index — used for bulk deleteDocuments() calls.
private final Term allDocTerm = new Term("all", "x");
51
// Exercises the basic IndexModifier lifecycle against an in-memory directory:
// add/delete documents, check docCount() bookkeeping, and get/set the tuning
// properties (compound file, buffered docs, field length, merge factor).
// NOTE(review): lines are missing throughout (e.g. the delete calls that drop
// docCount from 2 to 1, the optimize/flush calls, the closing of the index,
// and the try { ... } opening that pairs with the catch at the end).
public void testIndex() throws IOException {
52
Directory ramDir = new RAMDirectory();
53
IndexModifier i = new IndexModifier(ramDir, new StandardAnalyzer(), true);
54
i.addDocument(getDoc());
55
assertEquals(1, i.docCount());
57
// Per-document analyzer override should also be counted normally.
i.addDocument(getDoc(), new SimpleAnalyzer());
58
assertEquals(2, i.docCount());
60
assertEquals(2, i.docCount());
63
// NOTE(review): the deleteDocument(s) call that reduces the count to 1 is
// missing between the previous assert and this one.
assertEquals(1, i.docCount());
65
assertEquals(1, i.docCount());
66
i.addDocument(getDoc());
67
i.addDocument(getDoc());
69
// depend on merge policy - assertEquals(3, i.docCount());
70
// allDocTerm matches every document, so this empties the index.
i.deleteDocuments(allDocTerm);
71
assertEquals(0, i.docCount());
73
assertEquals(0, i.docCount());
76
// Default-property checks on a freshly (re)opened IndexModifier.
assertNull(i.getInfoStream());
77
assertTrue(i.getUseCompoundFile());
78
assertEquals(IndexWriter.DISABLE_AUTO_FLUSH, i.getMaxBufferedDocs());
79
assertEquals(10000, i.getMaxFieldLength());
80
assertEquals(10, i.getMergeFactor());
81
// test setting properties:
82
i.setMaxBufferedDocs(100);
84
// NOTE(review): the setMergeFactor(25) call asserted below appears to be on a
// missing line (original line 83).
i.setMaxFieldLength(250000);
85
i.addDocument(getDoc());
86
i.setUseCompoundFile(false);
88
assertEquals(100, i.getMaxBufferedDocs());
89
assertEquals(25, i.getMergeFactor());
90
assertEquals(250000, i.getMaxFieldLength());
91
assertFalse(i.getUseCompoundFile());
93
// test setting properties when internally the reader is opened:
94
i.deleteDocuments(allDocTerm);
95
i.setMaxBufferedDocs(100);
97
i.setMaxFieldLength(250000);
98
i.addDocument(getDoc());
99
i.setUseCompoundFile(false);
101
assertEquals(100, i.getMaxBufferedDocs());
102
assertEquals(25, i.getMergeFactor());
103
assertEquals(250000, i.getMaxFieldLength());
104
assertFalse(i.getUseCompoundFile());
110
// NOTE(review): the i.close() and the try { ... } block (presumably an
// operation attempted after close) pairing with this catch are on missing
// lines 105-109.
} catch (IllegalStateException e) {
111
// expected exception
115
// Verifies that IndexModifier can be subclassed: adds five documents via a
// PowerIndex subclass and checks docFreq() of the match-all term is 5.
public void testExtendedIndex() throws IOException {
116
Directory ramDir = new RAMDirectory();
117
PowerIndex powerIndex = new PowerIndex(ramDir, new StandardAnalyzer(), true);
118
powerIndex.addDocument(getDoc());
119
powerIndex.addDocument(getDoc());
120
powerIndex.addDocument(getDoc());
121
powerIndex.addDocument(getDoc());
122
powerIndex.addDocument(getDoc());
124
// Each document added by getDoc() carries all="x", hence docFreq == 5.
// NOTE(review): the powerIndex.close() / method closing brace on original
// lines 125-127 are missing.
assertEquals(5, powerIndex.docFreq(allDocTerm));
128
// Builds a test document: body = current docCount value, all = "x" (the
// match-all marker field). Both fields are stored and indexed un-tokenized.
// NOTE(review): the docCount increment and the "return doc;" statement
// (original lines 132-134) are missing from this extraction.
private Document getDoc() {
129
Document doc = new Document();
130
doc.add(new Field("body", Integer.toString(docCount), Field.Store.YES, Field.Index.UN_TOKENIZED));
131
doc.add(new Field("all", "x", Field.Store.YES, Field.Index.UN_TOKENIZED));
136
// Runs the two-thread stress test three times with increasing maximum random
// wait between operations (0ms, 10ms, 50ms) to vary thread interleaving.
public void testIndexWithThreads() throws IOException {
137
testIndexInternal(0);
138
testIndexInternal(10);
139
testIndexInternal(50);
142
// Two-thread stress test: two IndexThread instances hammer one shared
// IndexModifier on an on-disk FSDirectory (deliberately on disk so file-lock
// problems would surface), then the net added-minus-deleted count is verified
// against docCount().
// NOTE(review): several lines are missing — the "if (tempDir == null)" guard
// before the throw, the thread start()/wait loop body, the index.close() and
// rmDir() cleanup, and the try pairing with the trailing catch.
private void testIndexInternal(int maxWait) throws IOException {
143
final boolean create = true;
144
//Directory rd = new RAMDirectory();
145
// work on disk to make sure potential lock problems are tested:
146
String tempDir = System.getProperty("java.io.tmpdir");
148
// NOTE(review): guard condition (original line 147) missing above this throw.
throw new IOException("java.io.tmpdir undefined, cannot run test");
149
File indexDir = new File(tempDir, "lucenetestindex");
150
Directory rd = FSDirectory.getDirectory(indexDir);
152
// Reset the shared id stack so runs don't see ids from a previous iteration.
IndexThread.idStack.clear();
153
IndexModifier index = new IndexModifier(rd, new StandardAnalyzer(), create);
154
IndexThread thread1 = new IndexThread(index, maxWait, 1);
156
IndexThread thread2 = new IndexThread(index, maxWait, 2);
158
// Busy-wait (with a sleep on a missing line) until both workers finish.
while(thread1.isAlive() || thread2.isAlive()) {
161
} catch (InterruptedException e) {
162
throw new RuntimeException(e);
166
// Net document count must equal adds minus deletes across both threads.
int added = thread1.added + thread2.added;
167
int deleted = thread1.deleted + thread2.deleted;
168
assertEquals(added-deleted, index.docCount());
174
// NOTE(review): index.close() and a try block exercising use-after-close are
// on missing lines 169-173.
} catch(IllegalStateException e) {
175
// expected exception
180
// Deletes the files inside the given directory (test-index cleanup).
// NOTE(review): loop body and the dir.delete() presumably following it are on
// missing lines. Also note listFiles() can return null if dir does not exist —
// verify the original handles that before reuse.
private void rmDir(File dir) {
181
File[] files = dir.listFiles();
182
for (int i = 0; i < files.length; i++) {
188
// Minimal IndexModifier subclass proving the class is extensible: exposes
// docFreq(Term) by delegating to the protected indexReader, synchronizing on
// the directory as IndexModifier's internals do.
private class PowerIndex extends IndexModifier {
189
public PowerIndex(Directory dir, Analyzer analyzer, boolean create) throws IOException {
190
super(dir, analyzer, create);
192
public int docFreq(Term term) throws IOException {
193
synchronized(directory) {
196
// NOTE(review): the assureOpen()/createIndexReader() calls on missing lines
// 194-195 are what make indexReader valid here.
return indexReader.docFreq(term);
203
// Worker thread for the concurrency test: repeatedly (ITERATIONS times) picks
// a random action — add a document (pushing its id on the shared stack) or
// delete the most recently added document (popping the stack) — with an
// optional random sleep between operations. The shared static id stack keeps
// the two threads from deleting the same document twice.
// NOTE(review): class runs past the visible end of this chunk and has many
// missing lines (field declarations for added/deleted/id, run() header,
// branch openings, sleep call, stack pop try-opening, etc.).
class IndexThread extends Thread {
205
private final static int ITERATIONS = 500; // iterations of thread test
208
// Shared LIFO of ids added by either thread; guarded by synchronization at
// use sites (see getDocument below). NOTE(review): raw Stack — pre-generics code.
static Stack idStack = new Stack();
213
private int maxWait = 10;
214
private IndexModifier index;
215
private int threadNumber;
216
private Random random;
218
IndexThread(IndexModifier index, int maxWait, int threadNumber) {
220
// NOTE(review): "this.index = index;" (original line 219) is missing.
this.maxWait = maxWait;
221
this.threadNumber = threadNumber;
222
// TODO: test case is not reproducible despite pseudo-random numbers:
223
random = new Random(101+threadNumber); // constant seed for better reproducability
228
for(int i = 0; i < ITERATIONS; i++) {
229
// rand in [0,100]; branch thresholds below choose between optimize/add/delete.
int rand = random.nextInt(101);
232
} else if (rand < 60) {
233
// Add a new document and remember its id for a later delete.
Document doc = getDocument();
234
index.addDocument(doc);
235
idStack.push(doc.get("id"));
238
// we just delete the last document added and remove it
239
// from the id stack so that it won't be removed twice:
242
delId = (String)idStack.pop();
243
} catch (EmptyStackException e) {
246
Term delTerm = new Term("id", new Integer(delId).toString());
247
int delCount = index.deleteDocuments(delTerm);
249
// Exactly one document carries each id, so any other delete count is a bug.
throw new RuntimeException("Internal error: " + threadNumber + " deleted " + delCount +
250
" documents, term=" + delTerm);
256
// Random pause to vary thread interleaving (maxWait set per test run).
rand = random.nextInt(maxWait);
257
//System.out.println("waiting " + rand + "ms");
259
} catch (InterruptedException e) {
260
throw new RuntimeException(e);
264
} catch (IOException e) {
265
throw new RuntimeException(e);
269
// Builds a document with a unique "id" (allocated under a class-level lock so
// both threads draw from one sequence), two random "content" fields, and the
// all="x" marker field.
private Document getDocument() {
270
Document doc = new Document();
271
// Lock on the Class object: the shared id counter (on a missing line) must be
// read and incremented atomically across both threads.
synchronized (getClass()) {
272
doc.add(new Field("id", Integer.toString(id), Field.Store.YES,
273
Field.Index.UN_TOKENIZED));
277
// add random stuff:
doc.add(new Field("content", Integer.toString(random.nextInt(1000)), Field.Store.YES,
278
Field.Index.TOKENIZED));
279
doc.add(new Field("content", Integer.toString(random.nextInt(1000)), Field.Store.YES,
280
Field.Index.TOKENIZED));
281
doc.add(new Field("all", "x", Field.Store.YES, Field.Index.TOKENIZED));