FileContentRecord record = new FileContentRecord();
try
{
- File file = new File( repositoryDir, path );
record.setRepositoryId( this.repository.getId() );
record.setFilename( path );
- record.setContents( FileUtils.readFileToString( file, null ) );
// Test for possible artifact reference syntax.
try
index.modifyRecord( record );
}
- catch ( IOException e )
- {
- triggerConsumerError( READ_CONTENT, "Unable to read file contents: " + e.getMessage() );
- }
catch ( RepositoryIndexException e )
{
triggerConsumerError( INDEX_ERROR, "Unable to index file contents: " + e.getMessage() );
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.maven.archiva.indexer.lucene.analyzers.FilenamesTokenizer;
+import org.apache.maven.archiva.indexer.lucene.analyzers.ArtifactIdTokenizer;
+import org.apache.maven.archiva.indexer.lucene.analyzers.GroupIdTokenizer;
import java.io.Reader;
return new FilenamesTokenizer( reader );
}
+ if ( FileContentKeys.ARTIFACTID.equals( field ))
+ {
+ return new ArtifactIdTokenizer(reader);
+ }
+
+ if ( FileContentKeys.GROUPID.equals( field ) )
+ {
+ return new GroupIdTokenizer(reader);
+ }
+
return STANDARD.tokenStream( field, reader );
}
}
public class FileContentConverter
implements LuceneEntryConverter
{
-
public Document convert( LuceneRepositoryContentRecord record )
{
if ( !( record instanceof FileContentRecord ) )
doc.addFieldTokenized( ArtifactKeys.TYPE, filecontent.getArtifact().getType() );
doc.addFieldUntokenized( ArtifactKeys.CLASSIFIER, filecontent.getArtifact().getClassifier() );
}
-
+
doc.addFieldTokenized( FileContentKeys.FILENAME, filecontent.getFilename() );
- doc.addFieldTokenized( FileContentKeys.CONTENT, filecontent.getContents() );
return doc.getDocument();
}
// Filecontent Specifics
record.setFilename( document.get( FileContentKeys.FILENAME ) );
- record.setContents( document.get( FileContentKeys.CONTENT ) );
return record;
}
{
analyzer = new FileContentAnalyzer();
converter = new FileContentConverter();
- queryParser = new MultiFieldQueryParser( new String[] { FileContentKeys.FILENAME, FileContentKeys.CONTENT },
- analyzer );
+ queryParser = new MultiFieldQueryParser( new String[] {
+ FileContentKeys.FILENAME,
+ FileContentKeys.ARTIFACTID,
+ FileContentKeys.GROUPID,
+ FileContentKeys.ARTIFACTID_EXACT,
+ FileContentKeys.GROUPID_EXACT,
+ FileContentKeys.VERSION,
+ FileContentKeys.VERSION_EXACT},
+ analyzer );
+ //We prefer the narrowing approach to search results.
+ queryParser.setDefaultOperator(MultiFieldQueryParser.Operator.AND);
}
public String getId()
public static final String ID = "filecontent";
public static final String FILENAME = "filename";
-
- public static final String CONTENT = "content";
}
*/
private ArchivaArtifact artifact;
- private String contents;
-
public String getRepositoryId()
{
return repositoryId;
this.repositoryId = repositoryId;
}
- public String getContents()
- {
- return contents;
- }
-
- public void setContents( String contents )
- {
- this.contents = contents;
- }
-
public String getPrimaryKey()
{
return repositoryId + ":" + filename;
--- /dev/null
+package org.apache.maven.archiva.indexer.lucene.analyzers;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.io.Reader;
+import org.apache.lucene.analysis.CharTokenizer;
+
+/**
+ * Lucene Tokenizer for {@link ArtifactKeys#ARTIFACTID} fields.
+ * Splits an artifact id on the '-' separator so that, e.g.,
+ * "atlassian-plugins-core" yields the tokens "atlassian", "plugins", "core".
+ * NOTE(review): this tokenizer is selected for the FileContentKeys.ARTIFACTID
+ * field in FileContentAnalyzer — confirm whether the {@link ArtifactKeys#ARTIFACTID}
+ * link above is the intended reference.
+ */
+public class ArtifactIdTokenizer extends CharTokenizer
+{
+ public ArtifactIdTokenizer( Reader reader )
+ {
+ super( reader );
+ }
+
+ /**
+ * Break on "-" for "atlassian-plugins-core"
+ *
+ * @param c the character under examination
+ * @return true when {@code c} belongs to a token, i.e. any character except '-'
+ */
+ @Override
+ protected boolean isTokenChar(char c)
+ {
+ return (c != '-');
+ }
+}
FileContentRecord record = new FileContentRecord();
record.setRepositoryId( "repo1.mirror" );
record.setArtifact( artifact );
- record.setContents( "org.apache.archiva:archiva-test:1.0:jar org.apache.archiva.test.MyClassName" );
record.setFilename( "archiva-test-1.0.jar" );
results.addHit( record );
FileContentRecord record = new FileContentRecord();
record.setRepositoryId( "repo1.mirror" );
record.setArtifact( artifact );
- record.setContents( "org.apache.archiva:archiva-test:1.0:jar" );
record.setFilename( "archiva-test-1.0.jar" );
results.addHit( record );