* Rework the clone_blocks table and adapt DbCloneIndex accordingly.
* Add purging of old clone blocks when snapshots are deleted.
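
In outline, SonarCloneIndex replaces CombinedCloneIndex: it always keeps a
PackedMemoryCloneIndex and, for cross-project analysis, additionally delegates
to DbCloneIndex. A sketch of the intended call sequence, assuming only the
names introduced in this patch:

    // Wiring (see SonarEngine below):
    SonarCloneIndex index = isCrossProject(project)
        ? new SonarCloneIndex(new DbCloneIndex(dbSession, resourcePersister, project))
        : new SonarCloneIndex();

    // Indexing phase: blocks go to memory and, in cross-project mode, to clone_blocks.
    index.insert(resource, blocks);

    // Detection phase: getByResource() pre-loads matching blocks from the database,
    // so that getBySequenceHash() can merge in-memory and persisted blocks.
    Collection<Block> fileBlocks = index.getByResource(resource, resourceKey);
    List<CloneGroup> clones = OriginalCloneDetectionAlgorithm.detect(index, fileBlocks);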
import java.io.File;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import org.sonar.api.batch.SensorContext;
import org.sonar.api.database.DatabaseSession;
import org.sonar.api.database.model.ResourceModel;
-import org.sonar.api.database.model.Snapshot;
import org.sonar.api.measures.CoreMetrics;
import org.sonar.api.measures.Measure;
import org.sonar.api.resources.InputFile;
import org.sonar.duplications.block.BlockChunker;
import org.sonar.duplications.detector.original.OriginalCloneDetectionAlgorithm;
import org.sonar.duplications.index.CloneGroup;
-import org.sonar.duplications.index.CloneIndex;
import org.sonar.duplications.index.ClonePart;
-import org.sonar.duplications.index.PackedMemoryCloneIndex;
import org.sonar.duplications.java.JavaStatementBuilder;
import org.sonar.duplications.java.JavaTokenProducer;
import org.sonar.duplications.statement.Statement;
import org.sonar.duplications.statement.StatementChunker;
import org.sonar.duplications.token.TokenChunker;
import org.sonar.duplications.token.TokenQueue;
-import org.sonar.plugins.cpd.index.CombinedCloneIndex;
import org.sonar.plugins.cpd.index.DbCloneIndex;
-
-import com.google.common.collect.Lists;
+import org.sonar.plugins.cpd.index.SonarCloneIndex;
public class SonarEngine implements CpdEngine {
}
// Create index
- CloneIndex index = new PackedMemoryCloneIndex();
+ final SonarCloneIndex index;
if (isCrossProject(project)) {
Logs.INFO.info("Enabled cross-project analysis");
- Snapshot currentSnapshot = resourcePersister.getSnapshot(project);
- Snapshot lastSnapshot = resourcePersister.getLastSnapshot(currentSnapshot, false);
- DbCloneIndex db = new DbCloneIndex(dbSession, currentSnapshot.getId(), lastSnapshot == null ? null : lastSnapshot.getId());
- index = new CombinedCloneIndex(index, db);
+ index = new SonarCloneIndex(new DbCloneIndex(dbSession, resourcePersister, project));
+ } else {
+ index = new SonarCloneIndex();
}
TokenChunker tokenChunker = JavaTokenProducer.build();
BlockChunker blockChunker = new BlockChunker(BLOCK_SIZE);
for (InputFile inputFile : inputFiles) {
+ Resource resource = getResource(inputFile);
+ String resourceKey = getFullKey(project, resource);
+
File file = inputFile.getFile();
TokenQueue tokenQueue = tokenChunker.chunk(file);
List<Statement> statements = statementChunker.chunk(tokenQueue);
- Resource resource = getResource(inputFile);
- List<Block> blocks = blockChunker.chunk(getFullKey(project, resource), statements);
- for (Block block : blocks) {
- index.insert(block);
- }
+ List<Block> blocks = blockChunker.chunk(resourceKey, statements);
+ index.insert(resource, blocks);
}
// Detect
for (InputFile inputFile : inputFiles) {
Resource resource = getResource(inputFile);
+ String resourceKey = getFullKey(project, resource);
- List<Block> fileBlocks = Lists.newArrayList(index.getByResourceId(getFullKey(project, resource)));
+ Collection<Block> fileBlocks = index.getByResource(resource, resourceKey);
List<CloneGroup> clones = OriginalCloneDetectionAlgorithm.detect(index, fileBlocks);
if (!clones.isEmpty()) {
// Save
+++ /dev/null
-/*
- * Sonar, open source software quality management tool.
- * Copyright (C) 2008-2011 SonarSource
- * mailto:contact AT sonarsource DOT com
- *
- * Sonar is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 3 of the License, or (at your option) any later version.
- *
- * Sonar is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with Sonar; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-package org.sonar.plugins.cpd.index;
-
-import java.util.Collection;
-import java.util.List;
-
-import org.sonar.duplications.block.Block;
-import org.sonar.duplications.block.ByteArray;
-import org.sonar.duplications.index.AbstractCloneIndex;
-import org.sonar.duplications.index.CloneIndex;
-
-import com.google.common.collect.Lists;
-
-public class CombinedCloneIndex extends AbstractCloneIndex {
-
- private final CloneIndex mem;
- private final DbCloneIndex db;
-
- public CombinedCloneIndex(CloneIndex mem, DbCloneIndex db) {
- this.mem = mem;
- this.db = db;
- }
-
- public Collection<Block> getByResourceId(String resourceId) {
- db.prepareCache(resourceId);
- return mem.getByResourceId(resourceId);
- }
-
- public Collection<Block> getBySequenceHash(ByteArray hash) {
- List<Block> result = Lists.newArrayList();
- result.addAll(mem.getBySequenceHash(hash));
- result.addAll(db.getBySequenceHash(hash));
- return result;
- }
-
- public void insert(Block block) {
- mem.insert(block);
- db.insert(block);
- }
-
-}
import java.util.List;
import java.util.Map;
+import javax.persistence.Query;
+
+import org.hibernate.ejb.HibernateQuery;
+import org.hibernate.transform.Transformers;
import org.sonar.api.database.DatabaseSession;
+import org.sonar.api.database.model.Snapshot;
+import org.sonar.api.resources.Project;
+import org.sonar.api.resources.Resource;
+import org.sonar.batch.index.ResourcePersister;
import org.sonar.duplications.block.Block;
import org.sonar.duplications.block.ByteArray;
-import org.sonar.duplications.index.AbstractCloneIndex;
import org.sonar.jpa.entity.CloneBlock;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
-public class DbCloneIndex extends AbstractCloneIndex {
+public class DbCloneIndex {
+
+ private final Map<ByteArray, Collection<Block>> cache = Maps.newHashMap();
- private final Map<ByteArray, List<Block>> cache = Maps.newHashMap();
+ private final DatabaseSession session;
+ private final ResourcePersister resourcePersister;
+ private final int currentProjectSnapshotId;
+ private final Integer lastSnapshotId;
- private DatabaseSession session;
- private int currentSnapshotId;
- private Integer lastSnapshotId;
+ public DbCloneIndex(DatabaseSession session, ResourcePersister resourcePersister, Project currentProject) {
+ this.session = session;
+ this.resourcePersister = resourcePersister;
+ Snapshot currentSnapshot = resourcePersister.getSnapshotOrFail(currentProject);
+ Snapshot lastSnapshot = resourcePersister.getLastSnapshot(currentSnapshot, false);
+ this.currentProjectSnapshotId = currentSnapshot.getId();
+ this.lastSnapshotId = lastSnapshot == null ? null : lastSnapshot.getId();
+ }
- public DbCloneIndex(DatabaseSession session, Integer currentSnapshotId, Integer lastSnapshotId) {
+ /**
+ * For tests.
+ */
+ DbCloneIndex(DatabaseSession session, ResourcePersister resourcePersister, Integer currentProjectSnapshotId, Integer prevSnapshotId) {
this.session = session;
- this.currentSnapshotId = currentSnapshotId;
- this.lastSnapshotId = lastSnapshotId;
+ this.resourcePersister = resourcePersister;
+ this.currentProjectSnapshotId = currentProjectSnapshotId;
+ this.lastSnapshotId = prevSnapshotId;
}
- public void prepareCache(String resourceKey) {
- String sql = "SELECT block.id, hash, block.snapshot_id, resource_key, index_in_file, start_line, end_line FROM clone_blocks AS block, snapshots AS snapshot" +
- " WHERE block.snapshot_id=snapshot.id AND snapshot.islast=true" +
- " AND hash IN ( SELECT hash FROM clone_blocks WHERE resource_key = :resource_key AND snapshot_id = :current_snapshot_id )";
+ int getSnapshotIdFor(Resource resource) {
+ return resourcePersister.getSnapshotOrFail(resource).getId();
+ }
+
+ public void prepareCache(Resource resource) {
+ int resourceSnapshotId = getSnapshotIdFor(resource);
+
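+ // Among blocks that belong to the last snapshots of all resources, find those
+ // whose hash also occurs in the blocks just saved for this resource in the
+ // current analysis: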
+ // Order of columns is important - it must match the positional reads below!
+ String sql = "SELECT hash, resource.kee, index_in_file, start_line, end_line" +
+ " FROM clone_blocks AS block, snapshots AS snapshot, projects AS resource" +
+ " WHERE block.snapshot_id=snapshot.id AND snapshot.islast=true AND snapshot.project_id=resource.id" +
+ " AND hash IN ( SELECT hash FROM clone_blocks WHERE snapshot_id = :resource_snapshot_id AND project_snapshot_id = :current_project_snapshot_id )";
if (lastSnapshotId != null) {
// Exclude blocks from the previous snapshot of the current project
- sql += " AND snapshot.id != " + lastSnapshotId;
+ sql += " AND block.project_snapshot_id != :last_project_snapshot_id";
}
- List<CloneBlock> blocks = session.getEntityManager()
- .createNativeQuery(sql, CloneBlock.class)
- .setParameter("resource_key", resourceKey)
- .setParameter("current_snapshot_id", currentSnapshotId)
- .getResultList();
+ Query query = session.getEntityManager().createNativeQuery(sql)
+ .setParameter("resource_snapshot_id", resourceSnapshotId)
+ .setParameter("current_project_snapshot_id", currentProjectSnapshotId);
+ if (lastSnapshotId != null) {
+ query.setParameter("last_project_snapshot_id", lastSnapshotId);
+ }
+ // Ugly hack to map the results of a native SQL query onto plain lists (MyBatis is coming soon)
+ ((HibernateQuery) query).getHibernateQuery().setResultTransformer(Transformers.TO_LIST);
+ List<List<Object>> blocks = query.getResultList();
cache.clear();
- for (CloneBlock dbBlock : blocks) {
- Block block = new Block(dbBlock.getResourceKey(), new ByteArray(dbBlock.getHash()), dbBlock.getIndexInFile(), dbBlock.getStartLine(), dbBlock.getEndLine());
+ for (List<Object> dbBlock : blocks) {
+ String hash = (String) dbBlock.get(0);
+ String resourceKey = (String) dbBlock.get(1);
+ int indexInFile = (Integer) dbBlock.get(2);
+ int startLine = (Integer) dbBlock.get(3);
+ int endLine = (Integer) dbBlock.get(4);
+
+ Block block = new Block(resourceKey, new ByteArray(hash), indexInFile, startLine, endLine);
- List<Block> sameHash = cache.get(block.getBlockHash());
+ // Group blocks by hash
+ Collection<Block> sameHash = cache.get(block.getBlockHash());
if (sameHash == null) {
sameHash = Lists.newArrayList();
cache.put(block.getBlockHash(), sameHash);
}
sameHash.add(block);
}
- public Collection<Block> getByResourceId(String resourceId) {
- throw new UnsupportedOperationException();
- }
-
- public Collection<Block> getBySequenceHash(ByteArray sequenceHash) {
- List<Block> result = cache.get(sequenceHash);
+ public Collection<Block> getByHash(ByteArray hash) {
+ Collection<Block> result = cache.get(hash);
if (result != null) {
return result;
} else {
- // not in cache
return Collections.emptyList();
}
}
- public void insert(Block block) {
- CloneBlock dbBlock = new CloneBlock(currentSnapshotId,
- block.getBlockHash().toString(),
- block.getResourceId(),
- block.getIndexInFile(),
- block.getFirstLineNumber(),
- block.getLastLineNumber());
- session.save(dbBlock);
+ public void insert(Resource resource, Collection<Block> blocks) {
+ int resourceSnapshotId = getSnapshotIdFor(resource);
+ for (Block block : blocks) {
+ CloneBlock dbBlock = new CloneBlock(
+ currentProjectSnapshotId,
+ resourceSnapshotId,
+ block.getBlockHash().toString(),
+ block.getIndexInFile(),
+ block.getFirstLineNumber(),
+ block.getLastLineNumber());
+ session.save(dbBlock);
+ }
+ session.commit();
}
}
--- /dev/null
+/*
+ * Sonar, open source software quality management tool.
+ * Copyright (C) 2008-2011 SonarSource
+ * mailto:contact AT sonarsource DOT com
+ *
+ * Sonar is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 3 of the License, or (at your option) any later version.
+ *
+ * Sonar is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Sonar; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+package org.sonar.plugins.cpd.index;
+
+import java.util.Collection;
+import java.util.List;
+
+import org.sonar.api.resources.Resource;
+import org.sonar.duplications.block.Block;
+import org.sonar.duplications.block.ByteArray;
+import org.sonar.duplications.index.AbstractCloneIndex;
+import org.sonar.duplications.index.CloneIndex;
+import org.sonar.duplications.index.PackedMemoryCloneIndex;
+
+import com.google.common.collect.Lists;
+
+public class SonarCloneIndex extends AbstractCloneIndex {
+
+ private final CloneIndex mem = new PackedMemoryCloneIndex();
+ private final DbCloneIndex db;
+
+ public SonarCloneIndex() {
+ this(null);
+ }
+
+ public SonarCloneIndex(DbCloneIndex db) {
+ this.db = db;
+ }
+
+ public void insert(Resource resource, Collection<Block> blocks) {
+ for (Block block : blocks) {
+ mem.insert(block);
+ }
+ if (db != null) {
+ db.insert(resource, blocks);
+ }
+ }
+
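+ /**
+ * Pre-loads from the database the blocks that share a hash with the given
+ * resource (when a DbCloneIndex is configured), then returns the in-memory
+ * blocks of this resource.
+ */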
+ public Collection<Block> getByResource(Resource resource, String resourceKey) {
+ if (db != null) {
+ db.prepareCache(resource);
+ }
+ return mem.getByResourceId(resourceKey);
+ }
+
+ public Collection<Block> getBySequenceHash(ByteArray hash) {
+ if (db == null) {
+ return mem.getBySequenceHash(hash);
+ } else {
+ List<Block> result = Lists.newArrayList(mem.getBySequenceHash(hash));
+ result.addAll(db.getByHash(hash));
+ return result;
+ }
+ }
+
+ public Collection<Block> getByResourceId(String resourceId) {
+ throw new UnsupportedOperationException();
+ }
+
+ public void insert(Block block) {
+ throw new UnsupportedOperationException();
+ }
+
+}
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertThat;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.spy;
+import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;
-import org.junit.Before;
import org.junit.Test;
+import org.sonar.api.resources.JavaFile;
+import org.sonar.api.resources.Resource;
import org.sonar.duplications.block.Block;
import org.sonar.duplications.block.ByteArray;
import org.sonar.jpa.test.AbstractDbUnitTestCase;
private DbCloneIndex index;
- @Before
- public void setUp() {
- index = new DbCloneIndex(getSession(), 5, 4);
- }
-
- @Test(expected = UnsupportedOperationException.class)
- public void shouldNotGetByResource() {
- index.getByResourceId("foo");
- }
-
@Test
public void shouldGetByHash() {
- setupData("fixture");
+ Resource resource = new JavaFile("foo");
+ index = spy(new DbCloneIndex(getSession(), null, 9, 7));
+ doReturn(10).when(index).getSnapshotIdFor(resource);
+ setupData("shouldGetByHash");
- index.prepareCache("foo");
- Collection<Block> blocks = index.getBySequenceHash(new ByteArray("aa"));
+ index.prepareCache(resource);
+ Collection<Block> blocks = index.getByHash(new ByteArray("aa"));
Iterator<Block> blocksIterator = blocks.iterator();
assertThat(blocks.size(), is(1));
Block block = blocksIterator.next();
- assertThat(block.getResourceId(), is("bar-last"));
+ assertThat("block resourceId", block.getResourceId(), is("bar-last"));
+ assertThat("block hash", block.getBlockHash(), is(new ByteArray("aa")));
+ assertThat("block index in file", block.getIndexInFile(), is(0));
+ assertThat("block start line", block.getFirstLineNumber(), is(1));
+ assertThat("block end line", block.getLastLineNumber(), is(2));
}
@Test
public void shouldInsert() {
- setupData("fixture");
+ Resource resource = new JavaFile("foo");
+ index = spy(new DbCloneIndex(getSession(), null, 1, null));
+ doReturn(2).when(index).getSnapshotIdFor(resource);
+ setupData("shouldInsert");
- index.insert(new Block("baz", new ByteArray("bb"), 0, 0, 1));
+ index.insert(resource, Arrays.asList(new Block("foo", new ByteArray("bb"), 0, 1, 2)));
checkTables("shouldInsert", "clone_blocks");
}
+++ /dev/null
-<dataset>
-
- <snapshots id="1" status="P" islast="false" />
- <snapshots id="2" status="P" islast="true" />
-
- <snapshots id="3" status="P" islast="false" />
- <snapshots id="4" status="P" islast="true" />
- <snapshots id="5" status="U" islast="false" />
-
- <!-- Old snapshot of another project -->
- <clone_blocks id="1" snapshot_id="1" hash="aa" resource_key="bar-old" index_in_file="0" start_line="0" end_line="1" />
-
- <!-- Last snapshot of another project -->
- <clone_blocks id="2" snapshot_id="2" hash="aa" resource_key="bar-last" index_in_file="0" start_line="0" end_line="1" />
-
- <!-- Old snapshot of current project -->
- <clone_blocks id="3" snapshot_id="3" hash="aa" resource_key="foo-old" index_in_file="0" start_line="0" end_line="1" />
-
- <!-- Last snapshot of current project -->
- <clone_blocks id="4" snapshot_id="4" hash="aa" resource_key="foo-last" index_in_file="0" start_line="0" end_line="1" />
-
- <!-- New snapshot of current project -->
- <clone_blocks id="5" snapshot_id="5" hash="aa" resource_key="foo" index_in_file="0" start_line="0" end_line="1" />
-
-</dataset>
--- /dev/null
+<dataset>
+
+ <snapshots id="1" status="P" islast="false" project_id="0" />
+ <snapshots id="2" status="P" islast="false" project_id="1" />
+ <projects id="1" kee="bar-old" enabled="true" scope="FIL" qualifier="CLA" />
+
+ <snapshots id="3" status="P" islast="true" />
+ <snapshots id="4" status="P" islast="true" project_id="2" />
+ <projects id="2" kee="bar-last" enabled="true" scope="FIL" qualifier="CLA" />
+
+ <snapshots id="5" status="P" islast="false" />
+ <snapshots id="6" status="P" islast="false" project_id="3" />
+ <projects id="3" kee="foo-old" enabled="true" scope="FIL" qualifier="CLA" />
+
+ <snapshots id="7" status="P" islast="true" />
+ <snapshots id="8" status="P" islast="true" project_id="4" />
+ <projects id="4" kee="foo-last" enabled="true" scope="FIL" qualifier="CLA" />
+
+ <snapshots id="9" status="U" islast="false" />
+ <snapshots id="10" status="U" islast="false" project_id="5" />
+ <projects id="5" kee="foo" enabled="true" scope="FIL" qualifier="CLA" />
+
+ <!-- Old snapshot of another project -->
+ <!-- bar-old -->
+ <clone_blocks id="1" project_snapshot_id="1" snapshot_id="2" hash="bb" index_in_file="0" start_line="0" end_line="0" />
+
+ <!-- Last snapshot of another project -->
+ <!-- bar-last -->
+ <clone_blocks id="2" project_snapshot_id="3" snapshot_id="4" hash="aa" index_in_file="0" start_line="1" end_line="2" />
+
+ <!-- Old snapshot of current project -->
+ <!-- foo-old -->
+ <clone_blocks id="3" project_snapshot_id="5" snapshot_id="6" hash="bb" index_in_file="0" start_line="0" end_line="0" />
+
+ <!-- Last snapshot of current project -->
+ <!-- foo-last -->
+ <clone_blocks id="4" project_snapshot_id="7" snapshot_id="8" hash="bb" index_in_file="0" start_line="0" end_line="0" />
+
+ <!-- New snapshot of current project -->
+ <!-- foo -->
+ <clone_blocks id="5" project_snapshot_id="9" snapshot_id="10" hash="aa" index_in_file="0" start_line="0" end_line="0" />
+
+</dataset>
<dataset>
- <snapshots id="1" status="P" islast="false" />
- <snapshots id="2" status="P" islast="true" />
+ <snapshots id="1" status="U" islast="false" project_id="0" />
+ <snapshots id="2" status="U" islast="false" project_id="1" />
+ <projects id="1" kee="foo" enabled="true" scope="FIL" qualifier="CLA" />
- <snapshots id="3" status="P" islast="false" />
- <snapshots id="4" status="P" islast="true" />
- <snapshots id="5" status="U" islast="false" />
-
- <!-- Old snapshot of another project -->
- <clone_blocks id="1" snapshot_id="1" hash="aa" resource_key="bar-old" index_in_file="0" start_line="0" end_line="1" />
-
- <!-- Last snapshot of another project -->
- <clone_blocks id="2" snapshot_id="2" hash="aa" resource_key="bar-last" index_in_file="0" start_line="0" end_line="1" />
-
- <!-- Old snapshot of current project -->
- <clone_blocks id="3" snapshot_id="3" hash="aa" resource_key="foo-old" index_in_file="0" start_line="0" end_line="1" />
-
- <!-- Last snapshot of current project -->
- <clone_blocks id="4" snapshot_id="4" hash="aa" resource_key="foo-last" index_in_file="0" start_line="0" end_line="1" />
-
- <!-- New snapshot of current project -->
- <clone_blocks id="5" snapshot_id="5" hash="aa" resource_key="foo" index_in_file="0" start_line="0" end_line="1" />
-
- <clone_blocks id="6" snapshot_id="5" hash="bb" resource_key="baz" index_in_file="0" start_line="0" end_line="1" />
+ <clone_blocks id="1" project_snapshot_id="1" snapshot_id="2" hash="bb" index_in_file="0" start_line="1" end_line="2" />
</dataset>
--- /dev/null
+<dataset>
+
+ <snapshots id="1" status="U" islast="false" project_id="0" />
+ <snapshots id="2" status="U" islast="false" project_id="1" />
+ <projects id="1" kee="foo" enabled="true" scope="FIL" qualifier="CLA" />
+
+</dataset>
import org.sonar.api.database.model.*;
import org.sonar.api.design.DependencyDto;
import org.sonar.api.utils.TimeProfiler;
+import org.sonar.jpa.entity.CloneBlock;
import javax.persistence.Query;
import java.util.List;
deleteSources(session, snapshotIds);
deleteViolations(session, snapshotIds);
deleteDependencies(session, snapshotIds);
+ deleteCloneBlocks(session, snapshotIds);
deleteSnapshots(session, snapshotIds);
}
executeQuery(session, "delete violations", snapshotIds, "delete from " + RuleFailureModel.class.getSimpleName() + " e where e.snapshotId in (:ids)");
}
+ /**
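+ * Delete clone blocks of the given snapshots.
+ *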
+ * @since 2.11
+ */
+ private static void deleteCloneBlocks(DatabaseSession session, List<Integer> snapshotIds) {
+ executeQuery(session, "delete clone blocks", snapshotIds, "delete from " + CloneBlock.class.getSimpleName() + " e where e.snapshotId in (:ids)");
+ }
+
/**
* Delete SNAPSHOTS table
*/
<!--parent_dependency_id="[null]" project_snapshot_id="1"-->
<!--dep_usage="INHERITS" dep_weight="1" from_scope="FIL" to_scope="FIL"/>-->
-</dataset>
\ No newline at end of file
+ <!--<clone_blocks id="1" project_snapshot_id="1" snapshot_id="3" hash="bb" index_in_file="0" start_line="0" end_line="0" />-->
+ <!--<clone_blocks id="2" project_snapshot_id="1" snapshot_id="4" hash="bb" index_in_file="0" start_line="0" end_line="0" />-->
+
+</dataset>
parent_dependency_id="[null]" project_snapshot_id="1"
dep_usage="INHERITS" dep_weight="1" from_scope="FIL" to_scope="FIL" />
-</dataset>
\ No newline at end of file
+ <clone_blocks id="1" project_snapshot_id="1" snapshot_id="3" hash="bb" index_in_file="0" start_line="0" end_line="0" />
+ <clone_blocks id="2" project_snapshot_id="1" snapshot_id="4" hash="bb" index_in_file="0" start_line="0" end_line="0" />
+
+</dataset>
import javax.persistence.Id;
import javax.persistence.Table;
-import org.sonar.api.database.model.ResourceModel;
-
/**
* @since 2.11
*/
@Column(name = "snapshot_id", updatable = false, nullable = false)
private Integer snapshotId;
+ @Column(name = "project_snapshot_id", updatable = false, nullable = false)
+ private Integer projectSnapshotId;
+
@Column(name = "hash", updatable = false, nullable = false, length = BLOCK_HASH_SIZE)
private String hash;
- @Column(name = "resource_key", updatable = false, nullable = false, length = ResourceModel.KEY_SIZE)
- private String resourceKey;
-
@Column(name = "index_in_file", updatable = false, nullable = false)
private Integer indexInFile;
public CloneBlock() {
}
- public CloneBlock(Integer snapshotId, String hash, String resourceKey, Integer indexInFile, Integer startLine, Integer endLine) {
+ public CloneBlock(Integer projectSnapshotId, Integer snapshotId, String hash, Integer indexInFile, Integer startLine, Integer endLine) {
+ this.projectSnapshotId = projectSnapshotId;
this.snapshotId = snapshotId;
this.hash = hash;
this.indexInFile = indexInFile;
- this.resourceKey = resourceKey;
this.startLine = startLine;
this.endLine = endLine;
}
return snapshotId;
}
- public String getResourceKey() {
- return resourceKey;
+ public Integer getProjectSnapshotId() {
+ return projectSnapshotId;
}
public String getHash() {
def self.up
create_table :clone_blocks do |t|
+ t.column :project_snapshot_id, :integer, :null => false
t.column :snapshot_id, :integer, :null => false
t.column :hash, :string, :null => false, :limit => 50
- t.column :resource_key, :string, :null => false, :limit => 400
t.column :index_in_file, :integer, :null => false
t.column :start_line, :integer, :null => false
t.column :end_line, :integer, :null => false
end
+ add_index :clone_blocks, :project_snapshot_id, :name => 'clone_blocks_project_snapshot'
+ add_index :clone_blocks, :snapshot_id, :name => 'clone_blocks_snapshot'
add_index :clone_blocks, :hash, :name => 'clone_blocks_hash'
- add_index :clone_blocks, [:snapshot_id, :resource_key], :name => 'clone_blocks_resource'
end
end
);
CREATE TABLE CLONE_BLOCKS (
+ PROJECT_SNAPSHOT_ID INTEGER,
SNAPSHOT_ID INTEGER,
HASH VARCHAR(50),
- RESOURCE_KEY VARCHAR(400),
INDEX_IN_FILE INTEGER NOT NULL,
START_LINE INTEGER NOT NULL,
END_LINE INTEGER NOT NULL
);
+CREATE INDEX CLONE_BLOCKS_PROJECT_SNAPSHOT ON CLONE_BLOCKS (PROJECT_SNAPSHOT_ID);
+CREATE INDEX CLONE_BLOCKS_SNAPSHOT ON CLONE_BLOCKS (SNAPSHOT_ID);
CREATE INDEX CLONE_BLOCKS_HASH ON CLONE_BLOCKS (HASH);
-CREATE INDEX CLONE_BLOCKS_RESOURCE ON CLONE_BLOCKS (SNAPSHOT_ID, RESOURCE_KEY);