Collection<String> hashes = from(cpdTextBlocks).transform(CpdTextBlockToHash.INSTANCE).toList();
List<DuplicationUnitDto> dtos = selectDuplicates(component, hashes);
+ if (dtos.isEmpty()) {
+ return;
+ }
+
Collection<Block> duplicatedBlocks = from(dtos).transform(DtoToBlock.INSTANCE).toList();
Collection<Block> originBlocks = from(cpdTextBlocks).transform(new CpdTextBlockToBlock(component.getKey())).toList();
+ LOGGER.trace("Found {} duplicated cpd blocks on file {}", duplicatedBlocks.size(), component.getKey());
integrateCrossProjectDuplications.computeCpd(component, originBlocks, duplicatedBlocks);
}
--- /dev/null
+/*
+ * SonarQube, open source software quality management tool.
+ * Copyright (C) 2008-2014 SonarSource
+ * mailto:contact AT sonarsource DOT com
+ *
+ * SonarQube is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 3 of the License, or (at your option) any later version.
+ *
+ * SonarQube is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+package org.sonar.server.computation.step;
+
+import org.sonar.batch.protocol.output.BatchReport;
+import org.sonar.core.util.CloseableIterator;
+import org.sonar.db.DbClient;
+import org.sonar.db.DbSession;
+import org.sonar.db.duplication.DuplicationUnitDto;
+import org.sonar.server.computation.batch.BatchReportReader;
+import org.sonar.server.computation.component.Component;
+import org.sonar.server.computation.component.CrawlerDepthLimit;
+import org.sonar.server.computation.component.DbIdsRepository;
+import org.sonar.server.computation.component.DepthTraversalTypeAwareCrawler;
+import org.sonar.server.computation.component.TreeRootHolder;
+import org.sonar.server.computation.component.TypeAwareVisitorAdapter;
+import org.sonar.server.computation.duplication.CrossProjectDuplicationStatusHolder;
+
+import static org.sonar.server.computation.component.ComponentVisitor.Order.PRE_ORDER;
+
+/**
+ * Persist cross project duplications text blocks into DUPLICATIONS_INDEX table
+ */
+public class PersistCrossProjectDuplicationIndexStep implements ComputationStep {
+
+  private final DbClient dbClient;
+  private final TreeRootHolder treeRootHolder;
+  private final BatchReportReader reportReader;
+  private final DbIdsRepository dbIdsRepository;
+  private final CrossProjectDuplicationStatusHolder crossProjectDuplicationStatusHolder;
+
+  public PersistCrossProjectDuplicationIndexStep(DbClient dbClient, DbIdsRepository dbIdsRepository, TreeRootHolder treeRootHolder, BatchReportReader reportReader,
+    CrossProjectDuplicationStatusHolder crossProjectDuplicationStatusHolder) {
+    this.dbClient = dbClient;
+    this.treeRootHolder = treeRootHolder;
+    this.reportReader = reportReader;
+    this.dbIdsRepository = dbIdsRepository;
+    this.crossProjectDuplicationStatusHolder = crossProjectDuplicationStatusHolder;
+  }
+
+  @Override
+  public void execute() {
+    // "true" opens a batch session, suited to the potentially large number of inserts below
+    DbSession session = dbClient.openSession(true);
+    try {
+      // Nothing is persisted when cross project duplication is disabled for this analysis
+      if (crossProjectDuplicationStatusHolder.isEnabled()) {
+        Component project = treeRootHolder.getRoot();
+        long projectSnapshotId = dbIdsRepository.getSnapshotId(project);
+        new DepthTraversalTypeAwareCrawler(new DuplicationVisitor(session, projectSnapshotId)).visit(project);
+      }
+      // Commit is unconditional so the session is always properly ended, even when disabled
+      session.commit();
+    } finally {
+      dbClient.closeSession(session);
+    }
+  }
+
+  /**
+   * Visits each FILE component of the tree (PRE_ORDER) and persists the CPD text blocks
+   * read from the batch report into the DUPLICATIONS_INDEX table.
+   */
+  private class DuplicationVisitor extends TypeAwareVisitorAdapter {
+
+    private final DbSession session;
+    private final long projectSnapshotId;
+
+    private DuplicationVisitor(DbSession session, long projectSnapshotId) {
+      super(CrawlerDepthLimit.FILE, PRE_ORDER);
+      this.session = session;
+      this.projectSnapshotId = projectSnapshotId;
+    }
+
+    @Override
+    public void visitFile(Component file) {
+      visitComponent(file);
+    }
+
+    // Inserts one DuplicationUnitDto per CPD text block of the file, numbered by
+    // its position in the file (indexInFile starts at 0).
+    private void visitComponent(Component component) {
+      int indexInFile = 0;
+      // CloseableIterator wraps the report stream; try-with-resources guarantees it is released
+      try (CloseableIterator<BatchReport.CpdTextBlock> blocks = reportReader.readCpdTextBlocks(component.getReportAttributes().getRef())) {
+        while (blocks.hasNext()) {
+          BatchReport.CpdTextBlock block = blocks.next();
+          dbClient.duplicationDao().insert(
+            session,
+            new DuplicationUnitDto()
+              .setHash(block.getHash())
+              .setStartLine(block.getStartLine())
+              .setEndLine(block.getEndLine())
+              .setIndexInFile(indexInFile++)
+              .setSnapshotId(dbIdsRepository.getSnapshotId(component))
+              .setProjectSnapshotId(projectSnapshotId)
+          );
+        }
+      }
+    }
+  }
+
+  @Override
+  public String getDescription() {
+    return "Persist cross project duplications index";
+  }
+
+}
PersistDuplicationsStep.class,
PersistFileSourcesStep.class,
PersistTestsStep.class,
+ PersistCrossProjectDuplicationIndexStep.class,
// Switch snapshot and purge
SwitchSnapshotStep.class,
import org.sonar.server.computation.snapshot.Snapshot;
import static java.util.Arrays.asList;
-import static java.util.Collections.singletonList;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyZeroInteractions;
.setIndexInFile(0)
.setProjectSnapshotId(otherProjectSnapshot.getId())
.setSnapshotId(otherFileSnapshot.getId());
- dbClient.duplicationDao().insert(dbSession, singletonList(duplicate));
+ dbClient.duplicationDao().insert(dbSession, duplicate);
dbSession.commit();
BatchReport.CpdTextBlock originBlock = BatchReport.CpdTextBlock.newBuilder()
.setIndexInFile(0)
.setProjectSnapshotId(otherProjectSnapshot.getId())
.setSnapshotId(otherFileSnapshot.getId());
- dbClient.duplicationDao().insert(dbSession, singletonList(duplicate));
+ dbClient.duplicationDao().insert(dbSession, duplicate);
dbSession.commit();
BatchReport.CpdTextBlock originBlock = BatchReport.CpdTextBlock.newBuilder()
verifyZeroInteractions(integrateCrossProjectDuplications);
}
+  @Test
+  public void nothing_to_do_when_cpd_text_blocks_exists_but_no_duplicated_found() throws Exception {
+    analysisMetadataHolder
+      .setCrossProjectDuplicationEnabled(true)
+      .setBranch(null)
+      .setBaseProjectSnapshot(baseProjectSnapshot);
+
+    // The report contains a CPD block, but no row with this hash exists in the duplication index
+    BatchReport.CpdTextBlock originBlock = BatchReport.CpdTextBlock.newBuilder()
+      .setHash("a8998353e96320ec")
+      .setStartLine(30)
+      .setEndLine(45)
+      .setStartTokenIndex(0)
+      .setEndTokenIndex(10)
+      .build();
+    batchReportReader.putDuplicationBlocks(FILE_REF, asList(originBlock));
+
+    underTest.execute();
+
+    // No duplicate found in db -> the cross project duplication computation must not be triggered
+    verifyZeroInteractions(integrateCrossProjectDuplications);
+  }
+
private ComponentDto createProject(String projectKey) {
ComponentDto project = ComponentTesting.newProjectDto().setKey(projectKey);
dbClient.componentDao().insert(dbSession, project);
--- /dev/null
+/*
+ * SonarQube, open source software quality management tool.
+ * Copyright (C) 2008-2014 SonarSource
+ * mailto:contact AT sonarsource DOT com
+ *
+ * SonarQube is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 3 of the License, or (at your option) any later version.
+ *
+ * SonarQube is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+package org.sonar.server.computation.step;
+
+import java.util.Collections;
+import java.util.Map;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.sonar.api.utils.System2;
+import org.sonar.batch.protocol.output.BatchReport;
+import org.sonar.db.DbClient;
+import org.sonar.db.DbTester;
+import org.sonar.server.computation.batch.BatchReportReaderRule;
+import org.sonar.server.computation.batch.TreeRootHolderRule;
+import org.sonar.server.computation.component.Component;
+import org.sonar.server.computation.component.DbIdsRepositoryImpl;
+import org.sonar.server.computation.component.ReportComponent;
+import org.sonar.server.computation.duplication.CrossProjectDuplicationStatusHolder;
+
+import static java.util.Collections.singletonList;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Covers the three cases of {@link PersistCrossProjectDuplicationIndexStep}: blocks persisted,
+ * empty report, and feature disabled.
+ */
+public class PersistCrossProjectDuplicationIndexStepTest {
+
+  static final int FILE_REF = 2;
+  static final Component FILE = ReportComponent.builder(Component.Type.FILE, FILE_REF).build();
+  static final long FILE_SNAPSHOT_ID = 11L;
+
+  static final Component PROJECT = ReportComponent.builder(Component.Type.PROJECT, 1)
+    .addChildren(FILE)
+    .build();
+  static final long PROJECT_SNAPSHOT_ID = 10L;
+
+  static final BatchReport.CpdTextBlock CPD_TEXT_BLOCK = BatchReport.CpdTextBlock.newBuilder()
+    .setHash("a8998353e96320ec")
+    .setStartLine(30)
+    .setEndLine(45)
+    .build();
+
+  @Rule
+  public DbTester dbTester = DbTester.create(System2.INSTANCE);
+
+  @Rule
+  public BatchReportReaderRule reportReader = new BatchReportReaderRule();
+
+  @Rule
+  public TreeRootHolderRule treeRootHolder = new TreeRootHolderRule().setRoot(PROJECT);
+
+  CrossProjectDuplicationStatusHolder crossProjectDuplicationStatusHolder = mock(CrossProjectDuplicationStatusHolder.class);
+
+  DbIdsRepositoryImpl dbIdsRepository = new DbIdsRepositoryImpl();
+
+  DbClient dbClient = dbTester.getDbClient();
+
+  ComputationStep underTest = new PersistCrossProjectDuplicationIndexStep(dbClient, dbIdsRepository, treeRootHolder, reportReader, crossProjectDuplicationStatusHolder);
+
+  @Before
+  public void setUp() throws Exception {
+    // Snapshot ids are normally fed by earlier steps; simulate that here
+    dbIdsRepository.setSnapshotId(PROJECT, PROJECT_SNAPSHOT_ID);
+    dbIdsRepository.setSnapshotId(FILE, FILE_SNAPSHOT_ID);
+  }
+
+  @Test
+  public void persist_cpd_text_blocks() throws Exception {
+    when(crossProjectDuplicationStatusHolder.isEnabled()).thenReturn(true);
+    reportReader.putDuplicationBlocks(FILE_REF, singletonList(CPD_TEXT_BLOCK));
+
+    underTest.execute();
+
+    // Column aliases are quoted to get case-sensitive map keys on every supported db
+    Map<String, Object> dto = dbTester.selectFirst("select hash as \"hash\", start_line as \"startLine\", end_line as \"endLine\", index_in_file as \"indexInFile\", " +
+      "snapshot_id as \"snapshotId\", project_snapshot_id as \"projectSnapshotId\" from duplications_index");
+    assertThat(dto.get("hash")).isEqualTo(CPD_TEXT_BLOCK.getHash());
+    assertThat(dto.get("startLine")).isEqualTo(30L);
+    assertThat(dto.get("endLine")).isEqualTo(45L);
+    assertThat(dto.get("indexInFile")).isEqualTo(0L);
+    assertThat(dto.get("snapshotId")).isEqualTo(FILE_SNAPSHOT_ID);
+    assertThat(dto.get("projectSnapshotId")).isEqualTo(PROJECT_SNAPSHOT_ID);
+  }
+
+  @Test
+  public void nothing_to_persist_when_no_cpd_text_blocks_in_report() throws Exception {
+    when(crossProjectDuplicationStatusHolder.isEnabled()).thenReturn(true);
+    reportReader.putDuplicationBlocks(FILE_REF, Collections.<BatchReport.CpdTextBlock>emptyList());
+
+    underTest.execute();
+
+    assertThat(dbTester.countRowsOfTable("duplications_index")).isEqualTo(0);
+  }
+
+  @Test
+  public void nothing_to_do_when_cross_project_duplication_is_disabled() throws Exception {
+    when(crossProjectDuplicationStatusHolder.isEnabled()).thenReturn(false);
+    reportReader.putDuplicationBlocks(FILE_REF, singletonList(CPD_TEXT_BLOCK));
+
+    underTest.execute();
+
+    assertThat(dbTester.countRowsOfTable("duplications_index")).isEqualTo(0);
+  }
+
+}
* Insert rows in the table DUPLICATIONS_INDEX.
* Note that generated ids are not returned.
*/
- public void insert(DbSession session, Collection<DuplicationUnitDto> units) {
- DuplicationMapper mapper = session.getMapper(DuplicationMapper.class);
- for (DuplicationUnitDto unit : units) {
- mapper.batchInsert(unit);
- }
+  public void insert(DbSession session, DuplicationUnitDto dto) {
+    // Single-dto variant: delegates straight to the mapper; the generated id is not read back
+    session.getMapper(DuplicationMapper.class).batchInsert(dto);
}
}
db.prepareDbUnit(getClass(), "insert.xml");
dbSession.commit();
- dao.insert(dbSession, singletonList(new DuplicationUnitDto()
+ dao.insert(dbSession, new DuplicationUnitDto()
.setProjectSnapshotId(1)
.setSnapshotId(2)
.setHash("bb")
.setIndexInFile(0)
.setStartLine(1)
- .setEndLine(2)));
+ .setEndLine(2));
dbSession.commit();
db.assertDbUnit(getClass(), "insert-result.xml", "duplications_index");