/*
 * Copyright (C) 2009-2016 SonarSource SA
 * mailto:contact AT sonarsource DOT com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 3 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
package org.sonar.duplications.detector.original;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.sonar.duplications.block.Block;
import org.sonar.duplications.block.ByteArray;
import org.sonar.duplications.index.CloneGroup;
import org.sonar.duplications.index.CloneIndex;
import org.sonar.duplications.index.ClonePart;

/**
 * Implementation of the algorithm described in the paper
 * <a href="http://www4.in.tum.de/~juergens/publications/icsm2010_crc.pdf">Index-Based Code Clone Detection: Incremental, Distributed, Scalable</a>
 * by Benjamin Hummel, Elmar Juergens, Michael Conradt and Lars Heinemann.
 */
public final class OriginalCloneDetectionAlgorithm {

  private final CloneIndex cloneIndex;
  private final Filter filter = new Filter();
  private String originResourceId;

  private OriginalCloneDetectionAlgorithm(CloneIndex cloneIndex) {
    this.cloneIndex = cloneIndex;
  }

  private BlocksGroup[] createGroups(Collection<Block> fileBlocks) {
    // 2: let f be the list of tuples corresponding to filename sorted by statement index
    // either read from the index or calculated on the fly
    int size = fileBlocks.size();

    // Godin: create one group per unique hash
    // TODO Godin: can we create map with expected size?
    Map<ByteArray, BlocksGroup> groupsByHash = new HashMap<>();
    for (Block fileBlock : fileBlocks) {
      ByteArray hash = fileBlock.getBlockHash();
      BlocksGroup sameHash = groupsByHash.get(hash);
      if (sameHash == null) {
        sameHash = BlocksGroup.empty();
        groupsByHash.put(hash, sameHash);
      }
      sameHash.blocks.add(fileBlock);
    }
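    // at this point groupsByHash maps each distinct block hash to the blocks of this file that have that hash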

    // Godin: retrieve blocks from index
    for (Map.Entry<ByteArray, BlocksGroup> entry : groupsByHash.entrySet()) {
      ByteArray hash = entry.getKey();
      BlocksGroup group = entry.getValue();
      for (Block blockFromIndex : cloneIndex.getBySequenceHash(hash)) {
        // Godin: skip blocks for this file if they come from index
        if (!originResourceId.equals(blockFromIndex.getResourceId())) {
          group.blocks.add(blockFromIndex);
        }
      }
      Collections.sort(group.blocks, BlocksGroup.BlockComparator.INSTANCE);
    }
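    // each group now also contains the blocks with the same hash coming from other files in the index;
    // the group is sorted before the group operations below are applied to it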

    // 3: let c be a list with c(0) = empty
    BlocksGroup[] sameHashBlocksGroups = new BlocksGroup[size + 2];
    sameHashBlocksGroups[0] = BlocksGroup.empty();
    // 4: for i := 1 to length(f) do
    for (Block fileBlock : fileBlocks) {
      ByteArray hash = fileBlock.getBlockHash();
      int i = fileBlock.getIndexInFile() + 1;
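      // shifted by one: slot 0 is reserved for the empty sentinel group c(0)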
      // 5: retrieve tuples with same sequence hash as f(i)
      // 6: store this set as c(i)
      sameHashBlocksGroups[i] = groupsByHash.get(hash);
    }

    // Godin: allows reporting clones at the end of the file, because the condition at line 13 will then evaluate to true
    sameHashBlocksGroups[size + 1] = BlocksGroup.empty();

    return sameHashBlocksGroups;
  }

  private void findClones(Collection<Block> fileBlocks) {
    originResourceId = fileBlocks.iterator().next().getResourceId();
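    // all blocks in fileBlocks belong to the same file, so the first block identifies the analyzed resource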

    BlocksGroup[] sameHashBlocksGroups = createGroups(fileBlocks);

    // 7: for i := 1 to length(c) do
    for (int i = 1; i < sameHashBlocksGroups.length; i++) {
      // In the main loop (starting from Line 7), we first check
      // whether any new clones might start at this position. If there
      // is only a single tuple with this hash (which has to belong
      // to the inspected file at the current location) we skip this loop
      // iteration. The same holds if all tuples at position i have already
      // been present at position i − 1, as in this case any clone group
      // found at position i would be included in a clone group starting
      // at position i − 1.
      //
      // Although we use the subset operator in the
      // algorithm description, this is not really a subset operation,
      // as of course the statement index of the tuples in c(i) will be
      // increased by 1 compared to the corresponding ones in c(i − 1)
      // and the hash and info fields will differ.

      // 8: if |c(i)| < 2 or c(i) subsumed by c(i - 1) then
      if (sameHashBlocksGroups[i].size() < 2 || sameHashBlocksGroups[i].subsumedBy(sameHashBlocksGroups[i - 1], 1)) {
        // 9: continue with next loop iteration
        continue;
      }

      // The set a introduced in Line 10 is called the active set and
      // contains all tuples corresponding to clones which have not yet
      // been reported. At each iteration of the inner loop the set a
      // is reduced to tuples which are also present in c(j); again the
      // intersection operator has to account for the increased statement
      // index and different hash and info fields. The new value is
      // stored in a'. Clones are only reported if tuples are lost in
      // Line 12, as otherwise all current clones could be prolonged
      // by one statement. Clone reporting matches tuples that, after
      // correction of the statement index, appear in both c(i) and a;
      // each matched pair corresponds to a single clone. Its location
      // can be extracted from the filename and info fields.

      // 10: let a := c(i)
      BlocksGroup currentBlocksGroup = sameHashBlocksGroups[i];
      // 11: for j := i + 1 to length(c) do
      for (int j = i + 1; j < sameHashBlocksGroups.length; j++) {
        // 12: let a' := a intersect c(j)
        BlocksGroup intersectedBlocksGroup = currentBlocksGroup.intersect(sameHashBlocksGroups[j]);

        // 13: if |a'| < |a| then
        if (intersectedBlocksGroup.size() < currentBlocksGroup.size()) {
          // 14: report clones from c(i) to a (see text)

          // One problem of this algorithm is that clone classes with
          // multiple instances in the same file are encountered and
          // reported multiple times. Furthermore, when calculating the clone
          // groups for all files in a system, clone groups will be reported
          // more than once as well. Both cases can be avoided by
          // checking whether the first element of a' (with respect to a
          // fixed order) is equal to f(j) and only reporting in this case.

          Block first = currentBlocksGroup.first(originResourceId);
          if (first.getIndexInFile() == j - 2) {
            // Godin: we report clones which start at i-1 and end at j-2, so the length is j-2-(i-1)+1 = j-i
            reportClones(sameHashBlocksGroups[i], currentBlocksGroup, j - i);
          }
        }

        // 15: let a := a'
        currentBlocksGroup = intersectedBlocksGroup;

        // Line 16 exits the inner loop early if either no more clones are starting
        // from position i (i.e., a is too small), or if all tuples from a
        // have already been in c(i − 1), corrected for statement index.
        // In this case they have already been reported in the previous
        // iteration of the outer loop.

        // IMPORTANT Godin: note that the difference in indexes between "a" and "c(i-1)" is greater than one,
        // so the method subsumedBy has to take this into account

        // 16: if |a| < 2 or a subsumed by c(i-1) then
        if (currentBlocksGroup.size() < 2 || currentBlocksGroup.subsumedBy(sameHashBlocksGroups[i - 1], j - i + 1)) {
          // 17: break inner loop
          break;
        }
      }
    }
  }

  private void reportClones(BlocksGroup beginGroup, BlocksGroup endGroup, int cloneLength) {
    List<Block[]> pairs = beginGroup.pairs(endGroup, cloneLength);
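    // each pair holds the first and the last block of one clone instance within a single resource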

    ClonePart origin = null;
    List<ClonePart> parts = new ArrayList<>();

    for (int i = 0; i < pairs.size(); i++) {
      Block[] pair = pairs.get(i);
      Block firstBlock = pair[0];
      Block lastBlock = pair[1];
      ClonePart part = new ClonePart(firstBlock.getResourceId(),
        firstBlock.getIndexInFile(),
        firstBlock.getStartLine(),
        lastBlock.getEndLine());

      if (originResourceId.equals(part.getResourceId())) {
        if (origin == null || part.getUnitStart() < origin.getUnitStart()) {
          origin = part;
        }
      }

      parts.add(part);
    }

    filter.add(CloneGroup.builder().setLength(cloneLength).setOrigin(origin).setParts(parts).build());
  }

  /**
   * Performs detection and returns the list of clone groups between the given file (represented as a collection of blocks) and the index.
   * Note that this method ignores blocks for this file that come back from the index.
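   * <p>
   * A minimal usage sketch, assuming an in-memory {@link CloneIndex} implementation (a {@code MemoryCloneIndex} is used here
   * for illustration) and block collections ({@code otherFilesBlocks}, {@code fileBlocks}) produced by a block chunker:
   * <pre>
   * CloneIndex index = new MemoryCloneIndex();
   * for (Block block : otherFilesBlocks) {
   *   index.insert(block);
   * }
   * List&lt;CloneGroup&gt; duplications = OriginalCloneDetectionAlgorithm.detect(index, fileBlocks);
   * </pre>
   */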
  public static List<CloneGroup> detect(CloneIndex cloneIndex, Collection<Block> fileBlocks) {
    if (fileBlocks.isEmpty()) {
      return Collections.emptyList();
    }
    OriginalCloneDetectionAlgorithm reporter = new OriginalCloneDetectionAlgorithm(cloneIndex);
    reporter.findClones(fileBlocks);
    return reporter.filter.getResult();
  }

}