source.dussan.org Git - jgit.git/commitdiff
Implement Bram Cohen's Patience Diff
author Shawn O. Pearce <spearce@spearce.org>
Thu, 2 Sep 2010 21:41:15 +0000 (14:41 -0700)
committer Shawn O. Pearce <spearce@spearce.org>
Tue, 21 Sep 2010 01:15:22 +0000 (18:15 -0700)
Change-Id: Ic7a76df2861ea6c569ab9756a62018987912bd13
Signed-off-by: Shawn O. Pearce <spearce@spearce.org>
org.eclipse.jgit.test/tst/org/eclipse/jgit/diff/PatienceDiffTest.java [new file with mode: 0644]
org.eclipse.jgit/src/org/eclipse/jgit/diff/PatienceDiff.java [new file with mode: 0644]
org.eclipse.jgit/src/org/eclipse/jgit/diff/PatienceDiffIndex.java [new file with mode: 0644]
org.eclipse.jgit/src/org/eclipse/jgit/diff/SequenceComparator.java

diff --git a/org.eclipse.jgit.test/tst/org/eclipse/jgit/diff/PatienceDiffTest.java b/org.eclipse.jgit.test/tst/org/eclipse/jgit/diff/PatienceDiffTest.java
new file mode 100644 (file)
index 0000000..70ff450
--- /dev/null
@@ -0,0 +1,231 @@
+/*
+ * Copyright (C) 2010, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials provided
+ *   with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ *   names of its contributors may be used to endorse or promote
+ *   products derived from this software without specific prior
+ *   written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.diff;
+
+import java.io.UnsupportedEncodingException;
+
+import junit.framework.TestCase;
+
+public class PatienceDiffTest extends TestCase {
+       public void testEmptyInputs() {
+               EditList r = diff(t(""), t(""));
+               assertTrue("is empty", r.isEmpty());
+       }
+
+       public void testCreateFile() {
+               EditList r = diff(t(""), t("AB"));
+               assertEquals(1, r.size());
+               assertEquals(new Edit(0, 0, 0, 2), r.get(0));
+       }
+
+       public void testDeleteFile() {
+               EditList r = diff(t("AB"), t(""));
+               assertEquals(1, r.size());
+               assertEquals(new Edit(0, 2, 0, 0), r.get(0));
+       }
+
+       public void testDegenerate_InsertMiddle() {
+               EditList r = diff(t("ac"), t("aBc"));
+               assertEquals(1, r.size());
+               assertEquals(new Edit(1, 1, 1, 2), r.get(0));
+       }
+
+       public void testDegenerate_DeleteMiddle() {
+               EditList r = diff(t("aBc"), t("ac"));
+               assertEquals(1, r.size());
+               assertEquals(new Edit(1, 2, 1, 1), r.get(0));
+       }
+
+       public void testDegenerate_ReplaceMiddle() {
+               EditList r = diff(t("bCd"), t("bEd"));
+               assertEquals(1, r.size());
+               assertEquals(new Edit(1, 2, 1, 2), r.get(0));
+       }
+
+       public void testDegenerate_InsertsIntoMidPosition() {
+               EditList r = diff(t("aaaa"), t("aaXaa"));
+               assertEquals(1, r.size());
+               assertEquals(new Edit(2, 2, 2, 3), r.get(0));
+       }
+
+       public void testDegenerate_InsertStart() {
+               EditList r = diff(t("bc"), t("Abc"));
+               assertEquals(1, r.size());
+               assertEquals(new Edit(0, 0, 0, 1), r.get(0));
+       }
+
+       public void testDegenerate_DeleteStart() {
+               EditList r = diff(t("Abc"), t("bc"));
+               assertEquals(1, r.size());
+               assertEquals(new Edit(0, 1, 0, 0), r.get(0));
+       }
+
+       public void testDegenerate_InsertEnd() {
+               EditList r = diff(t("bc"), t("bcD"));
+               assertEquals(1, r.size());
+               assertEquals(new Edit(2, 2, 2, 3), r.get(0));
+       }
+
+       public void testDegenerate_DeleteEnd() {
+               EditList r = diff(t("bcD"), t("bc"));
+               assertEquals(1, r.size());
+               assertEquals(new Edit(2, 3, 2, 2), r.get(0));
+       }
+
+       public void testEdit_ReplaceCommonDelete() {
+               EditList r = diff(t("RbC"), t("Sb"));
+               assertEquals(2, r.size());
+               assertEquals(new Edit(0, 1, 0, 1), r.get(0));
+               assertEquals(new Edit(2, 3, 2, 2), r.get(1));
+       }
+
+       public void testEdit_CommonReplaceCommonDeleteCommon() {
+               EditList r = diff(t("aRbCd"), t("aSbd"));
+               assertEquals(2, r.size());
+               assertEquals(new Edit(1, 2, 1, 2), r.get(0));
+               assertEquals(new Edit(3, 4, 3, 3), r.get(1));
+       }
+
+       public void testEdit_MoveBlock() {
+               EditList r = diff(t("aYYbcdz"), t("abcdYYz"));
+               assertEquals(2, r.size());
+               assertEquals(new Edit(1, 3, 1, 1), r.get(0));
+               assertEquals(new Edit(6, 6, 4, 6), r.get(1));
+       }
+
+       public void testEdit_InvertBlocks() {
+               EditList r = diff(t("aYYbcdXXz"), t("aXXbcdYYz"));
+               assertEquals(2, r.size());
+               assertEquals(new Edit(1, 3, 1, 3), r.get(0));
+               assertEquals(new Edit(6, 8, 6, 8), r.get(1));
+       }
+
+       public void testEdit_NoUniqueMiddleSideA() {
+               EditList r = diff(t("aRRSSz"), t("aSSRRz"));
+               assertEquals(1, r.size());
+               assertEquals(new Edit(1, 5, 1, 5), r.get(0));
+       }
+
+       public void testEdit_NoUniqueMiddleSideB() {
+               EditList r = diff(t("aRSz"), t("aSSRRz"));
+               assertEquals(1, r.size());
+               assertEquals(new Edit(1, 3, 1, 5), r.get(0));
+       }
+
+       public void testEdit_UniqueCommonLargerThanMatchPoint() {
+               // We are testing 3 unique common matches, but two of
+               // them are consumed as part of the 1st's LCS region.
+               EditList r = diff(t("AbdeZ"), t("PbdeQR"));
+               assertEquals(2, r.size());
+               assertEquals(new Edit(0, 1, 0, 1), r.get(0));
+               assertEquals(new Edit(4, 5, 4, 6), r.get(1));
+       }
+
+       public void testEdit_CommonGrowsPrefixAndSuffix() {
+               // Here there is only one common unique point, but we can grow it
+               // in both directions to find the LCS in the middle.
+               EditList r = diff(t("AaabccZ"), t("PaabccR"));
+               assertEquals(2, r.size());
+               assertEquals(new Edit(0, 1, 0, 1), r.get(0));
+               assertEquals(new Edit(6, 7, 6, 7), r.get(1));
+       }
+
+       public void testEdit_DuplicateAButCommonUniqueInB() {
+               EditList r = diff(t("AbbcR"), t("CbcS"));
+               assertEquals(2, r.size());
+               assertEquals(new Edit(0, 2, 0, 1), r.get(0));
+               assertEquals(new Edit(4, 5, 3, 4), r.get(1));
+       }
+
+       public void testPerformanceTestDeltaLength() {
+               String a = DiffTestDataGenerator.generateSequence(40000, 971, 3);
+               String b = DiffTestDataGenerator.generateSequence(40000, 1621, 5);
+               CharArray ac = new CharArray(a);
+               CharArray bc = new CharArray(b);
+               EditList r = new PatienceDiff().diff(new CharCmp(), ac, bc);
+               assertEquals(25, r.size());
+       }
+
+       private static EditList diff(RawText a, RawText b) {
+               return new PatienceDiff().diff(RawTextComparator.DEFAULT, a, b);
+       }
+
+       private static RawText t(String text) {
+               StringBuilder r = new StringBuilder();
+               for (int i = 0; i < text.length(); i++) {
+                       r.append(text.charAt(i));
+                       r.append('\n');
+               }
+               try {
+                       return new RawText(r.toString().getBytes("UTF-8"));
+               } catch (UnsupportedEncodingException e) {
+                       throw new RuntimeException(e);
+               }
+       }
+
+       private static class CharArray extends Sequence {
+               final char[] array;
+
+               public CharArray(String s) {
+                       array = s.toCharArray();
+               }
+
+               @Override
+               public int size() {
+                       return array.length;
+               }
+       }
+
+       private static class CharCmp extends SequenceComparator<CharArray> {
+               @Override
+               public boolean equals(CharArray a, int ai, CharArray b, int bi) {
+                       return a.array[ai] == b.array[bi];
+               }
+
+               @Override
+               public int hash(CharArray seq, int ptr) {
+                       return seq.array[ptr];
+               }
+       }
+}
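
(Not part of the commit; an editorial illustration.) A minimal usage sketch of the API the tests above exercise, assuming only the classes visible in this change plus the existing Edit, EditList and RawText types. The class and variable names are made up for illustration:

import org.eclipse.jgit.diff.Edit;
import org.eclipse.jgit.diff.EditList;
import org.eclipse.jgit.diff.PatienceDiff;
import org.eclipse.jgit.diff.RawText;
import org.eclipse.jgit.diff.RawTextComparator;

public class PatienceDiffExample {
	public static void main(String[] args) throws Exception {
		// Same shape as the t() helper above: one character per line.
		RawText a = new RawText("a\nY\nY\nb\nc\nd\nz\n".getBytes("UTF-8"));
		RawText b = new RawText("a\nb\nc\nd\nY\nY\nz\n".getBytes("UTF-8"));

		// Compare line by line with the default raw text comparator.
		EditList edits = new PatienceDiff().diff(RawTextComparator.DEFAULT, a, b);
		for (Edit e : edits)
			System.out.println(e); // two edits, matching testEdit_MoveBlock above
	}
}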
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/diff/PatienceDiff.java b/org.eclipse.jgit/src/org/eclipse/jgit/diff/PatienceDiff.java
new file mode 100644 (file)
index 0000000..44e1f79
--- /dev/null
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) 2010, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials provided
+ *   with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ *   names of its contributors may be used to endorse or promote
+ *   products derived from this software without specific prior
+ *   written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.diff;
+
+/**
+ * An implementation of the patience difference algorithm.
+ *
+ * This implementation was derived by using the 4 rules that are outlined in
+ * Bram Cohen's <a href="http://bramcohen.livejournal.com/73318.html">blog</a>.
+ *
+ * Because this algorithm requires finding a unique common point to center the
+ * longest common subsequence around, input sequences which have no unique
+ * elements create a degenerate Edit that simply replaces all of one sequence
+ * with all of the other sequence. For many source code files and other human
+ * maintained text, this isn't likely to occur. When it does occur, it can be
+ * easier to read the resulting large-scale replace than to navigate through a
+ * lot of slices of common-but-not-unique lines, like curly braces on lone
+ * lines, or XML close tags. Consequently this algorithm is willing to create a
+ * degenerate Edit in the worst case, in exchange for what may still be
+ * perceived to be an easier to read patch script.
+ *
+ * In a nutshell, the implementation defines an Edit that replaces all of
+ * sequence {@code a} with all of {@code b}. This Edit is reduced and/or split
+ * to remove common elements, until only Edits spanning non-common elements
+ * remain. Those {@link Edit}s are the differences.
+ *
+ * A slightly more detailed description of the implementation is:
+ *
+ * <ol>
+ * <li>Define an Edit that spans the entire two sequences. This edit replaces
+ * all of {@code a} with all of {@code b}.</li>
+ *
+ * <li>Shrink the Edit by shifting the starting points later in the sequence to
+ * skip over any elements that are common between {@code a} and {@code b}.
+ * Likewise shift the ending points earlier in the sequence to skip any trailing
+ * elements that are common. The first and last elements of the edit are now not
+ * common; however, there may be common content within the interior of the Edit
+ * that hasn't been discovered yet.</li>
+ *
+ * <li>Find unique elements within the Edit region that are in both sequences.
+ * This is currently accomplished by hashing the elements and merging them
+ * through a custom hash table in {@link PatienceDiffIndex}.</li>
+ *
+ * <li>Order the common unique elements by their position within {@code b}.</li>
+ *
+ * <li>For each unique element, stretch an Edit around it in both directions,
+ * consuming neighboring elements that are common to both sequences. Select the
+ * longest such Edit out of the unique element list. During this stretching,
+ * some subsequent unique elements may be consumed into an earlier element's common
+ * Edit. This means not all unique elements are evaluated.</li>
+ *
+ * <li>Split the Edit region at the longest common edit. Because step 2 shrank
+ * the initial region, there must be at least one element before, and at least
+ * one element after the split.</li>
+ *
+ * <li>Recurse on the regions before and after the split point, starting from step 3. Step
+ * 2 doesn't need to be done again because any common part was already removed
+ * by the prior step 2 or 5.</li>
+ * </ol>
+ */
+public class PatienceDiff implements DiffAlgorithm {
+       /** Algorithm we use when there are no common unique lines in a region. */
+       private DiffAlgorithm fallback;
+
+       /**
+        * Set the algorithm used when there are no common unique lines remaining.
+        *
+        * @param alg
+        *            the secondary algorithm. If null the region will be denoted as
+        *            a single REPLACE block.
+        */
+       public void setFallbackAlgorithm(DiffAlgorithm alg) {
+               fallback = alg;
+       }
+
+       public <S extends Sequence, C extends SequenceComparator<? super S>> EditList diff(
+                       C cmp, S a, S b) {
+               Edit region = new Edit(0, a.size(), 0, b.size());
+               region = cmp.reduceCommonStartEnd(a, b, region);
+
+               switch (region.getType()) {
+               case INSERT:
+               case DELETE: {
+                       EditList r = new EditList();
+                       r.add(region);
+                       return r;
+               }
+
+               case REPLACE: {
+                       SubsequenceComparator<S> cs = new SubsequenceComparator<S>(cmp);
+                       Subsequence<S> as = Subsequence.a(a, region);
+                       Subsequence<S> bs = Subsequence.b(b, region);
+                       return Subsequence.toBase(diffImpl(cs, as, bs), as, bs);
+               }
+
+               case EMPTY:
+                       return new EditList();
+
+               default:
+                       throw new IllegalStateException();
+               }
+       }
+
+       private <S extends Sequence, C extends SequenceComparator<? super S>> EditList diffImpl(
+                       C cmp, S a, S b) {
+               State<S> s = new State<S>(new HashedSequencePair<S>(cmp, a, b));
+               s.diff(new Edit(0, s.a.size(), 0, s.b.size()), null, 0, 0);
+               return s.edits;
+       }
+
+       private class State<S extends Sequence> {
+               private final HashedSequenceComparator<S> cmp;
+
+               private final HashedSequence<S> a;
+
+               private final HashedSequence<S> b;
+
+               /** Result edits we have determined must be made to convert a to b. */
+               final EditList edits;
+
+               State(HashedSequencePair<S> p) {
+                       this.cmp = p.getComparator();
+                       this.a = p.getA();
+                       this.b = p.getB();
+                       this.edits = new EditList();
+               }
+
+               private void diff(Edit r, long[] pCommon, int pIdx, int pEnd) {
+                       switch (r.getType()) {
+                       case INSERT:
+                       case DELETE:
+                               edits.add(r);
+                               return;
+
+                       case REPLACE:
+                               break;
+
+                       case EMPTY:
+                       default:
+                               throw new IllegalStateException();
+                       }
+
+                       PatienceDiffIndex<S> p;
+
+                       p = new PatienceDiffIndex<S>(cmp, a, b, r, pCommon, pIdx, pEnd);
+                       Edit lcs = p.findLongestCommonSequence();
+
+                       if (lcs != null) {
+                               pCommon = p.nCommon;
+                               pIdx = p.cIdx;
+                               pEnd = p.nCnt;
+                               p = null;
+
+                               diff(r.before(lcs), pCommon, 0, pIdx);
+                               diff(r.after(lcs), pCommon, pIdx + 1, pEnd);
+
+                       } else if (fallback != null) {
+                               p = null;
+                               pCommon = null;
+
+                               SubsequenceComparator<HashedSequence<S>> cs;
+                               cs = new SubsequenceComparator<HashedSequence<S>>(cmp);
+
+                               Subsequence<HashedSequence<S>> as = Subsequence.a(a, r);
+                               Subsequence<HashedSequence<S>> bs = Subsequence.b(b, r);
+                               EditList res = fallback.diff(cs, as, bs);
+                               edits.addAll(Subsequence.toBase(res, as, bs));
+
+                       } else {
+                               edits.add(r);
+                       }
+               }
+       }
+}
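
(Not part of the commit; an editorial illustration.) To make steps 5-7 of the class javadoc concrete, here is a sketch of how one recursion level splits a REPLACE region around the longest common unique run, using the same Edit.before()/Edit.after() helpers that State.diff() calls above. The positions are invented for illustration:

package org.eclipse.jgit.diff;

// Placed in the same package so the Edit helpers used by State.diff()
// are visible regardless of their declared access level.
class SplitSketch {
	public static void main(String[] args) {
		// Suppose the current REPLACE region covers a[1,7) x b[1,7) and
		// PatienceDiffIndex reports the longest common unique run as
		// a[4,6) matching b[2,4).
		Edit region = new Edit(1, 7, 1, 7);
		Edit lcs = new Edit(4, 6, 2, 4);

		// Step 6 of the javadoc: split the region around the LCS, then
		// recurse on whatever remains on either side of it.
		System.out.println(region.before(lcs)); // expected: REPLACE(1-4,1-2)
		System.out.println(region.after(lcs));  // expected: REPLACE(6-7,4-7)
	}
}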
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/diff/PatienceDiffIndex.java b/org.eclipse.jgit/src/org/eclipse/jgit/diff/PatienceDiffIndex.java
new file mode 100644 (file)
index 0000000..310c0d5
--- /dev/null
@@ -0,0 +1,442 @@
+/*
+ * Copyright (C) 2010, Google Inc.
+ * and other copyright owners as documented in the project's IP log.
+ *
+ * This program and the accompanying materials are made available
+ * under the terms of the Eclipse Distribution License v1.0 which
+ * accompanies this distribution, is reproduced below, and is
+ * available at http://www.eclipse.org/org/documents/edl-v10.php
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials provided
+ *   with the distribution.
+ *
+ * - Neither the name of the Eclipse Foundation, Inc. nor the
+ *   names of its contributors may be used to endorse or promote
+ *   products derived from this software without specific prior
+ *   written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package org.eclipse.jgit.diff;
+
+/**
+ * Supports {@link PatienceDiff} by finding unique but common elements.
+ *
+ * This index object is constructed once for each region being considered by the
+ * main {@link PatienceDiff} algorithm, which really means once for each
+ * recursive step. Each index instance processes a fixed-size region from the
+ * sequences, and during recursion the region is split into two smaller segments
+ * and processed again.
+ *
+ * Index instances from a higher level invocation hand some state down to a
+ * lower level invocation by passing the {@link #nCommon} array from the higher
+ * invocation into the two sub-steps as {@link #pCommon}. This permits some
+ * matching work that was already done in the higher invocation to be reused in
+ * the sub-step and can save a lot of time when element equality is expensive.
+ *
+ * @param <S>
+ *            type of sequence the scanner will scan.
+ */
+final class PatienceDiffIndex<S extends Sequence> {
+       private static final int A_DUPLICATE = 1;
+
+       private static final int B_DUPLICATE = 2;
+
+       private static final int DUPLICATE_MASK = B_DUPLICATE | A_DUPLICATE;
+
+       private static final int A_SHIFT = 2;
+
+       private static final int B_SHIFT = 31 + 2;
+
+       private static final int PTR_MASK = 0x7fffffff;
+
+       private final HashedSequenceComparator<S> cmp;
+
+       private final HashedSequence<S> a;
+
+       private final HashedSequence<S> b;
+
+       private final Edit region;
+
+       /** Pairs of beginB, endB indices previously found to be common and unique. */
+       private final long[] pCommon;
+
+       /** First valid index in {@link #pCommon}. */
+       private final int pBegin;
+
+       /** 1 past the last valid entry in {@link #pCommon}. */
+       private final int pEnd;
+
+       /** Keyed by {@code cmp.hash() & tableMask} to yield an entry offset. */
+       private final int[] table;
+
+       private final int tableMask;
+
+       // To save memory the buckets for hash chains are stored in correlated
+       // arrays. This permits us to get 3 values per entry, without paying
+       // the penalty for an object header on each entry.
+
+       /** Cached hash value for an element as returned by {@link #cmp}. */
+       private final int[] hash;
+
+       /**
+        * A matched (or partially examined) element from the two sequences.
+        *
+        * This is actually a 4-tuple: (bPtr, aPtrP1, bDuplicate, aDuplicate).
+        *
+        * bPtr and aPtr are each 31 bits. bPtr is exactly the position in the b
+        * sequence, while aPtrP1 is {@code aPtr + 1}. This permits us to determine
+        * if there is a corresponding element in a by testing for aPtrP1 != 0. If it
+        * equals 0, there is no element in a. If it equals 1, element 0 of a
+        * matches with element bPtr of b.
+        *
+        * bDuplicate is 1 if this element occurs more than once in b; likewise
+        * aDuplicate is 1 if this element occurs more than once in a. These flags
+        * permit each element to only be added to the index once. As the duplicate
+        * flags are the low 2 bits, a unique record satisfies {@code (rec & DUPLICATE_MASK) == 0}.
+        */
+       private final long[] ptrs;
+
+       /** Array index of the next entry in the table; 0 if at end of chain. */
+       private final int[] next;
+
+       /** Total number of entries that exist in {@link #ptrs}. */
+       private int entryCnt;
+
+       /** Number of entries in {@link #ptrs} that are actually unique. */
+       private int uniqueCommonCnt;
+
+       /**
+        * Pairs of beginB, endB indices found to be common and unique.
+        *
+        * In order to find the longest common (but unique) sequence within a
+        * region, we also find all of the other common but unique sequences in
+        * that same region. This array stores all of those results, allowing them
+        * to be passed into the subsequent recursive passes so we can later reuse
+        * these matches and avoid recomputing the same points again.
+        */
+       long[] nCommon;
+
+       /** Number of items in {@link #nCommon}. */
+       int nCnt;
+
+       /** Index of the longest common subsequence in {@link #nCommon}. */
+       int cIdx;
+
+       PatienceDiffIndex(HashedSequenceComparator<S> cmp, //
+                       HashedSequence<S> a, //
+                       HashedSequence<S> b, //
+                       Edit region, //
+                       long[] pCommon, int pIdx, int pCnt) {
+               this.cmp = cmp;
+               this.a = a;
+               this.b = b;
+               this.region = region;
+               this.pCommon = pCommon;
+               this.pBegin = pIdx;
+               this.pEnd = pCnt;
+
+               final int blockCnt = region.getLengthB();
+               if (blockCnt < 1) {
+                       table = new int[] {};
+                       tableMask = 0;
+
+                       hash = new int[] {};
+                       ptrs = new long[] {};
+                       next = new int[] {};
+
+               } else {
+                       table = new int[tableSize(blockCnt)];
+                       tableMask = table.length - 1;
+
+                       // As we insert elements we preincrement so that 0 is never a
+                       // valid entry. Therefore we have to allocate one extra space.
+                       //
+                       hash = new int[1 + blockCnt];
+                       ptrs = new long[hash.length];
+                       next = new int[hash.length];
+               }
+       }
+
+       /**
+        * Index elements in sequence B for later matching with sequence A.
+        *
+        * This is the first stage of preparing an index to find the longest common
+        * sequence. Elements of sequence B in the range [ptr, end) are scanned in
+        * order and added to the internal hashtable.
+        *
+        * If prior matches were given in the constructor, these may be used to
+        * fast-forward through sections of B to avoid unnecessary recomputation.
+        */
+       private void scanB() {
+               // We insert in ascending order so that a later scan of the table
+               // from 0 through entryCnt will iterate through B in order. This
+               // is the desired result ordering from match().
+               //
+               int ptr = region.beginB;
+               final int end = region.endB;
+               int pIdx = pBegin;
+               SCAN: while (ptr < end) {
+                       final int key = cmp.hash(b, ptr);
+                       final int tIdx = key & tableMask;
+
+                       if (pIdx < pEnd) {
+                               final long priorRec = pCommon[pIdx];
+                               if (ptr == bOf(priorRec)) {
+                                       // We know this region is unique from a prior pass.
+                                       // Insert the start point, and skip right to the end.
+                                       //
+                                       insertB(key, tIdx, ptr);
+                                       pIdx++;
+                                       ptr = aOfRaw(priorRec);
+                                       continue SCAN;
+                               }
+                       }
+
+                       // We aren't sure what the status of this element is. Add
+                       // it to our hashtable, and flag it as duplicate if there
+                       // was already a different entry present.
+                       //
+                       for (int eIdx = table[tIdx]; eIdx != 0; eIdx = next[eIdx]) {
+                               if (hash[eIdx] != key)
+                                       continue;
+
+                               final long rec = ptrs[eIdx];
+                               if (cmp.equals(b, ptr, b, bOf(rec))) {
+                                       ptrs[eIdx] = rec | B_DUPLICATE;
+                                       ptr++;
+                                       continue SCAN;
+                               }
+                       }
+
+                       insertB(key, tIdx, ptr);
+                       ptr++;
+               }
+       }
+
+       private void insertB(final int key, final int tIdx, int ptr) {
+               final int eIdx = ++entryCnt;
+               hash[eIdx] = key;
+               ptrs[eIdx] = ((long) ptr) << B_SHIFT;
+               next[eIdx] = table[tIdx];
+               table[tIdx] = eIdx;
+       }
+
+       /**
+        * Index elements in sequence A for later matching.
+        *
+        * This is the second stage of preparing an index to find the longest common
+        * sequence. It requires {@link #scanB()} to have been invoked first.
+        *
+        * Each element of A in the range [ptr, end) is searched for in the
+        * internal hashtable, to see if B has already registered a location.
+        *
+        * If prior matches were given in the constructor, these may be used to
+        * fast-forward through sections of A to avoid unnecessary recomputation.
+        */
+       private void scanA() {
+               int ptr = region.beginA;
+               final int end = region.endA;
+               int pLast = pBegin - 1;
+               SCAN: while (ptr < end) {
+                       final int key = cmp.hash(a, ptr);
+                       final int tIdx = key & tableMask;
+
+                       for (int eIdx = table[tIdx]; eIdx != 0; eIdx = next[eIdx]) {
+                               final long rec = ptrs[eIdx];
+
+                               if (isDuplicate(rec) || hash[eIdx] != key)
+                                       continue;
+
+                               final int aPtr = aOfRaw(rec);
+                               if (aPtr != 0 && cmp.equals(a, ptr, a, aPtr - 1)) {
+                                       ptrs[eIdx] = rec | A_DUPLICATE;
+                                       uniqueCommonCnt--;
+                                       ptr++;
+                                       continue SCAN;
+                               }
+
+                               final int bs = bOf(rec);
+                               if (!cmp.equals(a, ptr, b, bs)) {
+                                       ptr++;
+                                       continue SCAN;
+                               }
+
+                               // This element is both common and unique. Link the
+                               // two sequences together at this point.
+                               //
+                               ptrs[eIdx] = rec | (((long) (ptr + 1)) << A_SHIFT);
+                               uniqueCommonCnt++;
+
+                               if (pBegin < pEnd) {
+                                       // If we have prior match point data, we might be able
+                                       // to locate the length of the match and skip past all
+                                       // of those elements. We try to take advantage of the
+                                       // fact that pCommon is sorted by B, and it's likely that
+                                       // matches in A appear in the same order as they do in B.
+                                       //
+                                       for (int pIdx = pLast + 1;; pIdx++) {
+                                               if (pIdx == pEnd)
+                                                       pIdx = pBegin;
+                                               else if (pIdx == pLast)
+                                                       break;
+
+                                               final long priorRec = pCommon[pIdx];
+                                               final int priorB = bOf(priorRec);
+                                               if (bs < priorB)
+                                                       break;
+                                               if (bs == priorB) {
+                                                       ptr += aOfRaw(priorRec) - priorB;
+                                                       pLast = pIdx;
+                                                       continue SCAN;
+                                               }
+                                       }
+                               }
+
+                               ptr++;
+                               continue SCAN;
+                       }
+
+                       ptr++;
+               }
+       }
+
+       /**
+        * Scan all potential matches and find the longest common sequence.
+        *
+        * If this method returns non-null, the caller should copy out the
+        * {@link #nCommon} array and pass that through to the recursive sub-steps
+        * so that existing common matches can be reused rather than recomputed.
+        *
+        * @return an edit covering the longest common sequence. Null if there are
+        *         no common unique sequences present.
+        */
+       Edit findLongestCommonSequence() {
+               scanB();
+               scanA();
+
+               if (uniqueCommonCnt == 0)
+                       return null;
+
+               nCommon = new long[uniqueCommonCnt];
+               int pIdx = pBegin;
+               Edit lcs = new Edit(0, 0);
+
+               MATCH: for (int eIdx = 1; eIdx <= entryCnt; eIdx++) {
+                       final long rec = ptrs[eIdx];
+                       if (isDuplicate(rec) || aOfRaw(rec) == 0)
+                               continue;
+
+                       int bs = bOf(rec);
+                       if (bs < lcs.endB)
+                               continue;
+
+                       int as = aOf(rec);
+                       if (pIdx < pEnd) {
+                               final long priorRec = pCommon[pIdx];
+                               if (bs == bOf(priorRec)) {
+                                       // We had a prior match and we know it's unique.
+                                       // Reuse its region rather than computing again.
+                                       //
+                                       int be = aOfRaw(priorRec);
+
+                                       if (lcs.getLengthB() < be - bs) {
+                                               as -= bOf(rec) - bs;
+                                               lcs.beginA = as;
+                                               lcs.beginB = bs;
+                                               lcs.endA = as + (be - bs);
+                                               lcs.endB = be;
+                                               cIdx = nCnt;
+                                       }
+
+                                       nCommon[nCnt] = priorRec;
+                                       if (++nCnt == uniqueCommonCnt)
+                                               break MATCH;
+
+                                       pIdx++;
+                                       continue MATCH;
+                               }
+                       }
+
+                       // We didn't have prior match data, or this is the first time
+                       // seeing this particular pair. Extend the region as large as
+                       // possible and remember it for future use.
+                       //
+                       int ae = as + 1;
+                       int be = bs + 1;
+
+                       while (region.beginA < as && region.beginB < bs
+                                       && cmp.equals(a, as - 1, b, bs - 1)) {
+                               as--;
+                               bs--;
+                       }
+                       while (ae < region.endA && be < region.endB
+                                       && cmp.equals(a, ae, b, be)) {
+                               ae++;
+                               be++;
+                       }
+
+                       if (lcs.getLengthB() < be - bs) {
+                               lcs.beginA = as;
+                               lcs.beginB = bs;
+                               lcs.endA = ae;
+                               lcs.endB = be;
+                               cIdx = nCnt;
+                       }
+
+                       nCommon[nCnt] = (((long) bs) << B_SHIFT) | (((long) be) << A_SHIFT);
+                       if (++nCnt == uniqueCommonCnt)
+                               break MATCH;
+               }
+
+               return lcs;
+       }
+
+       private static boolean isDuplicate(long rec) {
+               return (((int) rec) & DUPLICATE_MASK) != 0;
+       }
+
+       private static int aOfRaw(long rec) {
+               return ((int) (rec >>> A_SHIFT)) & PTR_MASK;
+       }
+
+       private static int aOf(long rec) {
+               return aOfRaw(rec) - 1;
+       }
+
+       private static int bOf(long rec) {
+               return (int) (rec >>> B_SHIFT);
+       }
+
+       private static int tableSize(final int worstCaseBlockCnt) {
+               int shift = 32 - Integer.numberOfLeadingZeros(worstCaseBlockCnt);
+               int sz = 1 << (shift - 1);
+               if (sz < worstCaseBlockCnt)
+                       sz <<= 1;
+               return sz;
+       }
+}
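
(Not part of the commit; an editorial illustration.) The 4-tuple packing documented above for the ptrs field is the densest part of this index, so the following standalone sketch copies the shift and mask constants and builds and decodes one record the same way insertB(), scanA(), aOf(), bOf() and isDuplicate() do. The class name and values are made up:

// Illustration of the 64-bit record layout:
// bits 63..33 = bPtr, bits 32..2 = aPtr + 1, bit 1 = bDuplicate, bit 0 = aDuplicate.
class RecordLayoutSketch {
	static final int A_DUPLICATE = 1;
	static final int B_DUPLICATE = 2;
	static final int DUPLICATE_MASK = B_DUPLICATE | A_DUPLICATE;
	static final int A_SHIFT = 2;
	static final int B_SHIFT = 31 + 2;
	static final int PTR_MASK = 0x7fffffff;

	public static void main(String[] args) {
		int bPtr = 42; // element 42 of b ...
		int aPtr = 7;  // ... was matched against element 7 of a

		// insertB() stores only the b position; scanA() later ORs in aPtr + 1.
		long rec = ((long) bPtr) << B_SHIFT;
		rec |= ((long) (aPtr + 1)) << A_SHIFT;

		System.out.println((int) (rec >>> B_SHIFT));                    // 42, like bOf()
		System.out.println((((int) (rec >>> A_SHIFT)) & PTR_MASK) - 1); // 7, like aOf()
		System.out.println((((int) rec) & DUPLICATE_MASK) != 0);        // false, like isDuplicate()
	}
}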
diff --git a/org.eclipse.jgit/src/org/eclipse/jgit/diff/SequenceComparator.java b/org.eclipse.jgit/src/org/eclipse/jgit/diff/SequenceComparator.java
index 5ec23510ec1715e43fd9282945a210fbfb20f67a..790a3942c920828ce09f5d49c8a3d2de047c9821 100644 (file)
@@ -92,4 +92,41 @@ public abstract class SequenceComparator<S extends Sequence> {
         * @return hash the hash value.
         */
        public abstract int hash(S seq, int ptr);
+
+       /**
+        * Modify the edit to remove common leading and trailing items.
+        *
+        * The supplied edit {@code e} is reduced in size by moving the beginning A
+        * and B points so the edit does not cover any items that are in common
+        * between the two sequences. The ending A and B points are also shifted to
+        * remove common items from the end of the region.
+        *
+        * @param a
+        *            the first sequence.
+        * @param b
+        *            the second sequence.
+        * @param e
+        *            the edit to start with and update.
+        * @return {@code e} if it was updated in-place, otherwise a new edit
+        *         containing the reduced region.
+        */
+       public Edit reduceCommonStartEnd(S a, S b, Edit e) {
+               // Skip over items that are common at the start.
+               //
+               while (e.beginA < e.endA && e.beginB < e.endB
+                               && equals(a, e.beginA, b, e.beginB)) {
+                       e.beginA++;
+                       e.beginB++;
+               }
+
+               // Skip over items that are common at the end.
+               //
+               while (e.beginA < e.endA && e.beginB < e.endB
+                               && equals(a, e.endA - 1, b, e.endB - 1)) {
+                       e.endA--;
+                       e.endB--;
+               }
+
+               return e;
+       }
 }
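
(Not part of the commit; an editorial illustration.) A short sketch of what the new reduceCommonStartEnd() hook buys PatienceDiff: step 2 of its javadoc starts from an edit spanning both sequences and trims the common prefix and suffix before any hashing happens. The expected output assumes the default line-based comparator; the class name is invented:

import org.eclipse.jgit.diff.Edit;
import org.eclipse.jgit.diff.RawText;
import org.eclipse.jgit.diff.RawTextComparator;

public class ReduceSketch {
	public static void main(String[] args) throws Exception {
		RawText a = new RawText("a\nb\nC\nd\ne\n".getBytes("UTF-8"));
		RawText b = new RawText("a\nb\nX\nY\nd\ne\n".getBytes("UTF-8"));

		// Start with an edit covering everything, then trim the shared
		// "a","b" prefix and "d","e" suffix exactly as the method above does.
		Edit e = new Edit(0, a.size(), 0, b.size());
		e = RawTextComparator.DEFAULT.reduceCommonStartEnd(a, b, e);

		System.out.println(e); // expected: REPLACE(2-3,2-4)
	}
}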