import static org.eclipse.jgit.lib.ConfigConstants.CONFIG_KEY_ALGORITHM;
import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
-import java.io.BufferedOutputStream;
import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedList;
import java.util.List;
import java.util.Map;
-
import org.eclipse.jgit.annotations.NonNull;
import org.eclipse.jgit.attributes.Attributes;
import org.eclipse.jgit.diff.DiffAlgorithm;
import org.eclipse.jgit.diff.Sequence;
import org.eclipse.jgit.dircache.DirCache;
import org.eclipse.jgit.dircache.DirCacheBuildIterator;
-import org.eclipse.jgit.dircache.DirCacheBuilder;
-import org.eclipse.jgit.dircache.DirCacheCheckout;
import org.eclipse.jgit.dircache.DirCacheCheckout.CheckoutMetadata;
import org.eclipse.jgit.dircache.DirCacheEntry;
import org.eclipse.jgit.errors.BinaryBlobException;
-import org.eclipse.jgit.errors.CorruptObjectException;
-import org.eclipse.jgit.errors.IncorrectObjectTypeException;
-import org.eclipse.jgit.errors.IndexWriteException;
-import org.eclipse.jgit.errors.MissingObjectException;
-import org.eclipse.jgit.errors.NoWorkTreeException;
import org.eclipse.jgit.lib.Config;
-import org.eclipse.jgit.lib.ConfigConstants;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.CoreConfig.EolStreamType;
import org.eclipse.jgit.lib.FileMode;
import org.eclipse.jgit.treewalk.CanonicalTreeParser;
import org.eclipse.jgit.treewalk.NameConflictTreeWalk;
import org.eclipse.jgit.treewalk.TreeWalk;
-import org.eclipse.jgit.treewalk.TreeWalk.OperationType;
import org.eclipse.jgit.treewalk.WorkingTreeIterator;
-import org.eclipse.jgit.treewalk.WorkingTreeOptions;
import org.eclipse.jgit.treewalk.filter.TreeFilter;
import org.eclipse.jgit.util.FS;
import org.eclipse.jgit.util.LfsFactory;
-import org.eclipse.jgit.util.LfsFactory.LfsInputStream;
+import org.eclipse.jgit.util.WorkTreeUpdater;
+import org.eclipse.jgit.util.WorkTreeUpdater.StreamLoader;
import org.eclipse.jgit.util.TemporaryBuffer;
-import org.eclipse.jgit.util.io.EolStreamTypeUtil;
/**
* A three-way merger performing a content-merge if necessary
*/
public class ResolveMerger extends ThreeWayMerger {
+
/**
* If the merge fails (means: not stopped because of unresolved conflicts)
* this enum is used to explain why it failed
protected static final int T_FILE = 4;
/**
- * Builder to update the cache during this merge.
- *
- * @since 3.4
+ * Handler for repository I/O actions.
*/
- protected DirCacheBuilder builder;
+ protected WorkTreeUpdater workTreeUpdater;
/**
* merge result as tree
protected ObjectId resultTree;
/**
- * Paths that could not be merged by this merger because of an unsolvable
- * conflict.
- *
- * @since 3.4
- */
- protected List<String> unmergedPaths = new ArrayList<>();
-
- /**
- * Files modified during this merge operation.
- *
- * @since 3.4
- */
- protected List<String> modifiedFiles = new LinkedList<>();
-
- /**
- * If the merger has nothing to do for a file but check it out at the end of
- * the operation, it can be added here.
- *
- * @since 3.4
+ * Files modified during this operation. Note this list is only updated after a successful write.
*/
- protected Map<String, DirCacheEntry> toBeCheckedOut = new HashMap<>();
+ protected List<String> modifiedFiles = new ArrayList<>();
/**
- * Paths in this list will be deleted from the local copy at the end of the
- * operation.
+ * Paths that could not be merged by this merger because of an unsolvable
+ * conflict.
*
* @since 3.4
*/
- protected List<String> toBeDeleted = new ArrayList<>();
+ protected List<String> unmergedPaths = new ArrayList<>();
/**
* Low-level textual merge results. Will be passed on to the callers in case
*/
protected boolean inCore;
- /**
- * Set to true if this merger should use the default dircache of the
- * repository and should handle locking and unlocking of the dircache. If
- * this merger should work in-core or if an explicit dircache was specified
- * during construction then this field is set to false.
- * @since 3.0
- */
- protected boolean implicitDirCache;
-
/**
* Directory cache
* @since 3.0
*/
protected MergeAlgorithm mergeAlgorithm;
- /**
- * The {@link WorkingTreeOptions} are needed to determine line endings for
- * merged files.
- *
- * @since 4.11
- */
- protected WorkingTreeOptions workingTreeOptions;
-
- /**
- * The size limit (bytes) which controls a file to be stored in {@code Heap}
- * or {@code LocalFile} during the merge.
- */
- private int inCoreLimit;
-
/**
* The {@link ContentMergeStrategy} to use for "resolve" and "recursive"
* merges.
@NonNull
private ContentMergeStrategy contentStrategy = ContentMergeStrategy.CONFLICT;
- /**
- * Keeps {@link CheckoutMetadata} for {@link #checkout()}.
- */
- private Map<String, CheckoutMetadata> checkoutMetadata;
-
- /**
- * Keeps {@link CheckoutMetadata} for {@link #cleanUp()}.
- */
- private Map<String, CheckoutMetadata> cleanupMetadata;
-
private static MergeAlgorithm getMergeAlgorithm(Config config) {
SupportedAlgorithm diffAlg = config.getEnum(
CONFIG_DIFF_SECTION, null, CONFIG_KEY_ALGORITHM,
return new MergeAlgorithm(DiffAlgorithm.getAlgorithm(diffAlg));
}
- private static int getInCoreLimit(Config config) {
- return config.getInt(
- ConfigConstants.CONFIG_MERGE_SECTION, ConfigConstants.CONFIG_KEY_IN_CORE_LIMIT, 10 << 20);
- }
-
private static String[] defaultCommitNames() {
+	// Default labels for the three merge sides when no names were supplied.
-	return new String[] { "BASE", "OURS", "THEIRS" }; //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
+	return new String[]{"BASE", "OURS", "THEIRS"}; //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
}
private static final Attributes NO_ATTRIBUTES = new Attributes();
super(local);
Config config = local.getConfig();
mergeAlgorithm = getMergeAlgorithm(config);
- inCoreLimit = getInCoreLimit(config);
commitNames = defaultCommitNames();
this.inCore = inCore;
-
- if (inCore) {
- implicitDirCache = false;
- dircache = DirCache.newInCore();
- } else {
- implicitDirCache = true;
- workingTreeOptions = local.getConfig().get(WorkingTreeOptions.KEY);
- }
}
/**
mergeAlgorithm = getMergeAlgorithm(config);
commitNames = defaultCommitNames();
inCore = true;
- implicitDirCache = false;
- dircache = DirCache.newInCore();
}
/**
/** {@inheritDoc} */
@Override
protected boolean mergeImpl() throws IOException {
-	if (implicitDirCache) {
-		dircache = nonNullRepo().lockDirCache();
-	}
-	if (!inCore) {
-		checkoutMetadata = new HashMap<>();
-		cleanupMetadata = new HashMap<>();
-	}
-	try {
-		return mergeTrees(mergeBase(), sourceTrees[0], sourceTrees[1],
-				false);
-	} finally {
-		checkoutMetadata = null;
-		cleanupMetadata = null;
-		if (implicitDirCache) {
-			dircache.unlock();
-		}
-	}
-}
-
-private void checkout() throws NoWorkTreeException, IOException {
-	// Iterate in reverse so that "folder/file" is deleted before
-	// "folder". Otherwise this could result in a failing path because
-	// of a non-empty directory, for which delete() would fail.
-	for (int i = toBeDeleted.size() - 1; i >= 0; i--) {
-		String fileName = toBeDeleted.get(i);
-		File f = new File(nonNullRepo().getWorkTree(), fileName);
-		if (!f.delete())
-			if (!f.isDirectory())
-				failingPaths.put(fileName,
-						MergeFailureReason.COULD_NOT_DELETE);
-		modifiedFiles.add(fileName);
-	}
-	for (Map.Entry<String, DirCacheEntry> entry : toBeCheckedOut
-			.entrySet()) {
-		DirCacheEntry cacheEntry = entry.getValue();
-		if (cacheEntry.getFileMode() == FileMode.GITLINK) {
-			new File(nonNullRepo().getWorkTree(), entry.getKey()).mkdirs();
-		} else {
-			DirCacheCheckout.checkoutEntry(db, cacheEntry, reader, false,
-					checkoutMetadata.get(entry.getKey()));
-			modifiedFiles.add(entry.getKey());
-		}
-	}
-}
-
-/**
- * Reverts the worktree after an unsuccessful merge. We know that for all
- * modified files the old content was in the old index and the index
- * contained only stage 0. In case if inCore operation just clear the
- * history of modified files.
- *
- * @throws java.io.IOException
- * @throws org.eclipse.jgit.errors.CorruptObjectException
- * @throws org.eclipse.jgit.errors.NoWorkTreeException
- * @since 3.4
- */
-protected void cleanUp() throws NoWorkTreeException,
-		CorruptObjectException,
-		IOException {
-	if (inCore) {
-		modifiedFiles.clear();
-		return;
-	}
-
-	DirCache dc = nonNullRepo().readDirCache();
-	Iterator<String> mpathsIt=modifiedFiles.iterator();
-	while(mpathsIt.hasNext()) {
-		String mpath = mpathsIt.next();
-		DirCacheEntry entry = dc.getEntry(mpath);
-		if (entry != null) {
-			DirCacheCheckout.checkoutEntry(db, entry, reader, false,
-					cleanupMetadata.get(mpath));
-		}
-		mpathsIt.remove();
-	}
+	// Dircache locking, checkout metadata and cleanup on failure are now
+	// handled by the WorkTreeUpdater created inside mergeTrees(), which
+	// replaces the removed checkout()/cleanUp() helpers above.
+	return mergeTrees(mergeBase(), sourceTrees[0], sourceTrees[1],
+			false);
}
/**
private DirCacheEntry add(byte[] path, CanonicalTreeParser p, int stage,
		Instant lastMod, long len) {
	if (p != null && !p.getEntryFileMode().equals(FileMode.TREE)) {
-		DirCacheEntry e = new DirCacheEntry(path, stage);
-		e.setFileMode(p.getEntryFileMode());
-		e.setObjectId(p.getEntryObjectId());
-		e.setLastModified(lastMod);
-		e.setLength(len);
-		builder.add(e);
-		return e;
+		// Index insertion is delegated to the WorkTreeUpdater. Note the
+		// long length is narrowed to int here — presumably matching
+		// DirCacheEntry's int length, as the old setLength(len) did; confirm
+		// addExistingToIndex's parameter type.
+		return workTreeUpdater.addExistingToIndex(p.getEntryObjectId(), path,
+				p.getEntryFileMode(), stage,
+				lastMod, (int) len);
	}
	return null;
}
* @return the entry which was added to the index
*/
private DirCacheEntry keep(DirCacheEntry e) {
-	DirCacheEntry newEntry = new DirCacheEntry(e.getRawPath(),
-			e.getStage());
-	newEntry.setFileMode(e.getFileMode());
-	newEntry.setObjectId(e.getObjectId());
-	newEntry.setLastModified(e.getLastModifiedInstant());
-	newEntry.setLength(e.getLength());
-	builder.add(newEntry);
-	return newEntry;
-}
-
-/**
- * Remembers the {@link CheckoutMetadata} for the given path; it may be
- * needed in {@link #checkout()} or in {@link #cleanUp()}.
- *
- * @param map
- *            to add the metadata to
- * @param path
- *            of the current node
- * @param attributes
- *            to use for determining the metadata
- * @throws IOException
- *             if the smudge filter cannot be determined
- * @since 6.1
- */
-protected void addCheckoutMetadata(Map<String, CheckoutMetadata> map,
-		String path, Attributes attributes)
-		throws IOException {
-	if (map != null) {
-		EolStreamType eol = EolStreamTypeUtil.detectStreamType(
-				OperationType.CHECKOUT_OP, workingTreeOptions,
-				attributes);
-		CheckoutMetadata data = new CheckoutMetadata(eol,
-				tw.getSmudgeCommand(attributes));
-		map.put(path, data);
-	}
+	// Re-add the entry unchanged via the WorkTreeUpdater; replaces the
+	// manual DirCacheEntry copy and the removed addCheckoutMetadata helper.
+	return workTreeUpdater.addExistingToIndex(e.getObjectId(), e.getRawPath(), e.getFileMode(),
+			e.getStage(), e.getLastModifiedInstant(), e.getLength());
}
/**
protected void addToCheckout(String path, DirCacheEntry entry,
		Attributes[] attributes)
		throws IOException {
-	toBeCheckedOut.put(path, entry);
-	addCheckoutMetadata(cleanupMetadata, path, attributes[T_OURS]);
-	addCheckoutMetadata(checkoutMetadata, path, attributes[T_THEIRS]);
+	// OURS attributes drive the cleanup (revert) metadata, THEIRS the
+	// checkout metadata — mirroring the old cleanupMetadata/checkoutMetadata
+	// maps that this call replaces.
+	EolStreamType cleanupStreamType = workTreeUpdater.detectCheckoutStreamType(attributes[T_OURS]);
+	String cleanupSmudgeCommand = tw.getSmudgeCommand(attributes[T_OURS]);
+	EolStreamType checkoutStreamType = workTreeUpdater.detectCheckoutStreamType(attributes[T_THEIRS]);
+	String checkoutSmudgeCommand = tw.getSmudgeCommand(attributes[T_THEIRS]);
+	workTreeUpdater.addToCheckout(path, entry, cleanupStreamType, cleanupSmudgeCommand,
+			checkoutStreamType, checkoutSmudgeCommand);
}
/**
* Remember a path for deletion, and remember its {@link CheckoutMetadata}
- * in case it has to be restored in {@link #cleanUp()}.
+ * in case it has to be restored during cleanup.
*
* @param path
* of the entry
*/
protected void addDeletion(String path, boolean isFile,
		Attributes attributes) throws IOException {
-	toBeDeleted.add(path);
-	if (isFile) {
-		addCheckoutMetadata(cleanupMetadata, path, attributes);
-	}
+	// Ordering matters: the db == null test must short-circuit before
+	// nonNullRepo() is invoked.
+	// NOTE(review): unlike the removed code, non-file paths are no longer
+	// recorded for deletion at all — confirm this is intentional.
+	if (db == null || nonNullRepo().isBare() || !isFile)
+		return;
+
+	File file = new File(nonNullRepo().getWorkTree(), path);
+	EolStreamType streamType = workTreeUpdater.detectCheckoutStreamType(attributes);
+	String smudgeCommand = tw.getSmudgeCommand(attributes);
+	workTreeUpdater.deleteFile(path, file, streamType, smudgeCommand);
}
/**
* @return <code>false</code> if the merge will fail because the index entry
* didn't match ours or the working-dir file was dirty and a
* conflict occurred
- * @throws org.eclipse.jgit.errors.MissingObjectException
- * @throws org.eclipse.jgit.errors.IncorrectObjectTypeException
- * @throws org.eclipse.jgit.errors.CorruptObjectException
* @throws java.io.IOException
* @since 6.1
*/
CanonicalTreeParser ours, CanonicalTreeParser theirs,
DirCacheBuildIterator index, WorkingTreeIterator work,
boolean ignoreConflicts, Attributes[] attributes)
- throws MissingObjectException, IncorrectObjectTypeException,
- CorruptObjectException, IOException {
+ throws IOException {
enterSubtree = true;
final int modeO = tw.getRawMode(T_OURS);
final int modeT = tw.getRawMode(T_THEIRS);
final int modeB = tw.getRawMode(T_BASE);
boolean gitLinkMerging = isGitLink(modeO) || isGitLink(modeT)
|| isGitLink(modeB);
- if (modeO == 0 && modeT == 0 && modeB == 0)
+ if (modeO == 0 && modeT == 0 && modeB == 0) {
// File is either untracked or new, staged but uncommitted
return true;
+ }
- if (isIndexDirty())
+ if (isIndexDirty()) {
return false;
+ }
DirCacheEntry ourDce = null;
if (modeB == modeT && tw.idEqual(T_BASE, T_THEIRS)) {
// THEIRS was not changed compared to BASE. All changes must be in
// OURS. OURS is chosen. We can keep the existing entry.
- if (ourDce != null)
+ if (ourDce != null) {
keep(ourDce);
+ }
// no checkout needed!
return true;
}
// THEIRS. THEIRS is chosen.
// Check worktree before checking out THEIRS
- if (isWorktreeDirty(work, ourDce))
+ if (isWorktreeDirty(work, ourDce)) {
return false;
+ }
if (nonTree(modeT)) {
// we know about length and lastMod only after we have written
// the new content.
enterSubtree = false;
return true;
}
- if (nonTree(modeB))
+ if (nonTree(modeB)) {
add(tw.getRawPath(), base, DirCacheEntry.STAGE_1, EPOCH, 0);
- if (nonTree(modeO))
+ }
+ if (nonTree(modeO)) {
add(tw.getRawPath(), ours, DirCacheEntry.STAGE_2, EPOCH, 0);
- if (nonTree(modeT))
+ }
+ if (nonTree(modeT)) {
add(tw.getRawPath(), theirs, DirCacheEntry.STAGE_3, EPOCH, 0);
+ }
unmergedPaths.add(tw.getPathString());
enterSubtree = false;
return true;
// tells us we are in a subtree because of index or working-dir).
// If they are both folders no content-merge is required - we can
// return here.
- if (!nonTree(modeO))
+ if (!nonTree(modeO)) {
return true;
+ }
// ours and theirs are both files, just fall out of the if block
// and do the content merge
} else if (!attributes[T_OURS].canBeContentMerged()) {
// File marked as binary
switch (getContentMergeStrategy()) {
- case OURS:
- keep(ourDce);
- return true;
- case THEIRS:
- DirCacheEntry theirEntry = add(tw.getRawPath(), theirs,
- DirCacheEntry.STAGE_0, EPOCH, 0);
- addToCheckout(tw.getPathString(), theirEntry, attributes);
- return true;
- default:
- break;
+ case OURS:
+ keep(ourDce);
+ return true;
+ case THEIRS:
+ DirCacheEntry theirEntry = add(tw.getRawPath(), theirs,
+ DirCacheEntry.STAGE_0, EPOCH, 0);
+ addToCheckout(tw.getPathString(), theirEntry, attributes);
+ return true;
+ default:
+ break;
}
add(tw.getRawPath(), base, DirCacheEntry.STAGE_1, EPOCH, 0);
add(tw.getRawPath(), ours, DirCacheEntry.STAGE_2, EPOCH, 0);
getContentMergeStrategy());
} catch (BinaryBlobException e) {
switch (getContentMergeStrategy()) {
- case OURS:
- keep(ourDce);
- return true;
- case THEIRS:
- DirCacheEntry theirEntry = add(tw.getRawPath(), theirs,
- DirCacheEntry.STAGE_0, EPOCH, 0);
- addToCheckout(tw.getPathString(), theirEntry, attributes);
- return true;
- default:
- result = new MergeResult<>(Collections.emptyList());
- result.setContainsConflicts(true);
- break;
+ case OURS:
+ keep(ourDce);
+ return true;
+ case THEIRS:
+ DirCacheEntry theirEntry = add(tw.getRawPath(), theirs,
+ DirCacheEntry.STAGE_0, EPOCH, 0);
+ addToCheckout(tw.getPathString(), theirEntry, attributes);
+ return true;
+ default:
+ result = new MergeResult<>(Collections.emptyList());
+ result.setContainsConflicts(true);
+ break;
}
}
if (ignoreConflicts) {
if (result.containsConflicts() && !ignoreConflicts) {
unmergedPaths.add(currentPath);
}
- modifiedFiles.add(currentPath);
- addCheckoutMetadata(cleanupMetadata, currentPath,
- attributes[T_OURS]);
- addCheckoutMetadata(checkoutMetadata, currentPath,
- attributes[T_THEIRS]);
+ workTreeUpdater.markAsModified(currentPath);
+ // Entry is null - only adds the metadata.
+ addToCheckout(currentPath, null, attributes);
} else if (modeO != modeT) {
// OURS or THEIRS has been deleted
if (((modeO != 0 && !tw.idEqual(T_BASE, T_OURS)) || (modeT != 0 && !tw
}
private boolean isIndexDirty() {
+	// In-core merges never report a dirty index.
-	if (inCore)
+	if (inCore) {
		return false;
+	}
	final int modeI = tw.getRawMode(T_INDEX);
	final int modeO = tw.getRawMode(T_OURS);
	// Index entry has to match ours to be considered clean
	final boolean isDirty = nonTree(modeI)
			&& !(modeO == modeI && tw.idEqual(T_INDEX, T_OURS));
-	if (isDirty)
+	if (isDirty) {
		failingPaths
				.put(tw.getPathString(), MergeFailureReason.DIRTY_INDEX);
+	}
	return isDirty;
}
private boolean isWorktreeDirty(WorkingTreeIterator work,
		DirCacheEntry ourDce) throws IOException {
+	// Without a working-tree iterator there is nothing to compare against.
-	if (work == null)
+	if (work == null) {
		return false;
+	}
	final int modeF = tw.getRawMode(T_FILE);
	final int modeO = tw.getRawMode(T_OURS);
	// Worktree entry has to match ours to be considered clean
	boolean isDirty;
-	if (ourDce != null)
+	if (ourDce != null) {
		isDirty = work.isModified(ourDce, true, reader);
-	else {
+	} else {
		isDirty = work.isModeDifferent(modeO);
-		if (!isDirty && nonTree(modeF))
+		if (!isDirty && nonTree(modeF)) {
			isDirty = !tw.idEqual(T_FILE, T_OURS);
+		}
	}
	// Ignore existing empty directories
	if (isDirty && modeF == FileMode.TYPE_TREE
-			&& modeO == FileMode.TYPE_MISSING)
+			&& modeO == FileMode.TYPE_MISSING) {
		isDirty = false;
-	if (isDirty)
+	}
+	if (isDirty) {
		failingPaths.put(tw.getPathString(),
				MergeFailureReason.DIRTY_WORKTREE);
+	}
	return isDirty;
}
* @param theirs
* @param result
* @param attributes
- * @throws FileNotFoundException
* @throws IOException
*/
private void updateIndex(CanonicalTreeParser base,
CanonicalTreeParser ours, CanonicalTreeParser theirs,
MergeResult<RawText> result, Attributes attributes)
- throws FileNotFoundException,
- IOException {
+ throws IOException {
TemporaryBuffer rawMerged = null;
try {
rawMerged = doMerge(result);
// No conflict occurred, the file will contain fully merged content.
// The index will be populated with the new merged version.
- DirCacheEntry dce = new DirCacheEntry(tw.getPathString());
-
+ Instant lastModified =
+ mergedFile == null ? null : nonNullRepo().getFS().lastModifiedInstant(mergedFile);
// Set the mode for the new content. Fall back to REGULAR_FILE if
// we can't merge modes of OURS and THEIRS.
int newMode = mergeFileModes(tw.getRawMode(0), tw.getRawMode(1),
tw.getRawMode(2));
- dce.setFileMode(newMode == FileMode.MISSING.getBits()
- ? FileMode.REGULAR_FILE : FileMode.fromBits(newMode));
- if (mergedFile != null) {
- dce.setLastModified(
- nonNullRepo().getFS().lastModifiedInstant(mergedFile));
- dce.setLength((int) mergedFile.length());
- }
- dce.setObjectId(insertMergeResult(rawMerged, attributes));
- builder.add(dce);
+ FileMode mode = newMode == FileMode.MISSING.getBits()
+ ? FileMode.REGULAR_FILE : FileMode.fromBits(newMode);
+ workTreeUpdater.insertToIndex(rawMerged.openInputStream(), tw.getPathString().getBytes(UTF_8), mode,
+ DirCacheEntry.STAGE_0, lastModified, (int) rawMerged.length(),
+ attributes.get(Constants.ATTR_MERGE));
} finally {
if (rawMerged != null) {
rawMerged.destroy();
* @param attributes
* the files .gitattributes entries
* @return the working tree file to which the merged content was written.
- * @throws FileNotFoundException
* @throws IOException
*/
private File writeMergedFile(TemporaryBuffer rawMerged,
		Attributes attributes)
-		throws FileNotFoundException, IOException {
+		throws IOException {
	File workTree = nonNullRepo().getWorkTree();
	FS fs = nonNullRepo().getFS();
	File of = new File(workTree, tw.getPathString());
	File parentFolder = of.getParentFile();
+	EolStreamType eol = workTreeUpdater.detectCheckoutStreamType(attributes);
	if (!fs.exists(parentFolder)) {
		parentFolder.mkdirs();
	}
-	EolStreamType streamType = EolStreamTypeUtil.detectStreamType(
-			OperationType.CHECKOUT_OP, workingTreeOptions,
-			attributes);
-	try (OutputStream os = EolStreamTypeUtil.wrapOutputStream(
-			new BufferedOutputStream(new FileOutputStream(of)),
-			streamType)) {
-		rawMerged.writeTo(os, null);
-	}
+	// Content is streamed through the WorkTreeUpdater with the detected EOL
+	// type and smudge command, replacing the manual EolStreamTypeUtil
+	// output-stream wrapping above.
+	StreamLoader contentLoader = WorkTreeUpdater.createStreamLoader(rawMerged::openInputStream,
+			rawMerged.length());
+	workTreeUpdater.updateFileWithContent(contentLoader,
+			eol, tw.getSmudgeCommand(attributes), of.getPath(), of, false);
	return of;
}
private TemporaryBuffer doMerge(MergeResult<RawText> result)
throws IOException {
TemporaryBuffer.LocalFile buf = new TemporaryBuffer.LocalFile(
- db != null ? nonNullRepo().getDirectory() : null, inCoreLimit);
+ db != null ? nonNullRepo().getDirectory() : null, workTreeUpdater.getInCoreFileSizeLimit());
boolean success = false;
try {
new MergeFormatter().formatMerge(buf, result,
return buf;
}
- private ObjectId insertMergeResult(TemporaryBuffer buf,
- Attributes attributes) throws IOException {
- InputStream in = buf.openInputStream();
- try (LfsInputStream is = LfsFactory.getInstance().applyCleanFilter(
- getRepository(), in,
- buf.length(), attributes.get(Constants.ATTR_MERGE))) {
- return getObjectInserter().insert(OBJ_BLOB, is.getLength(), is);
- }
- }
-
/**
* Try to merge filemodes. If only ours or theirs have changed the mode
* (compared to base) we choose that one. If ours and theirs have equal
* conflict
*/
private int mergeFileModes(int modeB, int modeO, int modeT) {
-	if (modeO == modeT)
+	if (modeO == modeT) {
		return modeO;
-	if (modeB == modeO)
+	}
+	if (modeB == modeO) {
		// Base equal to Ours -> chooses Theirs if that is not missing
		return (modeT == FileMode.MISSING.getBits()) ? modeO : modeT;
-	if (modeB == modeT)
+	}
+	if (modeB == modeT) {
		// Base equal to Theirs -> chooses Ours if that is not missing
		return (modeO == FileMode.MISSING.getBits()) ? modeT : modeO;
+	}
+	// Both sides changed the mode differently from base and from each
+	// other: signal the conflict with MISSING.
	return FileMode.MISSING.getBits();
}
private RawText getRawText(ObjectId id,
Attributes attributes)
throws IOException, BinaryBlobException {
- if (id.equals(ObjectId.zeroId()))
- return new RawText(new byte[] {});
+ if (id.equals(ObjectId.zeroId())) {
+ return new RawText(new byte[]{});
+ }
ObjectLoader loader = LfsFactory.getInstance().applySmudgeFilter(
getRepository(), reader.open(id, OBJ_BLOB),
* superset of the files listed by {@link #getUnmergedPaths()}.
*/
public List<String> getModifiedFiles() {
-	return modifiedFiles;
+	// While a merge is running the WorkTreeUpdater owns the list; after
+	// mergeTrees() has closed and nulled the updater, fall back to the
+	// snapshot taken in its finally block.
+	return workTreeUpdater != null ? workTreeUpdater.getModifiedFiles() : modifiedFiles;
}
/**
* for this path.
*/
public Map<String, DirCacheEntry> getToBeCheckedOut() {
-	return toBeCheckedOut;
+	// The WorkTreeUpdater only exists while mergeTrees() is running: it is
+	// closed and nulled in that method's finally block. Guard against NPE
+	// for callers querying after the merge, mirroring getModifiedFiles().
+	return workTreeUpdater != null ? workTreeUpdater.getToBeCheckedOut()
+			: Collections.emptyMap();
}
/**
*/
public void setDirCache(DirCache dc) {
+	// implicitDirCache tracking is gone: locking of whichever dircache is
+	// set here is handled by WorkTreeUpdater.getLockedDirCache() in
+	// mergeTrees().
	this.dircache = dc;
-	implicitDirCache = false;
}
/**
protected boolean mergeTrees(AbstractTreeIterator baseTree,
		RevTree headTree, RevTree mergeTree, boolean ignoreConflicts)
		throws IOException {
+	try {
+		// The WorkTreeUpdater owns dircache locking, work-tree writes and
+		// the index build for the duration of this merge; it is released
+		// in the finally block below.
+		workTreeUpdater = inCore ?
+			WorkTreeUpdater.createInCoreWorkTreeUpdater(db, dircache, getObjectInserter()) :
+			WorkTreeUpdater.createWorkTreeUpdater(db, dircache);
+		dircache = workTreeUpdater.getLockedDirCache();
+		tw = new NameConflictTreeWalk(db, reader);
+
+		tw.addTree(baseTree);
+		tw.setHead(tw.addTree(headTree));
+		tw.addTree(mergeTree);
+		DirCacheBuildIterator buildIt = workTreeUpdater.createDirCacheBuildIterator();
+		int dciPos = tw.addTree(buildIt);
+		if (workingTreeIterator != null) {
+			tw.addTree(workingTreeIterator);
+			workingTreeIterator.setDirCacheIterator(tw, dciPos);
+		} else {
+			tw.setFilter(TreeFilter.ANY_DIFF);
+		}
-	builder = dircache.builder();
-	DirCacheBuildIterator buildIt = new DirCacheBuildIterator(builder);
-
-	tw = new NameConflictTreeWalk(db, reader);
-	tw.addTree(baseTree);
-	tw.setHead(tw.addTree(headTree));
-	tw.addTree(mergeTree);
-	int dciPos = tw.addTree(buildIt);
-	if (workingTreeIterator != null) {
-		tw.addTree(workingTreeIterator);
-		workingTreeIterator.setDirCacheIterator(tw, dciPos);
-	} else {
-		tw.setFilter(TreeFilter.ANY_DIFF);
-	}
+		if (!mergeTreeWalk(tw, ignoreConflicts)) {
+			return false;
+		}
-	if (!mergeTreeWalk(tw, ignoreConflicts)) {
+		workTreeUpdater.writeWorkTreeChanges(true);
+		if (getUnmergedPaths().isEmpty() && !failed()) {
+			WorkTreeUpdater.Result result = workTreeUpdater.writeIndexChanges();
+			resultTree = result.treeId;
+			modifiedFiles = result.modifiedFiles;
+			for (String f : result.failedToDelete) {
+				failingPaths.put(f, MergeFailureReason.COULD_NOT_DELETE);
+			}
+			return result.failedToDelete.isEmpty();
+		}
+		resultTree = null;
		return false;
-	}
-
-	if (!inCore) {
-		// No problem found. The only thing left to be done is to
-		// checkout all files from "theirs" which have been selected to
-		// go into the new index.
-		checkout();
-
-		// All content-merges are successfully done. If we can now write the
-		// new index we are on quite safe ground. Even if the checkout of
-		// files coming from "theirs" fails the user can work around such
-		// failures by checking out the index again.
-		if (!builder.commit()) {
-			cleanUp();
-			throw new IndexWriteException();
+	} finally {
+		// Guard: creating the updater may itself have thrown, in which
+		// case this finally block must not mask the original exception
+		// with a NullPointerException.
+		if (workTreeUpdater != null && modifiedFiles.isEmpty()) {
+			modifiedFiles = workTreeUpdater.getModifiedFiles();
		}
-		builder = null;
-
-	} else {
-		builder.finish();
-		builder = null;
+		if (workTreeUpdater != null) {
+			workTreeUpdater.close();
+		}
+		workTreeUpdater = null;
	}
-
-	if (getUnmergedPaths().isEmpty() && !failed()) {
-		resultTree = dircache.writeTree(getObjectInserter());
-		return true;
-	}
-	resultTree = null;
-	return false;
}
/**
boolean hasAttributeNodeProvider = treeWalk
.getAttributesNodeProvider() != null;
while (treeWalk.next()) {
- Attributes[] attributes = { NO_ATTRIBUTES, NO_ATTRIBUTES,
- NO_ATTRIBUTES };
+ Attributes[] attributes = {NO_ATTRIBUTES, NO_ATTRIBUTES,
+ NO_ATTRIBUTES};
if (hasAttributeNodeProvider) {
attributes[T_BASE] = treeWalk.getAttributes(T_BASE);
attributes[T_OURS] = treeWalk.getAttributes(T_OURS);
hasWorkingTreeIterator ? treeWalk.getTree(T_FILE,
WorkingTreeIterator.class) : null,
ignoreConflicts, attributes)) {
- cleanUp();
+ workTreeUpdater.revertModifiedFiles();
return false;
}
- if (treeWalk.isSubtree() && enterSubtree)
+ if (treeWalk.isSubtree() && enterSubtree) {
treeWalk.enterSubtree();
+ }
}
return true;
}
--- /dev/null
+/*
+ * Copyright (C) 2022, Google Inc. and others
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Distribution License v. 1.0 which is available at
+ * https://www.eclipse.org/org/documents/edl-v10.php.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+package org.eclipse.jgit.util;
+
+import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
+
+import java.io.BufferedInputStream;
+import java.io.Closeable;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.file.Files;
+import java.nio.file.StandardCopyOption;
+import java.time.Instant;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import org.eclipse.jgit.annotations.Nullable;
+import org.eclipse.jgit.attributes.Attribute;
+import org.eclipse.jgit.attributes.Attributes;
+import org.eclipse.jgit.dircache.DirCache;
+import org.eclipse.jgit.dircache.DirCacheBuildIterator;
+import org.eclipse.jgit.dircache.DirCacheBuilder;
+import org.eclipse.jgit.dircache.DirCacheCheckout;
+import org.eclipse.jgit.dircache.DirCacheCheckout.CheckoutMetadata;
+import org.eclipse.jgit.dircache.DirCacheEntry;
+import org.eclipse.jgit.errors.IndexWriteException;
+import org.eclipse.jgit.errors.LargeObjectException;
+import org.eclipse.jgit.errors.NoWorkTreeException;
+import org.eclipse.jgit.internal.JGitText;
+import org.eclipse.jgit.lib.Config;
+import org.eclipse.jgit.lib.ConfigConstants;
+import org.eclipse.jgit.lib.Constants;
+import org.eclipse.jgit.lib.CoreConfig.EolStreamType;
+import org.eclipse.jgit.lib.FileMode;
+import org.eclipse.jgit.lib.ObjectId;
+import org.eclipse.jgit.lib.ObjectInserter;
+import org.eclipse.jgit.lib.ObjectLoader;
+import org.eclipse.jgit.lib.ObjectReader;
+import org.eclipse.jgit.lib.ObjectStream;
+import org.eclipse.jgit.lib.Repository;
+import org.eclipse.jgit.treewalk.TreeWalk.OperationType;
+import org.eclipse.jgit.treewalk.WorkingTreeOptions;
+import org.eclipse.jgit.util.LfsFactory.LfsInputStream;
+import org.eclipse.jgit.util.io.EolStreamTypeUtil;
+
+/**
+ * Handles work tree updates on both the checkout and the index.
+ * <p>
+ * You should use a single instance for all of your file changes. In case of an error, make sure
+ * your instance is released, and initiate a new one if necessary.
+ */
+public class WorkTreeUpdater implements Closeable {
+
+	/**
+	 * The result of writing the index changes.
+	 * <p>
+	 * NOTE(review): fields are public and mutable; they are populated by the
+	 * updater when the index changes are written — consider making them
+	 * final/immutable in a follow-up.
+	 */
+	public static class Result {
+
+		/**
+		 * Files modified during this operation.
+		 */
+		public List<String> modifiedFiles = new LinkedList<>();
+
+		/**
+		 * Files in this list could not be deleted.
+		 */
+		public List<String> failedToDelete = new LinkedList<>();
+
+		/**
+		 * Modified tree ID if any, or null otherwise.
+		 */
+		public ObjectId treeId = null;
+	}
+
+	/**
+	 * Accumulated result of this updater.
+	 * NOTE(review): package-private by omission — confirm intended, or make
+	 * private with an accessor.
+	 */
+	Result result = new Result();
+
+	/**
+	 * The repository this handler operates on.
+	 */
+	@Nullable
+	private final Repository repo;
+
+	/**
+	 * Set to true if this operation should work in-memory. The repo's dircache and
+	 * workingtree are not touched by this method. Eventually needed files are
+	 * created as temporary files and a new empty, in-memory dircache will be
+	 * used instead of the repo's one. Often used for bare repos where the repo
+	 * doesn't even have a workingtree and dircache.
+	 */
+	private final boolean inCore;
+
+	private final ObjectInserter inserter;
+	private final ObjectReader reader;
+	private DirCache dirCache;
+	private boolean implicitDirCache = false;
+
+	/**
+	 * Builder to update the dir cache during this operation.
+	 */
+	private DirCacheBuilder builder = null;
+
+	/**
+	 * The {@link WorkingTreeOptions} are needed to determine line endings for affected files.
+	 */
+	private WorkingTreeOptions workingTreeOptions;
+
+	/**
+	 * The size limit (bytes) which controls a file to be stored in {@code Heap} or {@code LocalFile}
+	 * during the operation.
+	 */
+	private int inCoreFileSizeLimit;
+
+	/**
+	 * If the operation has nothing to do for a file but check it out at the end of the operation, it
+	 * can be added here.
+	 */
+	private final Map<String, DirCacheEntry> toBeCheckedOut = new HashMap<>();
+
+	/**
+	 * Files in this list will be deleted from the local copy at the end of the operation.
+	 */
+	private final TreeMap<String, File> toBeDeleted = new TreeMap<>();
+
+	/**
+	 * Keeps {@link CheckoutMetadata} for {@link #checkout()}.
+	 */
+	private Map<String, CheckoutMetadata> checkoutMetadata;
+
+	/**
+	 * Keeps {@link CheckoutMetadata} for {@link #revertModifiedFiles()}.
+	 */
+	private Map<String, CheckoutMetadata> cleanupMetadata;
+
+	/**
+	 * Whether the changes were successfully written.
+	 */
+	private boolean indexChangesWritten = false;
+
+	/**
+	 * Creates an updater that writes through to the repository's working tree
+	 * and index (non in-core mode).
+	 *
+	 * @param repo the {@link org.eclipse.jgit.lib.Repository}; must be non-null
+	 *        here, since its inserter and config are read immediately.
+	 * @param dirCache if set, use the provided dir cache. Otherwise, use the default repository one
+	 */
+	private WorkTreeUpdater(
+			Repository repo,
+			DirCache dirCache) {
+		this.repo = repo;
+		this.dirCache = dirCache;
+
+		this.inCore = false;
+		this.inserter = repo.newObjectInserter();
+		this.reader = inserter.newReader();
+		this.workingTreeOptions = repo.getConfig().get(WorkingTreeOptions.KEY);
+		this.checkoutMetadata = new HashMap<>();
+		this.cleanupMetadata = new HashMap<>();
+		this.inCoreFileSizeLimit = setInCoreFileSizeLimit(repo.getConfig());
+	}
+
+ /**
+ * Creates a handler operating on the working tree (not in-core).
+ *
+ * @param repo the {@link org.eclipse.jgit.lib.Repository}.
+ * @param dirCache if set, use the provided dir cache. Otherwise, use the default repository one
+ * @return an IO handler.
+ */
+ public static WorkTreeUpdater createWorkTreeUpdater(Repository repo, DirCache dirCache) {
+ return new WorkTreeUpdater(repo, dirCache);
+ }
+
+ /**
+ * Creates an in-core handler: results are written with the given inserter and
+ * no working-tree files are touched.
+ *
+ * @param repo the {@link org.eclipse.jgit.lib.Repository}.
+ * @param dirCache if set, use the provided dir cache. Otherwise, creates a new one
+ * @param oi to use for writing the modified objects with.
+ */
+ private WorkTreeUpdater(
+ Repository repo,
+ DirCache dirCache,
+ ObjectInserter oi) {
+ this.repo = repo;
+ this.dirCache = dirCache;
+ this.inserter = oi;
+
+ this.inCore = true;
+ this.reader = oi.newReader();
+ if (repo != null) {
+ this.inCoreFileSizeLimit = setInCoreFileSizeLimit(repo.getConfig());
+ }
+ // NOTE(review): when repo is null the limit stays at its default of 0 —
+ // confirm callers handle a zero in-core size limit as intended.
+ }
+
+ /**
+ * Creates an in-core handler; no working-tree files are modified.
+ *
+ * @param repo the {@link org.eclipse.jgit.lib.Repository}.
+ * @param dirCache if set, use the provided dir cache. Otherwise, creates a new one
+ * @param oi to use for writing the modified objects with.
+ * @return an IO handler.
+ */
+ public static WorkTreeUpdater createInCoreWorkTreeUpdater(Repository repo, DirCache dirCache,
+ ObjectInserter oi) {
+ return new WorkTreeUpdater(repo, dirCache, oi);
+ }
+
+ /**
+ * Something that can supply an {@link InputStream}.
+ *
+ * <p>Single abstract method, so it can be implemented with a lambda.
+ */
+ public interface StreamSupplier {
+
+ /**
+ * Loads the input stream.
+ *
+ * @return the loaded stream
+ * @throws IOException if any reading error occurs
+ */
+ InputStream load() throws IOException;
+ }
+
+ /**
+ * We write the patch result to a {@link org.eclipse.jgit.util.TemporaryBuffer} and then use
+ * {@link DirCacheCheckout}.getContent() to run the result through the CR-LF and smudge filters.
+ * DirCacheCheckout needs an ObjectLoader, not a TemporaryBuffer, so this class bridges between
+ * the two, making any Stream provided by a {@link StreamSupplier} look like an ordinary git blob
+ * to DirCacheCheckout.
+ */
+ public static class StreamLoader extends ObjectLoader {
+
+ private final StreamSupplier data;
+
+ private final long size;
+
+ private StreamLoader(StreamSupplier data, long length) {
+ this.data = data;
+ this.size = length;
+ }
+
+ @Override
+ public int getType() {
+ // Always a blob: this loader only ever represents file content.
+ return Constants.OBJ_BLOB;
+ }
+
+ @Override
+ public long getSize() {
+ return size;
+ }
+
+ @Override
+ public boolean isLarge() {
+ // Always report "large" so consumers stream via openStream() instead of
+ // asking for cached bytes, which this loader cannot provide.
+ return true;
+ }
+
+ @Override
+ public byte[] getCachedBytes() throws LargeObjectException {
+ // Unsupported by design; see isLarge().
+ throw new LargeObjectException();
+ }
+
+ @Override
+ public ObjectStream openStream() throws IOException {
+ return new ObjectStream.Filter(getType(), getSize(), new BufferedInputStream(data.load()));
+ }
+ }
+
+ /**
+ * Creates stream loader for the given supplier.
+ *
+ * @param supplier to wrap
+ * @param length of the supplied content
+ * @return the result stream loader
+ */
+ public static StreamLoader createStreamLoader(StreamSupplier supplier, long length) {
+ return new StreamLoader(supplier, length);
+ }
+
+ // Reads merge.inCoreLimit from the given config (default 10 MiB). Despite
+ // the "set" prefix, this method has no side effects; it only computes the
+ // value, which callers assign to inCoreFileSizeLimit.
+ private static int setInCoreFileSizeLimit(Config config) {
+ return config.getInt(
+ ConfigConstants.CONFIG_MERGE_SECTION, ConfigConstants.CONFIG_KEY_IN_CORE_LIMIT, 10 << 20);
+ }
+
+ /**
+ * Gets the size limit for in-core files in this config.
+ *
+ * @return the size in bytes
+ */
+ public int getInCoreFileSizeLimit() {
+ return inCoreFileSizeLimit;
+ }
+
+ /**
+ * Gets dir cache for the repo. Locked if not inCore.
+ *
+ * <p>Also lazily creates the {@link DirCacheBuilder}; this method must be
+ * called before {@link #createDirCacheBuildIterator()}.
+ *
+ * @return the result dir cache
+ * @throws IOException in case the dir cache cannot be read
+ */
+ public DirCache getLockedDirCache() throws IOException {
+ if (dirCache == null) {
+ // We created/locked the dircache ourselves; close() will release it.
+ implicitDirCache = true;
+ if (inCore) {
+ dirCache = DirCache.newInCore();
+ } else {
+ dirCache = nonNullNonBareRepo().lockDirCache();
+ }
+ }
+ if (builder == null) {
+ builder = dirCache.builder();
+ }
+ return dirCache;
+ }
+
+ /**
+ * Creates build iterator for the handler's builder.
+ *
+ * <p>NOTE(review): throws if {@link #getLockedDirCache()} has not been called
+ * yet, since {@code builder} is only created there — confirm all callers
+ * obey this ordering.
+ *
+ * @return the iterator
+ */
+ public DirCacheBuildIterator createDirCacheBuildIterator() {
+ return new DirCacheBuildIterator(builder);
+ }
+
+ /**
+ * Writes the changes to the WorkTree (but not the index).
+ *
+ * <p>For in-core operations only the index builder is finished; no files are
+ * checked out or reverted.
+ *
+ * @param shouldCheckoutTheirs before committing the changes
+ * @throws IOException if any of the writes fail
+ */
+ public void writeWorkTreeChanges(boolean shouldCheckoutTheirs) throws IOException {
+ handleDeletedFiles();
+
+ if (inCore) {
+ builder.finish();
+ return;
+ }
+ if (shouldCheckoutTheirs) {
+ // No problem found. The only thing left to be done is to
+ // check out all files from "theirs" which have been selected to
+ // go into the new index.
+ checkout();
+ }
+
+ // All content operations are successfully done. If we can now write the
+ // new index we are on quite safe ground. Even if the checkout of
+ // files coming from "theirs" fails the user can work around such
+ // failures by checking out the index again.
+ if (!builder.commit()) {
+ revertModifiedFiles();
+ throw new IndexWriteException();
+ }
+ }
+
+ /**
+ * Writes the changes to the index.
+ *
+ * <p>On success, sets {@code indexChangesWritten}, which disables
+ * {@link #revertModifiedFiles()} for non-in-core operations.
+ *
+ * @return the Result of the operation.
+ * @throws IOException if any of the writes fail
+ */
+ public Result writeIndexChanges() throws IOException {
+ result.treeId = getLockedDirCache().writeTree(inserter);
+ indexChangesWritten = true;
+ return result;
+ }
+
+ /**
+ * Adds a {@link DirCacheEntry} for direct checkout and remembers its {@link CheckoutMetadata}.
+ *
+ * @param path of the entry
+ * @param entry to add; may be null to record only the metadata for the path
+ * @param cleanupStreamType to use for the cleanup metadata
+ * @param cleanupSmudgeCommand to use for the cleanup metadata
+ * @param checkoutStreamType to use for the checkout metadata
+ * @param checkoutSmudgeCommand to use for the checkout metadata
+ * @since 6.1
+ */
+ public void addToCheckout(
+ String path, DirCacheEntry entry, EolStreamType cleanupStreamType,
+ String cleanupSmudgeCommand, EolStreamType checkoutStreamType, String checkoutSmudgeCommand) {
+ if (entry != null) {
+ // In some cases, we just want to add the metadata.
+ toBeCheckedOut.put(path, entry);
+ }
+ addCheckoutMetadata(cleanupMetadata, path, cleanupStreamType, cleanupSmudgeCommand);
+ addCheckoutMetadata(checkoutMetadata, path, checkoutStreamType, checkoutSmudgeCommand);
+ }
+
+ /**
+ * Get a map which maps the paths of files which have to be checked out because the operation
+ * created new fully-merged content for this file into the index.
+ *
+ * <p>This means: the operation wrote a new stage 0 entry for this path.</p>
+ *
+ * @return the map (live view, not a copy)
+ */
+ public Map<String, DirCacheEntry> getToBeCheckedOut() {
+ return toBeCheckedOut;
+ }
+
+ /**
+ * Deletes the given file
+ * <p>
+ * Note the actual deletion is only done in {@link #writeWorkTreeChanges}
+ *
+ * @param path of the file to be deleted
+ * @param file to be deleted
+ * @param streamType to use for cleanup metadata
+ * @param smudgeCommand to use for cleanup metadata
+ * @throws IOException if the file cannot be deleted
+ */
+ public void deleteFile(String path, File file, EolStreamType streamType, String smudgeCommand)
+ throws IOException {
+ toBeDeleted.put(path, file);
+ // Metadata is only needed for regular files that might be restored later;
+ // directories and missing files have no content to revert.
+ if (file != null && file.isFile()) {
+ addCheckoutMetadata(cleanupMetadata, path, streamType, smudgeCommand);
+ }
+ }
+
+ /**
+ * Remembers the {@link CheckoutMetadata} for the given path; it may be needed in {@link
+ * #checkout()} or in {@link #revertModifiedFiles()}.
+ *
+ * <p>No-op for in-core operations, which never touch the working tree.
+ *
+ * @param map to add the metadata to
+ * @param path of the current node
+ * @param streamType to use for the metadata
+ * @param smudgeCommand to use for the metadata
+ * @since 6.1
+ */
+ private void addCheckoutMetadata(
+ Map<String, CheckoutMetadata> map, String path, EolStreamType streamType,
+ String smudgeCommand) {
+ if (inCore || map == null) {
+ return;
+ }
+ map.put(path, new CheckoutMetadata(streamType, smudgeCommand));
+ }
+
+ /**
+ * Detects if CRLF conversion has been configured.
+ *
+ * <p>See {@link EolStreamTypeUtil#detectStreamType} for more info.
+ *
+ * @param attributes of the file for which the type is to be detected
+ * @return the detected type, or null for in-core operations
+ */
+ public EolStreamType detectCheckoutStreamType(Attributes attributes) {
+ if (inCore) {
+ return null;
+ }
+ return EolStreamTypeUtil.detectStreamType(
+ OperationType.CHECKOUT_OP, workingTreeOptions, attributes);
+ }
+
+ // Deletes all files queued via deleteFile(); no-op for in-core operations.
+ private void handleDeletedFiles() {
+ // Iterate in reverse so that "folder/file" is deleted before
+ // "folder". Otherwise, this could result in a failing path because
+ // of a non-empty directory, for which delete() would fail.
+ for (String path : toBeDeleted.descendingKeySet()) {
+ File file = inCore ? null : toBeDeleted.get(path);
+ if (file != null && !file.delete()) {
+ // A failed directory delete is tolerated (it may simply be
+ // non-empty); only failing file deletes are reported.
+ if (!file.isDirectory()) {
+ result.failedToDelete.add(path);
+ }
+ }
+ }
+ }
+
+ /**
+ * Marks the given path as modified in the operation.
+ *
+ * @param path to mark as modified
+ */
+ public void markAsModified(String path) {
+ result.modifiedFiles.add(path);
+ }
+
+ /**
+ * Gets the list of files which were modified in this operation.
+ *
+ * @return the list (live view, not a copy)
+ */
+ public List<String> getModifiedFiles() {
+ return result.modifiedFiles;
+ }
+
+ // Checks out every entry queued in toBeCheckedOut, recording checked-out
+ // files as modified. GITLINK (submodule) entries only get their directory
+ // created. NOTE(review): the previous comment here about reverse-order
+ // deletion was copied from handleDeletedFiles() and did not apply — this
+ // loop iterates the map in its natural (unspecified) order and deletes
+ // nothing.
+ private void checkout() throws NoWorkTreeException, IOException {
+ for (Map.Entry<String, DirCacheEntry> entry : toBeCheckedOut.entrySet()) {
+ DirCacheEntry dirCacheEntry = entry.getValue();
+ if (dirCacheEntry.getFileMode() == FileMode.GITLINK) {
+ new File(nonNullNonBareRepo().getWorkTree(), entry.getKey()).mkdirs();
+ } else {
+ DirCacheCheckout.checkoutEntry(
+ repo, dirCacheEntry, reader, false, checkoutMetadata.get(entry.getKey()));
+ result.modifiedFiles.add(entry.getKey());
+ }
+ }
+ }
+
+ /**
+ * Reverts any uncommitted changes in the worktree. We know that for all modified files the
+ * old content was in the old index and the index contained only stage 0. In case of inCore
+ * operation just clear the history of modified files.
+ *
+ * @throws java.io.IOException in case the cleaning up failed
+ */
+ public void revertModifiedFiles() throws IOException {
+ if (inCore) {
+ result.modifiedFiles.clear();
+ return;
+ }
+ // Nothing to revert once the index changes have been written out.
+ if (indexChangesWritten) {
+ return;
+ }
+ for (String path : result.modifiedFiles) {
+ DirCacheEntry entry = dirCache.getEntry(path);
+ if (entry != null) {
+ DirCacheCheckout.checkoutEntry(
+ repo, entry, reader, false, cleanupMetadata.get(path));
+ }
+ }
+ }
+
+ @Override
+ public void close() throws IOException {
+ // Release the index lock only if we acquired it ourselves in
+ // getLockedDirCache(). NOTE(review): inserter/reader are not closed
+ // here — presumably owned by the caller; confirm.
+ if (implicitDirCache) {
+ dirCache.unlock();
+ }
+ }
+
+ /**
+ * Updates the file in the checkout with the given content.
+ *
+ * <p>No-op for in-core operations, which have no working tree to write to.
+ *
+ * @param resultStreamLoader with the content to be updated
+ * @param streamType for parsing the content
+ * @param smudgeCommand for formatting the content
+ * @param path of the file to be updated
+ * @param file to be updated
+ * @param safeWrite whether the content should be written to a buffer first
+ * @throws IOException if the {@link CheckoutMetadata} cannot be determined
+ */
+ public void updateFileWithContent(
+ StreamLoader resultStreamLoader,
+ EolStreamType streamType,
+ String smudgeCommand,
+ String path,
+ File file,
+ boolean safeWrite)
+ throws IOException {
+ if (inCore) {
+ return;
+ }
+ CheckoutMetadata checkoutMetadata = new CheckoutMetadata(streamType, smudgeCommand);
+ if (safeWrite) {
+ try (org.eclipse.jgit.util.TemporaryBuffer buffer =
+ new org.eclipse.jgit.util.TemporaryBuffer.LocalFile(null)) {
+ // Write to a buffer and copy to the file only if everything was fine.
+ DirCacheCheckout.getContent(
+ repo, path, checkoutMetadata, resultStreamLoader, null, buffer);
+ // Close the buffer's stream as well; it was previously left open.
+ try (InputStream bufIn = buffer.openInputStream()) {
+ Files.copy(bufIn, file.toPath(), StandardCopyOption.REPLACE_EXISTING);
+ }
+ }
+ return;
+ }
+ // try-with-resources: the previous version leaked this FileOutputStream.
+ try (OutputStream outputStream = new FileOutputStream(file)) {
+ DirCacheCheckout.getContent(
+ repo, path, checkoutMetadata, resultStreamLoader, null, outputStream);
+ }
+ }
+
+ /**
+ * Creates a path with the given content, and adds it to the specified stage to the index builder
+ *
+ * <p>Convenience overload: wraps the stream in a {@link StreamLoader} and
+ * delegates to {@link #insertToIndex(StreamLoader, byte[], FileMode, int, Instant, int, Attribute)}.
+ *
+ * @param inputStream with the content to be updated
+ * @param path of the file to be updated
+ * @param fileMode of the modified file
+ * @param entryStage of the new entry
+ * @param lastModified instant of the modified file
+ * @param len of the content
+ * @param lfsAttribute for checking for LFS enablement
+ * @return the entry which was added to the index
+ * @throws IOException if inserting the content fails
+ */
+ public DirCacheEntry insertToIndex(
+ InputStream inputStream,
+ byte[] path,
+ FileMode fileMode,
+ int entryStage,
+ Instant lastModified,
+ int len,
+ Attribute lfsAttribute) throws IOException {
+ StreamLoader contentLoader = createStreamLoader(() -> inputStream, len);
+ return insertToIndex(contentLoader, path, fileMode, entryStage, lastModified, len,
+ lfsAttribute);
+ }
+
+ /**
+ * Creates a path with the given content, and adds it to the specified stage to the index builder
+ *
+ * @param resultStreamLoader with the content to be updated
+ * @param path of the file to be updated
+ * @param fileMode of the modified file
+ * @param entryStage of the new entry
+ * @param lastModified instant of the modified file
+ * @param len of the content
+ * @param lfsAttribute for checking for LFS enablement
+ * @return the entry which was added to the index
+ * @throws IOException if inserting the content fails
+ */
+ public DirCacheEntry insertToIndex(
+ StreamLoader resultStreamLoader,
+ byte[] path,
+ FileMode fileMode,
+ int entryStage,
+ Instant lastModified,
+ int len,
+ Attribute lfsAttribute) throws IOException {
+ // First inserts the blob into the object database, then records it.
+ return addExistingToIndex(insertResult(resultStreamLoader, lfsAttribute),
+ path, fileMode, entryStage, lastModified, len);
+ }
+
+ /**
+ * Adds a path with the specified stage to the index builder
+ *
+ * @param objectId of the existing object to add
+ * @param path of the modified file
+ * @param fileMode of the modified file
+ * @param entryStage of the new entry
+ * @param lastModified instant of the modified file; may be null to leave unset
+ * @param len of the modified file content
+ * @return the entry which was added to the index
+ */
+ public DirCacheEntry addExistingToIndex(
+ ObjectId objectId,
+ byte[] path,
+ FileMode fileMode,
+ int entryStage,
+ Instant lastModified,
+ int len) {
+ DirCacheEntry dce = new DirCacheEntry(path, entryStage);
+ dce.setFileMode(fileMode);
+ if (lastModified != null) {
+ dce.setLastModified(lastModified);
+ }
+ // In-core operations have no file on disk backing the entry, so the
+ // recorded length is 0 there.
+ dce.setLength(inCore ? 0 : len);
+
+ dce.setObjectId(objectId);
+ builder.add(dce);
+ return dce;
+ }
+
+ // Runs the content through the LFS clean filter (if the attribute enables
+ // it) and inserts the resulting blob into the object database.
+ private ObjectId insertResult(StreamLoader resultStreamLoader, Attribute lfsAttribute)
+ throws IOException {
+ try (LfsInputStream is =
+ org.eclipse.jgit.util.LfsFactory.getInstance()
+ .applyCleanFilter(
+ repo,
+ resultStreamLoader.data.load(),
+ resultStreamLoader.size,
+ lfsAttribute)) {
+ return inserter.insert(OBJ_BLOB, is.getLength(), is);
+ }
+ }
+
+ /**
+ * Gets non-null repository instance
+ *
+ * @return non-null repository instance
+ * @throws java.lang.NullPointerException if the handler was constructed without a repository.
+ */
+ private Repository nonNullRepo() throws NullPointerException {
+ if (repo == null) {
+ throw new NullPointerException(JGitText.get().repositoryIsRequired);
+ }
+ return repo;
+ }
+
+
+ /**
+ * Gets non-null and non-bare repository instance
+ *
+ * @return non-null and non-bare repository instance
+ * @throws java.lang.NullPointerException if the handler was constructed without a repository.
+ * @throws NoWorkTreeException if the handler was constructed with a bare repository
+ */
+ private Repository nonNullNonBareRepo() throws NullPointerException, NoWorkTreeException {
+ if (nonNullRepo().isBare()) {
+ throw new NoWorkTreeException();
+ }
+ return repo;
+ }
+}
\ No newline at end of file