WorkingTreeIterator.java
  1. /*
  2. * Copyright (C) 2008, Shawn O. Pearce <spearce@spearce.org>
  3. * Copyright (C) 2010, Christian Halstrick <christian.halstrick@sap.com>
  4. * Copyright (C) 2010, Matthias Sohn <matthias.sohn@sap.com>
  5. * Copyright (C) 2012-2013, Robin Rosenberg
  6. * and other copyright owners as documented in the project's IP log.
  7. *
  8. * This program and the accompanying materials are made available
  9. * under the terms of the Eclipse Distribution License v1.0 which
  10. * accompanies this distribution, is reproduced below, and is
  11. * available at http://www.eclipse.org/org/documents/edl-v10.php
  12. *
  13. * All rights reserved.
  14. *
  15. * Redistribution and use in source and binary forms, with or
  16. * without modification, are permitted provided that the following
  17. * conditions are met:
  18. *
  19. * - Redistributions of source code must retain the above copyright
  20. * notice, this list of conditions and the following disclaimer.
  21. *
  22. * - Redistributions in binary form must reproduce the above
  23. * copyright notice, this list of conditions and the following
  24. * disclaimer in the documentation and/or other materials provided
  25. * with the distribution.
  26. *
  27. * - Neither the name of the Eclipse Foundation, Inc. nor the
  28. * names of its contributors may be used to endorse or promote
  29. * products derived from this software without specific prior
  30. * written permission.
  31. *
  32. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
  33. * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
  34. * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  35. * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  36. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
  37. * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  38. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  39. * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  40. * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  41. * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  42. * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  43. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  44. * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  45. */
  46. package org.eclipse.jgit.treewalk;
  47. import java.io.ByteArrayInputStream;
  48. import java.io.File;
  49. import java.io.FileInputStream;
  50. import java.io.FileNotFoundException;
  51. import java.io.IOException;
  52. import java.io.InputStream;
  53. import java.nio.ByteBuffer;
  54. import java.nio.CharBuffer;
  55. import java.nio.charset.CharacterCodingException;
  56. import java.nio.charset.CharsetEncoder;
  57. import java.security.MessageDigest;
  58. import java.text.MessageFormat;
  59. import java.util.Arrays;
  60. import java.util.Collections;
  61. import java.util.Comparator;
  62. import org.eclipse.jgit.api.errors.JGitInternalException;
  63. import org.eclipse.jgit.diff.RawText;
  64. import org.eclipse.jgit.dircache.DirCache;
  65. import org.eclipse.jgit.dircache.DirCacheEntry;
  66. import org.eclipse.jgit.dircache.DirCacheIterator;
  67. import org.eclipse.jgit.errors.CorruptObjectException;
  68. import org.eclipse.jgit.errors.MissingObjectException;
  69. import org.eclipse.jgit.errors.NoWorkTreeException;
  70. import org.eclipse.jgit.ignore.IgnoreNode;
  71. import org.eclipse.jgit.ignore.IgnoreRule;
  72. import org.eclipse.jgit.internal.JGitText;
  73. import org.eclipse.jgit.lib.Constants;
  74. import org.eclipse.jgit.lib.CoreConfig;
  75. import org.eclipse.jgit.lib.CoreConfig.CheckStat;
  76. import org.eclipse.jgit.lib.CoreConfig.SymLinks;
  77. import org.eclipse.jgit.lib.FileMode;
  78. import org.eclipse.jgit.lib.ObjectId;
  79. import org.eclipse.jgit.lib.ObjectLoader;
  80. import org.eclipse.jgit.lib.ObjectReader;
  81. import org.eclipse.jgit.lib.Repository;
  82. import org.eclipse.jgit.submodule.SubmoduleWalk;
  83. import org.eclipse.jgit.util.FS;
  84. import org.eclipse.jgit.util.IO;
  85. import org.eclipse.jgit.util.RawParseUtils;
  86. import org.eclipse.jgit.util.io.EolCanonicalizingInputStream;
  87. /**
  88. * Walks a working directory tree as part of a {@link TreeWalk}.
  89. * <p>
  90. * Most applications will want to use the standard implementation of this
  91. * iterator, {@link FileTreeIterator}, as that does all IO through the standard
  92. * <code>java.io</code> package. Plugins for a Java based IDE may however wish
  93. * to create their own implementations of this class to allow traversal of the
  94. * IDE's project space, as well as benefit from any caching the IDE may have.
  95. *
  96. * @see FileTreeIterator
  97. */
  98. public abstract class WorkingTreeIterator extends AbstractTreeIterator {
  99. /** An empty entry array, suitable for {@link #init(Entry[])}. */
  100. protected static final Entry[] EOF = {};
  101. /** Size we perform file IO in if we have to read and hash a file. */
  102. static final int BUFFER_SIZE = 2048;
  103. /**
  104. * Maximum size of files which may be read fully into memory for performance
  105. * reasons.
  106. */
  107. private static final long MAXIMUM_FILE_SIZE_TO_READ_FULLY = 65536;
  108. /** Inherited state of this iterator, describing working tree, etc. */
  109. private final IteratorState state;
  110. /** The {@link #idBuffer()} for the current entry. */
  111. private byte[] contentId;
  112. /** Index within {@link #entries} that {@link #contentId} came from. */
  113. private int contentIdFromPtr;
  114. /** List of entries obtained from the subclass. */
  115. private Entry[] entries;
  116. /** Total number of entries in {@link #entries} that are valid. */
  117. private int entryCnt;
  118. /** Current position within {@link #entries}. */
  119. private int ptr;
  120. /** If there is a .gitignore file present, the parsed rules from it. */
  121. private IgnoreNode ignoreNode;
  122. /** The repository whose working tree root is being iterated over. */
  123. protected Repository repository;
  124. /** Cached canonical length, initialized from {@link #idBuffer()} */
  125. private long canonLen = -1;
  126. /** The offset of the content id in {@link #idBuffer()} */
  127. private int contentIdOffset;
  128. /**
  129. * Create a new iterator with no parent.
  130. *
  131. * @param options
  132. * working tree options to be used
  133. */
  134. protected WorkingTreeIterator(WorkingTreeOptions options) {
  135. super();
  136. state = new IteratorState(options);
  137. }
  138. /**
  139. * Create a new iterator with no parent and a prefix.
  140. * <p>
  141. * The prefix path supplied is inserted in front of all paths generated by
  142. * this iterator. It is intended to be used when an iterator is being
  143. * created for a subsection of an overall repository and needs to be
  144. * combined with other iterators that are created to run over the entire
  145. * repository namespace.
  146. *
  147. * @param prefix
  148. * position of this iterator in the repository tree. The value
  149. * may be null or the empty string to indicate the prefix is the
  150. * root of the repository. A trailing slash ('/') is
  151. * automatically appended if the prefix does not end in '/'.
  152. * @param options
  153. * working tree options to be used
  154. */
  155. protected WorkingTreeIterator(final String prefix,
  156. WorkingTreeOptions options) {
  157. super(prefix);
  158. state = new IteratorState(options);
  159. }
  160. /**
  161. * Create an iterator for a subtree of an existing iterator.
  162. *
  163. * @param p
  164. * parent tree iterator.
  165. */
  166. protected WorkingTreeIterator(final WorkingTreeIterator p) {
  167. super(p);
  168. state = p.state;
  169. }
  170. /**
  171. * Initialize this iterator for the root level of a repository.
  172. * <p>
  173. * This method should only be invoked after calling {@link #init(Entry[])},
  174. * and only for the root iterator.
  175. *
  176. * @param repo
  177. * the repository.
  178. */
  179. protected void initRootIterator(Repository repo) {
  180. repository = repo;
  181. Entry entry;
  182. if (ignoreNode instanceof PerDirectoryIgnoreNode)
  183. entry = ((PerDirectoryIgnoreNode) ignoreNode).entry;
  184. else
  185. entry = null;
  186. ignoreNode = new RootIgnoreNode(entry, repo);
  187. }
  188. /**
  189. * Define the matching {@link DirCacheIterator}, to reuse its cached ObjectIds when the working tree entry appears unmodified.
  190. *
  191. * Once the DirCacheIterator has been set this iterator must only be
  192. * advanced by the TreeWalk that is supplied, as it assumes that itself and
  193. * the corresponding DirCacheIterator are positioned on the same file path
  194. * whenever {@link #idBuffer()} is invoked.
  195. *
  196. * @param walk
  197. * the walk that will be advancing this iterator.
  198. * @param treeId
  199. * index of the matching {@link DirCacheIterator}.
  200. */
  201. public void setDirCacheIterator(TreeWalk walk, int treeId) {
  202. state.walk = walk;
  203. state.dirCacheTree = treeId;
  204. }
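/*
 * Usage sketch: wiring a working tree iterator to the index so that
 * idBuffer() can reuse cached ObjectIds. This assumes the standard JGit
 * TreeWalk / DirCacheIterator / FileTreeIterator API; the method and
 * variable names are illustrative only.
 *
 *   static void wireUp(Repository repo) throws java.io.IOException {
 *       TreeWalk walk = new TreeWalk(repo);
 *       int dcIdx = walk.addTree(new DirCacheIterator(repo.readDirCache()));
 *       FileTreeIterator wtIter = new FileTreeIterator(repo);
 *       walk.addTree(wtIter);
 *       wtIter.setDirCacheIterator(walk, dcIdx);
 *   }
 */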
  205. @Override
  206. public boolean hasId() {
  207. if (contentIdFromPtr == ptr)
  208. return true;
  209. return (mode & FileMode.TYPE_MASK) == FileMode.TYPE_FILE;
  210. }
  211. @Override
  212. public byte[] idBuffer() {
  213. if (contentIdFromPtr == ptr)
  214. return contentId;
  215. if (state.walk != null) {
  216. // If there is a matching DirCacheIterator, we can reuse
  217. // its idBuffer, but only if we appear to be clean against
  218. // the cached index information for the path.
  219. //
  220. DirCacheIterator i = state.walk.getTree(state.dirCacheTree,
  221. DirCacheIterator.class);
  222. if (i != null) {
  223. DirCacheEntry ent = i.getDirCacheEntry();
  224. if (ent != null && compareMetadata(ent) == MetadataDiff.EQUAL) {
  225. contentIdOffset = i.idOffset();
  226. contentIdFromPtr = ptr;
  227. return contentId = i.idBuffer();
  228. }
  229. contentIdOffset = 0;
  230. } else {
  231. contentIdOffset = 0;
  232. }
  233. }
  234. switch (mode & FileMode.TYPE_MASK) {
  235. case FileMode.TYPE_SYMLINK:
  236. case FileMode.TYPE_FILE:
  237. contentIdFromPtr = ptr;
  238. return contentId = idBufferBlob(entries[ptr]);
  239. case FileMode.TYPE_GITLINK:
  240. contentIdFromPtr = ptr;
  241. return contentId = idSubmodule(entries[ptr]);
  242. }
  243. return zeroid;
  244. }
  245. /**
  246. * Get submodule id for given entry.
  247. *
  248. * @param e
  249. * @return non-null submodule id
  250. */
  251. protected byte[] idSubmodule(Entry e) {
  252. if (repository == null)
  253. return zeroid;
  254. File directory;
  255. try {
  256. directory = repository.getWorkTree();
  257. } catch (NoWorkTreeException nwte) {
  258. return zeroid;
  259. }
  260. return idSubmodule(directory, e);
  261. }
  262. /**
  263. * Get submodule id using the repository at the location of the entry
  264. * relative to the directory.
  265. *
  266. * @param directory
  267. * @param e
  268. * @return non-null submodule id
  269. */
  270. protected byte[] idSubmodule(File directory, Entry e) {
  271. final Repository submoduleRepo;
  272. try {
  273. submoduleRepo = SubmoduleWalk.getSubmoduleRepository(directory,
  274. e.getName());
  275. } catch (IOException exception) {
  276. return zeroid;
  277. }
  278. if (submoduleRepo == null)
  279. return zeroid;
  280. final ObjectId head;
  281. try {
  282. head = submoduleRepo.resolve(Constants.HEAD);
  283. } catch (IOException exception) {
  284. return zeroid;
  285. } finally {
  286. submoduleRepo.close();
  287. }
  288. if (head == null)
  289. return zeroid;
  290. final byte[] id = new byte[Constants.OBJECT_ID_LENGTH];
  291. head.copyRawTo(id, 0);
  292. return id;
  293. }
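/*
 * Sketch of what the gitlink id above corresponds to. The returned bytes are
 * the raw ObjectId of the submodule's current HEAD commit. This assumes the
 * standard JGit API; "workTree" stands for the parent repository's work tree
 * directory and "libs/foo" is a hypothetical submodule path.
 *
 *   Repository sub = SubmoduleWalk.getSubmoduleRepository(workTree, "libs/foo");
 *   try {
 *       ObjectId head = sub.resolve(Constants.HEAD); // same id as idSubmodule()
 *   } finally {
 *       sub.close();
 *   }
 */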
  294. private static final byte[] digits = { '0', '1', '2', '3', '4', '5', '6',
  295. '7', '8', '9' };
  296. private static final byte[] hblob = Constants
  297. .encodedTypeString(Constants.OBJ_BLOB);
  298. private byte[] idBufferBlob(final Entry e) {
  299. try {
  300. final InputStream is = e.openInputStream();
  301. if (is == null)
  302. return zeroid;
  303. try {
  304. state.initializeDigestAndReadBuffer();
  305. final long len = e.getLength();
  306. InputStream filteredIs = possiblyFilteredInputStream(e, is, len);
  307. return computeHash(filteredIs, canonLen);
  308. } finally {
  309. safeClose(is);
  310. }
  311. } catch (IOException err) {
  312. // Can't read the file? Don't report the failure either.
  313. return zeroid;
  314. }
  315. }
  316. private InputStream possiblyFilteredInputStream(final Entry e,
  317. final InputStream is, final long len) throws IOException {
  318. if (!mightNeedCleaning()) {
  319. canonLen = len;
  320. return is;
  321. }
  322. if (len <= MAXIMUM_FILE_SIZE_TO_READ_FULLY) {
  323. ByteBuffer rawbuf = IO.readWholeStream(is, (int) len);
  324. byte[] raw = rawbuf.array();
  325. int n = rawbuf.limit();
  326. if (!isBinary(raw, n)) {
  327. rawbuf = filterClean(raw, n);
  328. raw = rawbuf.array();
  329. n = rawbuf.limit();
  330. }
  331. canonLen = n;
  332. return new ByteArrayInputStream(raw, 0, n);
  333. }
  334. if (isBinary(e)) {
  335. canonLen = len;
  336. return is;
  337. }
  338. final InputStream lenIs = filterClean(e.openInputStream());
  339. try {
  340. canonLen = computeLength(lenIs);
  341. } finally {
  342. safeClose(lenIs);
  343. }
  344. return filterClean(is);
  345. }
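/*
 * Note on the method above: it distinguishes three cases. If no CRLF cleaning
 * is configured, the raw stream is returned and canonLen is the on-disk
 * length. Small files (up to MAXIMUM_FILE_SIZE_TO_READ_FULLY) are read fully,
 * filtered in memory, and served from a ByteArrayInputStream. Larger files
 * are either passed through unchanged when binary, or filtered twice: once to
 * compute the canonical length and once more for the actual read.
 */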
  346. private static void safeClose(final InputStream in) {
  347. try {
  348. in.close();
  349. } catch (IOException err2) {
  350. // Suppress any error related to closing an input
  351. // stream. We don't care; we should not have any
  352. // outstanding data to flush or anything like that.
  353. }
  354. }
  355. private boolean mightNeedCleaning() {
  356. switch (getOptions().getAutoCRLF()) {
  357. case FALSE:
  358. default:
  359. return false;
  360. case TRUE:
  361. case INPUT:
  362. return true;
  363. }
  364. }
  365. private static boolean isBinary(byte[] content, int sz) {
  366. return RawText.isBinary(content, sz);
  367. }
  368. private static boolean isBinary(Entry entry) throws IOException {
  369. InputStream in = entry.openInputStream();
  370. try {
  371. return RawText.isBinary(in);
  372. } finally {
  373. safeClose(in);
  374. }
  375. }
  376. private static ByteBuffer filterClean(byte[] src, int n)
  377. throws IOException {
  378. InputStream in = new ByteArrayInputStream(src);
  379. try {
  380. return IO.readWholeStream(filterClean(in), n);
  381. } finally {
  382. safeClose(in);
  383. }
  384. }
  385. private static InputStream filterClean(InputStream in) {
  386. return new EolCanonicalizingInputStream(in, true);
  387. }
  388. /**
  389. * Returns the working tree options used by this iterator.
  390. *
  391. * @return working tree options
  392. */
  393. public WorkingTreeOptions getOptions() {
  394. return state.options;
  395. }
  396. @Override
  397. public int idOffset() {
  398. return contentIdOffset;
  399. }
  400. @Override
  401. public void reset() {
  402. if (!first()) {
  403. ptr = 0;
  404. if (!eof())
  405. parseEntry();
  406. }
  407. }
  408. @Override
  409. public boolean first() {
  410. return ptr == 0;
  411. }
  412. @Override
  413. public boolean eof() {
  414. return ptr == entryCnt;
  415. }
  416. @Override
  417. public void next(final int delta) throws CorruptObjectException {
  418. ptr += delta;
  419. if (!eof()) {
  420. parseEntry();
  421. }
  422. }
  423. @Override
  424. public void back(final int delta) throws CorruptObjectException {
  425. ptr -= delta;
  426. parseEntry();
  427. }
  428. private void parseEntry() {
  429. final Entry e = entries[ptr];
  430. mode = e.getMode().getBits();
  431. final int nameLen = e.encodedNameLen;
  432. ensurePathCapacity(pathOffset + nameLen, pathOffset);
  433. System.arraycopy(e.encodedName, 0, path, pathOffset, nameLen);
  434. pathLen = pathOffset + nameLen;
  435. canonLen = -1;
  436. }
  437. /**
  438. * Get the raw byte length of this entry.
  439. *
  440. * @return size of this file, in bytes.
  441. */
  442. public long getEntryLength() {
  443. return current().getLength();
  444. }
  445. /**
  446. * Get the filtered input length of this entry, i.e. its length after content filters such as CRLF conversion.
  447. *
  448. * @return size of the content, in bytes
  449. * @throws IOException
  450. */
  451. public long getEntryContentLength() throws IOException {
  452. if (canonLen == -1) {
  453. long rawLen = getEntryLength();
  454. if (rawLen == 0)
  455. canonLen = 0;
  456. InputStream is = current().openInputStream();
  457. try {
  458. // canonLen gets updated here
  459. possiblyFilteredInputStream(current(), is, current()
  460. .getLength());
  461. } finally {
  462. safeClose(is);
  463. }
  464. }
  465. return canonLen;
  466. }
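/*
 * Illustration of the difference between the two length accessors, assuming
 * core.autocrlf is enabled and a file containing "a\r\nb\r\n":
 * getEntryLength() reports the raw size on disk (6 bytes), while
 * getEntryContentLength() reports the filtered size after CRLF -> LF
 * conversion (4 bytes).
 */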
  467. /**
  468. * Get the last modified time of this entry.
  469. *
  470. * @return last modified time of this file, in milliseconds since the epoch
  471. * (Jan 1, 1970 UTC).
  472. */
  473. public long getEntryLastModified() {
  474. return current().getLastModified();
  475. }
  476. /**
  477. * Obtain an input stream to read the file content.
  478. * <p>
  479. * Efficient implementations are not required. The caller will usually
  480. * obtain the stream only once per entry, if at all.
  481. * <p>
  482. * The input stream should not use buffering if the implementation can avoid
  483. * it. The caller will buffer as necessary to perform efficient block IO
  484. * operations.
  485. * <p>
  486. * The caller will close the stream once complete.
  487. *
  488. * @return a stream to read from the file.
  489. * @throws IOException
  490. * the file could not be opened for reading.
  491. */
  492. public InputStream openEntryStream() throws IOException {
  493. InputStream rawis = current().openInputStream();
  494. if (mightNeedCleaning())
  495. return filterClean(rawis);
  496. else
  497. return rawis;
  498. }
  499. /**
  500. * Determine if the current entry path is ignored by an ignore rule.
  501. *
  502. * @return true if the entry was ignored by an ignore rule file.
  503. * @throws IOException
  504. * a relevant ignore rule file exists but cannot be read.
  505. */
  506. public boolean isEntryIgnored() throws IOException {
  507. return isEntryIgnored(pathLen);
  508. }
  509. /**
  510. * Determine if the entry path is ignored by an ignore rule.
  511. *
  512. * @param pLen
  513. * the length of the path in the path buffer.
  514. * @return true if the entry is ignored by an ignore rule.
  515. * @throws IOException
  516. * a relevant ignore rule file exists but cannot be read.
  517. */
  518. protected boolean isEntryIgnored(final int pLen) throws IOException {
  519. IgnoreNode rules = getIgnoreNode();
  520. if (rules != null) {
  521. // The ignore code wants path to start with a '/' if possible.
  522. // If we have the '/' in our path buffer because we are inside
  523. // a subdirectory include it in the range we convert to string.
  524. //
  525. int pOff = pathOffset;
  526. if (0 < pOff)
  527. pOff--;
  528. String p = TreeWalk.pathOf(path, pOff, pLen);
  529. switch (rules.isIgnored(p, FileMode.TREE.equals(mode))) {
  530. case IGNORED:
  531. return true;
  532. case NOT_IGNORED:
  533. return false;
  534. case CHECK_PARENT:
  535. break;
  536. }
  537. }
  538. if (parent instanceof WorkingTreeIterator)
  539. return ((WorkingTreeIterator) parent).isEntryIgnored(pLen);
  540. return false;
  541. }
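/*
 * Usage sketch: skipping ignored entries while walking the working tree.
 * Assumes a TreeWalk whose tree 0 is a WorkingTreeIterator; names are
 * illustrative.
 *
 *   while (walk.next()) {
 *       WorkingTreeIterator it = walk.getTree(0, WorkingTreeIterator.class);
 *       if (it != null && it.isEntryIgnored())
 *           continue; // a .gitignore or exclude rule matched this path
 *       // ... process walk.getPathString() ...
 *   }
 */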
  542. private IgnoreNode getIgnoreNode() throws IOException {
  543. if (ignoreNode instanceof PerDirectoryIgnoreNode)
  544. ignoreNode = ((PerDirectoryIgnoreNode) ignoreNode).load();
  545. return ignoreNode;
  546. }
  547. private static final Comparator<Entry> ENTRY_CMP = new Comparator<Entry>() {
  548. public int compare(final Entry o1, final Entry o2) {
  549. final byte[] a = o1.encodedName;
  550. final byte[] b = o2.encodedName;
  551. final int aLen = o1.encodedNameLen;
  552. final int bLen = o2.encodedNameLen;
  553. int cPos;
  554. for (cPos = 0; cPos < aLen && cPos < bLen; cPos++) {
  555. final int cmp = (a[cPos] & 0xff) - (b[cPos] & 0xff);
  556. if (cmp != 0)
  557. return cmp;
  558. }
  559. if (cPos < aLen)
  560. return (a[cPos] & 0xff) - lastPathChar(o2);
  561. if (cPos < bLen)
  562. return lastPathChar(o1) - (b[cPos] & 0xff);
  563. return lastPathChar(o1) - lastPathChar(o2);
  564. }
  565. };
  566. static int lastPathChar(final Entry e) {
  567. return e.getMode() == FileMode.TREE ? '/' : '\0';
  568. }
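/*
 * Why lastPathChar() matters: Git sorts trees as if directory names ended
 * with '/'. For example, given a file "a.c", a directory "a" and a file "a0",
 * the comparator above yields the Git order
 *
 *   "a.c"  <  "a" (tree, compares as "a/")  <  "a0"
 *
 * because '.' (0x2E) < '/' (0x2F) < '0' (0x30).
 */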
  569. /**
  570. * Constructor helper.
  571. *
  572. * @param list
  573. * files in the subtree of the work tree this iterator operates
  574. * on
  575. */
  576. protected void init(final Entry[] list) {
  577. // Filter out nulls, . and .. as these are not valid tree entries;
  578. // also cache the encoded forms of the path names for efficient use
  579. // later on during sorting and iteration.
  580. //
  581. entries = list;
  582. int i, o;
  583. final CharsetEncoder nameEncoder = state.nameEncoder;
  584. for (i = 0, o = 0; i < entries.length; i++) {
  585. final Entry e = entries[i];
  586. if (e == null)
  587. continue;
  588. final String name = e.getName();
  589. if (".".equals(name) || "..".equals(name)) //$NON-NLS-1$ //$NON-NLS-2$
  590. continue;
  591. if (Constants.DOT_GIT.equals(name))
  592. continue;
  593. if (Constants.DOT_GIT_IGNORE.equals(name))
  594. ignoreNode = new PerDirectoryIgnoreNode(e);
  595. if (i != o)
  596. entries[o] = e;
  597. e.encodeName(nameEncoder);
  598. o++;
  599. }
  600. entryCnt = o;
  601. Arrays.sort(entries, 0, entryCnt, ENTRY_CMP);
  602. contentIdFromPtr = -1;
  603. ptr = 0;
  604. if (!eof())
  605. parseEntry();
  606. }
  607. /**
  608. * Obtain the current entry from this iterator.
  609. *
  610. * @return the currently selected entry.
  611. */
  612. protected Entry current() {
  613. return entries[ptr];
  614. }
  615. /**
  616. * The result of a metadata-comparison between the current entry and a
  617. * {@link DirCacheEntry}
  618. */
  619. public enum MetadataDiff {
  620. /**
  621. * The entries are equal by metaData (mode, length,
  622. * modification-timestamp) or the <code>assumeValid</code> attribute of
  623. * the index entry is set
  624. */
  625. EQUAL,
  626. /**
  627. * The entries are not equal by metaData (mode, length) or the
  628. * <code>isUpdateNeeded</code> attribute of the index entry is set
  629. */
  630. DIFFER_BY_METADATA,
  631. /** index entry is smudged - can't use that entry for comparison */
  632. SMUDGED,
  633. /**
  634. * The entries are equal by metaData (mode, length) but differ by
  635. * modification-timestamp.
  636. */
  637. DIFFER_BY_TIMESTAMP
  638. }
  639. /**
  640. * Is the file mode of the current entry different from the given raw mode?
  641. *
  642. * @param rawMode
  643. * @return true if different, false otherwise
  644. */
  645. public boolean isModeDifferent(final int rawMode) {
  646. // Determine difference in mode-bits of file and index-entry. In the
  647. // bitwise representation of modeDiff we'll have a '1' at every bit
  648. // position where the two modes differ.
  649. int modeDiff = getEntryRawMode() ^ rawMode;
  650. if (modeDiff == 0)
  651. return false;
  652. // Do not rely on filemode differences in case of symbolic links
  653. if (getOptions().getSymLinks() == SymLinks.FALSE)
  654. if (FileMode.SYMLINK.equals(rawMode))
  655. return false;
  656. // Ignore the executable file bits if WorkingTreeOptions tells us to
  657. // do so. Ignoring is done by setting the bits representing an
  658. // EXECUTABLE_FILE to '0' in modeDiff.
  659. if (!state.options.isFileMode())
  660. modeDiff &= ~FileMode.EXECUTABLE_FILE.getBits();
  661. return modeDiff != 0;
  662. }
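/*
 * Worked example for the mask above: a working tree file with mode 0100755
 * compared against an index entry with mode 0100644 gives modeDiff = 0111
 * (only the execute bits differ). With core.fileMode = false those bits are
 * cleared by the EXECUTABLE_FILE mask, so the two modes compare as equal.
 */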
  663. /**
  664. * Compare the metadata (mode, length, modification-timestamp) of the
  665. * current entry and a {@link DirCacheEntry}
  666. *
  667. * @param entry
  668. * the {@link DirCacheEntry} to compare with
  669. * @return a {@link MetadataDiff} which tells whether and how the entries'
  670. * metadata differ
  671. */
  672. public MetadataDiff compareMetadata(DirCacheEntry entry) {
  673. if (entry.isAssumeValid())
  674. return MetadataDiff.EQUAL;
  675. if (entry.isUpdateNeeded())
  676. return MetadataDiff.DIFFER_BY_METADATA;
  677. if (!entry.isSmudged() && entry.getLength() != (int) getEntryLength())
  678. return MetadataDiff.DIFFER_BY_METADATA;
  679. if (isModeDifferent(entry.getRawMode()))
  680. return MetadataDiff.DIFFER_BY_METADATA;
  681. // Git under Windows only stores seconds, so we round the timestamp
  682. // Java gives us if it looks like the timestamp in the index is in
  683. // seconds only. Otherwise we compare the timestamp at millisecond
  684. // precision, unless core.checkstat is set to "minimal", in which
  685. // case we only compare the whole-second part.
  686. long cacheLastModified = entry.getLastModified();
  687. long fileLastModified = getEntryLastModified();
  688. long lastModifiedMillis = fileLastModified % 1000;
  689. long cacheMillis = cacheLastModified % 1000;
  690. if (getOptions().getCheckStat() == CheckStat.MINIMAL) {
  691. fileLastModified = fileLastModified - lastModifiedMillis;
  692. cacheLastModified = cacheLastModified - cacheMillis;
  693. } else if (cacheMillis == 0)
  694. fileLastModified = fileLastModified - lastModifiedMillis;
  695. // Some Java versions on Linux return whole seconds only, even when
  696. // the file system supports more precision.
  697. else if (lastModifiedMillis == 0)
  698. cacheLastModified = cacheLastModified - cacheMillis;
  699. if (fileLastModified != cacheLastModified)
  700. return MetadataDiff.DIFFER_BY_TIMESTAMP;
  701. else if (!entry.isSmudged())
  702. // The file is clean when you look at timestamps.
  703. return MetadataDiff.EQUAL;
  704. else
  705. return MetadataDiff.SMUDGED;
  706. }
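/*
 * Example of the timestamp rounding above: if the index records
 * 1389000000000 ms (whole seconds, as written by Git on Windows) and the
 * file system reports 1389000000123 ms for the file, the millisecond part of
 * the file time is dropped before comparing, so the entry is still considered
 * EQUAL rather than DIFFER_BY_TIMESTAMP.
 */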
  707. /**
  708. * Checks whether this entry differs from a given entry from the
  709. * {@link DirCache}.
  710. *
  711. * File status information is used; if the status is the same, we consider
  712. * the file identical to the state in the working directory. Native git uses
  713. * more stat fields than are accessible in Java.
  714. *
  715. * @param entry
  716. * the entry from the dircache we want to compare against
  717. * @param forceContentCheck
  718. * True if the actual file content should be checked if
  719. * modification time differs.
  720. * @return true if content is most likely different.
  721. * @deprecated Use {@link #isModified(DirCacheEntry, boolean, ObjectReader)}
  722. */
  723. @Deprecated
  724. public boolean isModified(DirCacheEntry entry, boolean forceContentCheck) {
  725. try {
  726. return isModified(entry, forceContentCheck,
  727. repository.newObjectReader());
  728. } catch (IOException e) {
  729. throw new JGitInternalException(e.getMessage(), e);
  730. }
  731. }
  732. /**
  733. * Checks whether this entry differs from a given entry from the
  734. * {@link DirCache}.
  735. *
  736. * File status information is used; if the status is the same, we consider
  737. * the file identical to the state in the working directory. Native git uses
  738. * more stat fields than are accessible in Java.
  739. *
  740. * @param entry
  741. * the entry from the dircache we want to compare against
  742. * @param forceContentCheck
  743. * True if the actual file content should be checked if
  744. * modification time differs.
  745. * @param reader
  746. * access to repository objects if necessary. Should not be null.
  747. * @return true if content is most likely different.
  748. * @throws IOException
  749. * @since 3.3
  750. */
  751. public boolean isModified(DirCacheEntry entry, boolean forceContentCheck,
  752. ObjectReader reader) throws IOException {
  753. MetadataDiff diff = compareMetadata(entry);
  754. switch (diff) {
  755. case DIFFER_BY_TIMESTAMP:
  756. if (forceContentCheck)
  757. // But we are told to look at content even though timestamps
  758. // tell us about modification
  759. return contentCheck(entry, reader);
  760. else
  761. // We are told to assume a modification if timestamps differ
  762. return true;
  763. case SMUDGED:
  764. // The file is clean by timestamps but the entry was smudged.
  765. // Let's do a content check
  766. return contentCheck(entry, reader);
  767. case EQUAL:
  768. return false;
  769. case DIFFER_BY_METADATA:
  770. if (mode == FileMode.SYMLINK.getBits())
  771. return contentCheck(entry, reader);
  772. return true;
  773. default:
  774. throw new IllegalStateException(MessageFormat.format(
  775. JGitText.get().unexpectedCompareResult, diff.name()));
  776. }
  777. }
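/*
 * Usage sketch (JGit 3.x style resource handling; names are illustrative,
 * and isModified may throw IOException):
 *
 *   DirCacheEntry dce = dirCacheIterator.getDirCacheEntry();
 *   ObjectReader reader = repo.newObjectReader();
 *   try {
 *       boolean dirty = workingTreeIterator.isModified(dce, true, reader);
 *   } finally {
 *       reader.release();
 *   }
 */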
  778. /**
  779. * Get the file mode to use for the current entry when it is to be updated
  780. * in the index.
  781. *
  782. * @param indexIter
  783. * {@link DirCacheIterator} positioned at the same entry as this
  784. * iterator or null if no {@link DirCacheIterator} is available
  785. * at this iterator's current entry
  786. * @return index file mode
  787. */
  788. public FileMode getIndexFileMode(final DirCacheIterator indexIter) {
  789. final FileMode wtMode = getEntryFileMode();
  790. if (indexIter == null)
  791. return wtMode;
  792. if (getOptions().isFileMode())
  793. return wtMode;
  794. final FileMode iMode = indexIter.getEntryFileMode();
  795. if (FileMode.REGULAR_FILE == wtMode
  796. && FileMode.EXECUTABLE_FILE == iMode)
  797. return iMode;
  798. if (FileMode.EXECUTABLE_FILE == wtMode
  799. && FileMode.REGULAR_FILE == iMode)
  800. return iMode;
  801. return wtMode;
  802. }
  803. /**
  804. * Compares the entry's content with the content in the filesystem.
  805. * Unsmudges the entry when it is detected that it is clean.
  806. *
  807. * @param entry
  808. * the entry to be checked
  809. * @param reader
  810. * access to repository data if necessary
  811. * @return <code>true</code> if the content doesn't match,
  812. * <code>false</code> if it matches
  813. * @throws IOException
  814. */
  815. private boolean contentCheck(DirCacheEntry entry, ObjectReader reader)
  816. throws IOException {
  817. if (getEntryObjectId().equals(entry.getObjectId())) {
  818. // Content has not changed
  819. // We know the entry can't be racily clean because it's still clean.
  820. // Therefore we unsmudge the entry!
  821. // If by any chance we now unsmudge although we are still in the
  822. // same time-slot as the last modification to the index file the
  823. // next index write operation will smudge again.
  824. // Caution: we are unsmudging just by setting the length of the
  825. // in-memory entry object. It's the caller's task to detect that we
  826. // have modified the entry and to persist the modified index.
  827. entry.setLength((int) getEntryLength());
  828. return false;
  829. } else {
  830. if (mode == FileMode.SYMLINK.getBits())
  831. return !new File(readContentAsNormalizedString(current()))
  832. .equals(new File((readContentAsNormalizedString(entry,
  833. reader))));
  834. // Content differs: that's a real change, perhaps
  835. if (reader == null) // deprecated use, do no further checks
  836. return true;
  837. switch (getOptions().getAutoCRLF()) {
  838. case INPUT:
  839. case TRUE:
  840. InputStream dcIn = null;
  841. try {
  842. ObjectLoader loader = reader.open(entry.getObjectId());
  843. if (loader == null)
  844. return true;
  845. // We need to compute the length, but only if it is not
  846. // a binary stream.
  847. dcIn = new EolCanonicalizingInputStream(
  848. loader.openStream(), true, true /* abort if binary */);
  849. long dcInLen;
  850. try {
  851. dcInLen = computeLength(dcIn);
  852. } catch (EolCanonicalizingInputStream.IsBinaryException e) {
  853. return true;
  854. } finally {
  855. dcIn.close();
  856. }
  857. dcIn = new EolCanonicalizingInputStream(
  858. loader.openStream(), true);
  859. byte[] autoCrLfHash = computeHash(dcIn, dcInLen);
  860. boolean changed = getEntryObjectId().compareTo(
  861. autoCrLfHash, 0) != 0;
  862. return changed;
  863. } catch (IOException e) {
  864. return true;
  865. } finally {
  866. if (dcIn != null)
  867. try {
  868. dcIn.close();
  869. } catch (IOException e) {
  870. // empty
  871. }
  872. }
  873. case FALSE:
  874. break;
  875. }
  876. return true;
  877. }
  878. }
  879. private static String readContentAsNormalizedString(DirCacheEntry entry,
  880. ObjectReader reader) throws MissingObjectException, IOException {
  881. ObjectLoader open = reader.open(entry.getObjectId());
  882. byte[] cachedBytes = open.getCachedBytes();
  883. return FS.detect().normalize(RawParseUtils.decode(cachedBytes));
  884. }
  885. private static String readContentAsNormalizedString(Entry entry) throws IOException {
  886. long length = entry.getLength();
  887. byte[] content = new byte[(int) length];
  888. InputStream is = entry.openInputStream();
  889. IO.readFully(is, content, 0, (int) length);
  890. return FS.detect().normalize(RawParseUtils.decode(content));
  891. }
  892. private static long computeLength(InputStream in) throws IOException {
  893. // Since we only care about the length, use skip. The stream
  894. // may be able to more efficiently wade through its data.
  895. //
  896. long length = 0;
  897. for (;;) {
  898. long n = in.skip(1 << 20);
  899. if (n <= 0)
  900. break;
  901. length += n;
  902. }
  903. return length;
  904. }
  905. private byte[] computeHash(InputStream in, long length) throws IOException {
  906. final MessageDigest contentDigest = state.contentDigest;
  907. final byte[] contentReadBuffer = state.contentReadBuffer;
  908. contentDigest.reset();
  909. contentDigest.update(hblob);
  910. contentDigest.update((byte) ' ');
  911. long sz = length;
  912. if (sz == 0) {
  913. contentDigest.update((byte) '0');
  914. } else {
  915. final int bufn = contentReadBuffer.length;
  916. int p = bufn;
  917. do {
  918. contentReadBuffer[--p] = digits[(int) (sz % 10)];
  919. sz /= 10;
  920. } while (sz > 0);
  921. contentDigest.update(contentReadBuffer, p, bufn - p);
  922. }
  923. contentDigest.update((byte) 0);
  924. for (;;) {
  925. final int r = in.read(contentReadBuffer);
  926. if (r <= 0)
  927. break;
  928. contentDigest.update(contentReadBuffer, 0, r);
  929. sz += r;
  930. }
  931. if (sz != length)
  932. return zeroid;
  933. return contentDigest.digest();
  934. }
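/*
 * The digest above is the standard Git blob id: SHA-1 over the header
 * "blob <decimal length>\0" followed by the (possibly filtered) content. It
 * therefore matches what ObjectInserter.Formatter#idFor(Constants.OBJ_BLOB,
 * data) or `git hash-object` would produce for the same bytes.
 */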
  935. /** A single entry within a working directory tree. */
  936. protected static abstract class Entry {
  937. byte[] encodedName;
  938. int encodedNameLen;
  939. void encodeName(final CharsetEncoder enc) {
  940. final ByteBuffer b;
  941. try {
  942. b = enc.encode(CharBuffer.wrap(getName()));
  943. } catch (CharacterCodingException e) {
  944. // This should never happen.
  945. throw new RuntimeException(MessageFormat.format(
  946. JGitText.get().unencodeableFile, getName()));
  947. }
  948. encodedNameLen = b.limit();
  949. if (b.hasArray() && b.arrayOffset() == 0)
  950. encodedName = b.array();
  951. else
  952. b.get(encodedName = new byte[encodedNameLen]);
  953. }
  954. public String toString() {
  955. return getMode().toString() + " " + getName(); //$NON-NLS-1$
  956. }
  957. /**
  958. * Get the type of this entry.
  959. * <p>
  960. * <b>Note: Efficient implementation required.</b>
  961. * <p>
  962. * The implementation of this method must be efficient. If a subclass
  963. * needs to compute the value it should cache the result in an
  964. * instance member instead.
  965. *
  966. * @return a file mode constant from {@link FileMode}.
  967. */
  968. public abstract FileMode getMode();
  969. /**
  970. * Get the byte length of this entry.
  971. * <p>
  972. * <b>Note: Efficient implementation required.</b>
  973. * <p>
  974. * The implementation of this method must be efficient. If a subclass
  975. * needs to compute the value it should cache the result in an
  976. * instance member instead.
  977. *
  978. * @return size of this file, in bytes.
  979. */
  980. public abstract long getLength();
  981. /**
  982. * Get the last modified time of this entry.
  983. * <p>
  984. * <b>Note: Efficient implementation required.</b>
  985. * <p>
  986. * The implementation of this method must be efficient. If a subclass
  987. * needs to compute the value it should cache the result in an
  988. * instance member instead.
  989. *
  990. * @return time since the epoch (in ms) of the last change.
  991. */
  992. public abstract long getLastModified();
  993. /**
  994. * Get the name of this entry within its directory.
  995. * <p>
  996. * Efficient implementations are not required. The caller will obtain
  997. * the name only once and cache it once obtained.
  998. *
  999. * @return name of the entry.
  1000. */
  1001. public abstract String getName();
  1002. /**
  1003. * Obtain an input stream to read the file content.
  1004. * <p>
  1005. * Efficient implementations are not required. The caller will usually
  1006. * obtain the stream only once per entry, if at all.
  1007. * <p>
  1008. * The input stream should not use buffering if the implementation can
  1009. * avoid it. The caller will buffer as necessary to perform efficient
  1010. * block IO operations.
  1011. * <p>
  1012. * The caller will close the stream once complete.
  1013. *
  1014. * @return a stream to read from the file.
  1015. * @throws IOException
  1016. * the file could not be opened for reading.
  1017. */
  1018. public abstract InputStream openInputStream() throws IOException;
  1019. }
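/*
 * Sketch of a minimal Entry implementation backed by memory, e.g. for an
 * IDE-provided tree. Entry is protected, so a real implementation would live
 * in a WorkingTreeIterator subclass (as FileTreeIterator does); the class
 * below is illustrative only.
 *
 *   class MemoryEntry extends WorkingTreeIterator.Entry {
 *       private final String name;
 *       private final byte[] data;
 *       private final long lastModified;
 *
 *       MemoryEntry(String name, byte[] data, long lastModified) {
 *           this.name = name;
 *           this.data = data;
 *           this.lastModified = lastModified;
 *       }
 *
 *       public FileMode getMode() { return FileMode.REGULAR_FILE; }
 *       public long getLength() { return data.length; }
 *       public long getLastModified() { return lastModified; }
 *       public String getName() { return name; }
 *       public InputStream openInputStream() {
 *           return new ByteArrayInputStream(data);
 *       }
 *   }
 */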
  1020. /** Magic type indicating we know rules exist, but they aren't loaded. */
  1021. private static class PerDirectoryIgnoreNode extends IgnoreNode {
  1022. final Entry entry;
  1023. PerDirectoryIgnoreNode(Entry entry) {
  1024. super(Collections.<IgnoreRule> emptyList());
  1025. this.entry = entry;
  1026. }
  1027. IgnoreNode load() throws IOException {
  1028. IgnoreNode r = new IgnoreNode();
  1029. InputStream in = entry.openInputStream();
  1030. try {
  1031. r.parse(in);
  1032. } finally {
  1033. in.close();
  1034. }
  1035. return r.getRules().isEmpty() ? null : r;
  1036. }
  1037. }
  1038. /** Magic type indicating there may be rules for the top level. */
  1039. private static class RootIgnoreNode extends PerDirectoryIgnoreNode {
  1040. final Repository repository;
  1041. RootIgnoreNode(Entry entry, Repository repository) {
  1042. super(entry);
  1043. this.repository = repository;
  1044. }
  1045. @Override
  1046. IgnoreNode load() throws IOException {
  1047. IgnoreNode r;
  1048. if (entry != null) {
  1049. r = super.load();
  1050. if (r == null)
  1051. r = new IgnoreNode();
  1052. } else {
  1053. r = new IgnoreNode();
  1054. }
  1055. FS fs = repository.getFS();
  1056. String path = repository.getConfig().get(CoreConfig.KEY)
  1057. .getExcludesFile();
  1058. if (path != null) {
  1059. File excludesfile;
  1060. if (path.startsWith("~/")) //$NON-NLS-1$
  1061. excludesfile = fs.resolve(fs.userHome(), path.substring(2));
  1062. else
  1063. excludesfile = fs.resolve(null, path);
  1064. loadRulesFromFile(r, excludesfile);
  1065. }
  1066. File exclude = fs.resolve(repository.getDirectory(),
  1067. Constants.INFO_EXCLUDE);
  1068. loadRulesFromFile(r, exclude);
  1069. return r.getRules().isEmpty() ? null : r;
  1070. }
  1071. private static void loadRulesFromFile(IgnoreNode r, File exclude)
  1072. throws FileNotFoundException, IOException {
  1073. if (FS.DETECTED.exists(exclude)) {
  1074. FileInputStream in = new FileInputStream(exclude);
  1075. try {
  1076. r.parse(in);
  1077. } finally {
  1078. in.close();
  1079. }
  1080. }
  1081. }
  1082. }
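/*
 * At the root level the ignore rules above are assembled from three sources,
 * in the order they are parsed here: the .gitignore file in the work tree
 * root (if any), the file named by core.excludesFile in the configuration,
 * and $GIT_DIR/info/exclude.
 */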
  1083. private static final class IteratorState {
  1084. /** Options used to process the working tree. */
  1085. final WorkingTreeOptions options;
  1086. /** File name character encoder. */
  1087. final CharsetEncoder nameEncoder;
  1088. /** Digest computer for {@link #contentId} computations. */
  1089. MessageDigest contentDigest;
  1090. /** Buffer used to perform {@link #contentId} computations. */
  1091. byte[] contentReadBuffer;
  1092. /** TreeWalk with a (supposedly) matching DirCacheIterator. */
  1093. TreeWalk walk;
  1094. /** Position of the matching {@link DirCacheIterator}. */
  1095. int dirCacheTree;
  1096. IteratorState(WorkingTreeOptions options) {
  1097. this.options = options;
  1098. this.nameEncoder = Constants.CHARSET.newEncoder();
  1099. }
  1100. void initializeDigestAndReadBuffer() {
  1101. if (contentDigest == null) {
  1102. contentDigest = Constants.newMessageDigest();
  1103. contentReadBuffer = new byte[BUFFER_SIZE];
  1104. }
  1105. }
  1106. }
  1107. }