* out what format is desired
* Note - doesn't check for core-supported formats!
* Note - doesn't check for OOXML-supported formats
+ *
+ * @param poifsDir the directory node to be inspected
+ * @return the format specific text extractor
+ *
+ * @throws IOException when the format specific extraction fails because of invalid entries
*/
public static POITextExtractor createExtractor(DirectoryNode poifsDir) throws IOException {
if (poifsDir.hasEntry("WordDocument")) {
* If there are no embedded documents, you'll get back an
* empty array. Otherwise, you'll get one open
* {@link POITextExtractor} for each embedded file.
+ *
+ * @param ext the extractor holding the directory to start parsing
+ * @param dirs a list to be filled with directory references holding embedded resources
+ * @param nonPOIFS a list to be filled with streams which aren't based on POIFS entries
+ *
+ * @throws IOException when the format specific extraction fails because of invalid entries
*/
public static void identifyEmbeddedResources(POIOLE2TextExtractor ext, List<Entry> dirs, List<InputStream> nonPOIFS) throws IOException {
// Find all the embedded directories
/**
* Returns the TrailerStream, which is at the root of the
* tree of Streams.
+ *
+ * @return the TrailerStream
*/
public TrailerStream getTrailerStream() { return trailer; }
+
/**
* Returns all the top level streams, which are the streams
* pointed to by the TrailerStream.
+ *
+ * @return the top level streams
*/
public Stream[] getTopLevelStreams() { return trailer.getPointedToStreams(); }
+
public long getDocumentSize() { return docSize; }
/**
}
}
}
-
- /**
- * For testing only
- */
- public static void main(String args[]) throws Exception {
- NPOIFSFileSystem pfs = new NPOIFSFileSystem(new File(args[0]));
- HDGFDiagram hdgf = new HDGFDiagram(pfs);
- hdgf.debug();
- hdgf.close();
- pfs.close();
- }
}
/**
* Compress the given input stream, returning the array of bytes
* of the compressed input
+ *
+ * @param src the stream of source bytes to compress
+ * @return the compressed stream as bytes
+ *
+ * @throws IOException when the InputStream can't be read
*/
public byte[] compress(InputStream src) throws IOException {
ByteArrayOutputStream res = new ByteArrayOutputStream();
/**
* Performs the Visio compatible streaming LZW compression.
+ *
+ * @param src the input bytes for the compression
+ * @param res the OutputStream which receives the compressed bytes
+ *
+ * @throws IOException when the InputStream can't be read
+ * or the OutputStream can't be written to
*/
public void compress(InputStream src, OutputStream res) throws IOException {
HDGFLZWCompressor c = new HDGFLZWCompressor();
public byte[] _getContents() {
return contents;
}
+
public ChunkHeader getHeader() {
return header;
}
- /** Gets the separator between this chunk and the next, if it exists */
+
+ /**
+ * Gets the separator between this chunk and the next, if it exists
+ *
+ * @return the separator
+ */
public ChunkSeparator getSeparator() {
return separator;
}
- /** Gets the trailer for this chunk, if it exists */
+
+ /**
+ * Gets the trailer for this chunk, if it exists
+ *
+ * @return the trailer
+ */
public ChunkTrailer getTrailer() {
return trailer;
}
+
/**
* Gets the command definitions, which define and describe much
* of the data held by the chunk.
+ *
+ * @return the command definitions
*/
public CommandDefinition[] getCommandDefinitions() {
return commandDefinitions;
}
+
public Command[] getCommands() {
return commands;
}
+
/**
* Get the name of the chunk, as found from the CommandDefinitions
+ *
+ * @return the name of the chunk
*/
public String getName() {
return name;
/**
* Returns the size of the chunk, including any
* headers, trailers and separators.
+ *
+ * @return the size of the chunk
*/
public int getOnDiskSize() {
int size = header.getSizeInBytes() + contents.length;
/**
* Creates the appropriate chunk at the given location.
- * @param data
- * @param offset
+ *
+ * @param data the chunk bytes
+ * @param offset the offset into the chunk bytes array to start reading from
+ *
+ * @return the new Chunk
*/
public Chunk createChunk(byte[] data, int offset) {
// Create the header
/**
* Creates the appropriate ChunkHeader for the Chunk Header at
* the given location, for the given document version.
+ *
+ * @param documentVersion the documentVersion - 4 and higher is supported
+ * @param data the chunk data
+ * @param offset the start offset in the chunk data
+ * @return the ChunkHeader
*/
public static ChunkHeader createChunkHeader(int documentVersion, byte[] data, int offset) {
if(documentVersion >= 6) {
/**
* Returns the size of a chunk header for the given document version.
+ *
+ * @param documentVersion the documentVersion - 4 and higher is supported
+ *
+ * @return the header size
*/
public static int getHeaderSize(int documentVersion) {
if(documentVersion > 6) {
public abstract Charset getChunkCharset();
/**
- * Returns the ID/IX of the chunk
+ * @return the ID/IX of the chunk
*/
public int getId() {
return id;
/**
* Returns the length of the trunk, excluding the length
* of the header, trailer or separator.
+ *
+ * @return the length of the trunk
*/
public int getLength() {
return length;
/**
* Returns the type of the chunk, which affects the
* mandatory information
+ *
+ * @return the type of the chunk
*/
public int getType() {
return type;