*
* @return byte array containing the HSSF-only portions of the POIFS file.
*/
-
- public byte [] serialize() {
- log.log(DEBUG, "Serializing Workbook!");
- byte[] retval = null;
-
- // ArrayList bytes = new ArrayList(records.size());
- int arraysize = getSize();
- int pos = 0;
-
- // for (int k = 0; k < records.size(); k++)
- // {
- // bytes.add((( Record ) records.get(k)).serialize());
- // }
- // for (int k = 0; k < bytes.size(); k++)
- // {
- // arraysize += (( byte [] ) bytes.get(k)).length;
- // }
- retval = new byte[ arraysize ];
- for (int k = 0; k < records.size(); k++) {
-
- // byte[] rec = (( byte [] ) bytes.get(k));
- // System.arraycopy(rec, 0, retval, pos, rec.length);
- Record record = records.get(k);
- // Let's skip RECALCID records, as they are only use for optimization
- if(record.getSid() != RecalcIdRecord.sid || ((RecalcIdRecord)record).isNeeded()) {
- pos += record.serialize(pos, retval); // rec.length;
- }
- }
- log.log(DEBUG, "Exiting serialize workbook");
- return retval;
- }
+ // GJS: Not used so why keep it.
+// public byte [] serialize() {
+// log.log(DEBUG, "Serializing Workbook!");
+// byte[] retval = null;
+//
+//// ArrayList bytes = new ArrayList(records.size());
+// int arraysize = getSize();
+// int pos = 0;
+//
+// retval = new byte[ arraysize ];
+// for (int k = 0; k < records.size(); k++) {
+//
+// Record record = records.get(k);
+//// Let's skip RECALCID records, as they are only used for optimization
+// if(record.getSid() != RecalcIdRecord.sid || ((RecalcIdRecord)record).isNeeded()) {
+// pos += record.serialize(pos, retval); // rec.length;
+// }
+// }
+// log.log(DEBUG, "Exiting serialize workbook");
+// return retval;
+// }
/**
     * Serializes all records in the workbook section into a big byte array.
     * Use this to write the Workbook out.
     *
     * @param offset the offset within data at which to begin writing
     * @param data array of bytes to write this to
     * @return the number of bytes written
*/
- public int serialize(int offset, byte [] data) {
- log.log(DEBUG, "Serializing Workbook with offsets");
+ public int serialize( int offset, byte[] data )
+ {
+ log.log( DEBUG, "Serializing Workbook with offsets" );
- // ArrayList bytes = new ArrayList(records.size());
- // int arraysize = getSize(); // 0;
- int pos = 0;
+ int pos = 0;
- // for (int k = 0; k < records.size(); k++)
- // {
- // bytes.add((( Record ) records.get(k)).serialize());
- //
- // }
- // for (int k = 0; k < bytes.size(); k++)
- // {
- // arraysize += (( byte [] ) bytes.get(k)).length;
- // }
- for (int k = 0; k < records.size(); k++) {
+ SSTRecord sst = null;
+ int sstPos = 0;
+ for ( int k = 0; k < records.size(); k++ )
+ {
- // byte[] rec = (( byte [] ) bytes.get(k));
- // System.arraycopy(rec, 0, data, offset + pos, rec.length);
- Record record = records.get(k);
+ Record record = records.get( k );
            // Let's skip RECALCID records, as they are only used for optimization
- if(record.getSid() != RecalcIdRecord.sid || ((RecalcIdRecord)record).isNeeded()) {
- pos += record.serialize(pos + offset, data); // rec.length;
+ if ( record.getSid() != RecalcIdRecord.sid || ( (RecalcIdRecord) record ).isNeeded() )
+ {
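+                // Remember the SST record and where it was written; the
+                // EXTSST record that follows is regenerated from it, since
+                // its bucket offsets point into the serialized SST data.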
+ if (record instanceof SSTRecord)
+ {
+ sst = (SSTRecord)record;
+ sstPos = pos;
+ }
+ if (record.getSid() == ExtSSTRecord.sid && sst != null)
+ {
+ record = sst.createExtSSTRecord(sstPos + offset);
+ }
+ pos += record.serialize( pos + offset, data ); // rec.length;
}
}
- log.log(DEBUG, "Exiting serialize workbook");
+ log.log( DEBUG, "Exiting serialize workbook" );
return pos;
}
- public int getSize() {
+ public int getSize()
+ {
int retval = 0;
- for (int k = 0; k < records.size(); k++) {
- Record record = records.get(k);
+ SSTRecord sst = null;
+ for ( int k = 0; k < records.size(); k++ )
+ {
+ Record record = records.get( k );
        // Let's skip RECALCID records, as they are only used for optimization
- if(record.getSid() != RecalcIdRecord.sid || ((RecalcIdRecord)record).isNeeded()) {
- retval += record.getRecordSize();
+ if ( record.getSid() != RecalcIdRecord.sid || ( (RecalcIdRecord) record ).isNeeded() )
+ {
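+                // The EXTSST record's size depends on the number of strings
+                // in the SST, so delegate the calculation to the SST record.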
+ if (record instanceof SSTRecord)
+ sst = (SSTRecord)record;
+ if (record.getSid() == ExtSSTRecord.sid && sst != null)
+ retval += sst.calcExtSSTRecordSize();
+ else
+ retval += record.getRecordSize();
}
}
return retval;
field_1_stream_pos = pos;
}
- public void setBucketSSTOffset(short offset)
+ public void setBucketRecordOffset(short offset)
{
field_2_bucket_sst_offset = offset;
}
public short getSid()
{
- return this.sid;
+ return sid;
}
}
private short field_1_strings_per_bucket;
private ArrayList field_2_sst_info;
+
public ExtSSTRecord()
{
field_2_sst_info = new ArrayList();
public int serialize(int offset, byte [] data)
{
LittleEndian.putShort(data, 0 + offset, sid);
-
-// LittleEndian.putShort(data,2,(short)(2 + (getNumInfoRecords() *8)));
- LittleEndian.putShort(data, 2 + offset, ( short ) (2 + (0x3fa - 2)));
- int pos = 4;
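+        // The record length field excludes the 4-byte record header
+        // (sid + length).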
+ LittleEndian.putShort(data, 2 + offset, (short)(getRecordSize() - 4));
+ LittleEndian.putShort(data, 4 + offset, field_1_strings_per_bucket);
+ int pos = 6;
for (int k = 0; k < getNumInfoRecords(); k++)
{
System.arraycopy(getInfoRecordAt(k).serialize(), 0, data,
pos + offset, 8);
+ pos += getInfoRecordAt(k).getRecordSize();
}
return getRecordSize();
}
public int getRecordSize()
{
- return 6 + 0x3fa - 2;
+ return 4 + 2 + field_2_sst_info.size() * 8;
}
public short getSid()
{
- return this.sid;
+ return sid;
+ }
+
+ public void setBucketOffsets( int[] bucketAbsoluteOffsets, int[] bucketRelativeOffsets )
+ {
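+        // Each info subrecord pairs a bucket's absolute position in the
+        // stream with its offset relative to the record it falls within.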
+ this.field_2_sst_info = new ArrayList(bucketAbsoluteOffsets.length);
+ for ( int i = 0; i < bucketAbsoluteOffsets.length; i++ )
+ {
+ ExtSSTInfoSubRecord r = new ExtSSTInfoSubRecord();
+ r.setBucketRecordOffset((short)bucketRelativeOffsets[i]);
+ r.setStreamPos(bucketAbsoluteOffsets[i]);
+ field_2_sst_info.add(r);
+ }
}
+
}
recordOffset += amount;
available -= amount;
}
+
+ public int getRecordOffset()
+ {
+ return recordOffset;
+ }
}
import java.util.Iterator;
import java.util.List;
-import java.util.ArrayList;
/**
* Title: Static String Table Record
* @author Andrew C. Oliver (acoliver at apache dot org)
* @author Marc Johnson (mjohnson at apache dot org)
* @author Glen Stampoultzis (glens at apache.org)
- * @version 2.0-pre
+ *
* @see org.apache.poi.hssf.record.LabelSSTRecord
* @see org.apache.poi.hssf.record.ContinueRecord
*/
private List _record_lengths = null;
private SSTDeserializer deserializer;
+ /** Offsets from the beginning of the SST record (even across continuations) */
+ int[] bucketAbsoluteOffsets;
+    /** Offsets relative to the start of the current SST or continue record */
+ int[] bucketRelativeOffsets;
+
/**
* default constructor
*/
-
public SSTRecord()
{
field_1_num_strings = 0;
field_1_num_strings++;
String str = ( string == null ) ? ""
: string;
- int rval = -1;
+ int rval;
UnicodeString ucs = new UnicodeString();
ucs.setString( str );
for ( int k = 0; k < field_3_strings.size(); k++ )
{
buffer.append( " .string_" + k + " = " )
- .append( ( (UnicodeString) field_3_strings
+ .append( ( field_3_strings
.get( new Integer( k ) ) ).toString() ).append( "\n" );
}
buffer.append( "[/SST]\n" );
* The data consists of sets of string data. This string data is
* arranged as follows:
* <P>
- * <CODE>
+ * <CODE><pre>
* short string_length; // length of string data
* byte string_flag; // flag specifying special string
* // handling
* // array is run_count)
* byte[] extension; // optional extension (length of array
* // is extend_length)
- * </CODE>
+ * </pre></CODE>
* <P>
* The string_flag is bit mapped as follows:
* <P>
     * Subclasses should implement this so that their data is serialized into
     * the given byte array.
*
- * @return byte array containing instance data
+     * @return the number of bytes written
*/
public int serialize( int offset, byte[] data )
{
SSTSerializer serializer = new SSTSerializer(
_record_lengths, field_3_strings, getNumStrings(), getNumUniqueStrings() );
- return serializer.serialize( getRecordSize(), offset, data );
+ int bytes = serializer.serialize( getRecordSize(), offset, data );
+ bucketAbsoluteOffsets = serializer.getBucketAbsoluteOffsets();
+ bucketRelativeOffsets = serializer.getBucketRelativeOffsets();
+// for ( int i = 0; i < bucketAbsoluteOffsets.length; i++ )
+// {
+// System.out.println( "bucketAbsoluteOffset = " + bucketAbsoluteOffsets[i] );
+// System.out.println( "bucketRelativeOffset = " + bucketRelativeOffsets[i] );
+// }
+ return bytes;
}
{
deserializer.processContinueRecord( record );
}
+
+ /**
+     * Creates an extended string record based on the current contents of
+     * this SST record.  The offset within the stream to the SST record
+ * is required because the extended string record points directly to the
+ * strings in the SST record.
+ * <p>
+ * NOTE: THIS FUNCTION MUST ONLY BE CALLED AFTER THE SST RECORD HAS BEEN
+ * SERIALIZED.
+ *
+ * @param sstOffset The offset in the stream to the start of the
+ * SST record.
+     * @return The new ExtSST record.
+ */
+ public ExtSSTRecord createExtSSTRecord(int sstOffset)
+ {
+        if (bucketAbsoluteOffsets == null || bucketRelativeOffsets == null)
+ throw new IllegalStateException("SST record has not yet been serialized.");
+
+ ExtSSTRecord extSST = new ExtSSTRecord();
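+        // The bucket size must match SSTSerializer.DEFAULT_BUCKET_SIZE,
+        // which determined how often offsets were recorded during
+        // serialization.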
+ extSST.setNumStringsPerBucket((short)8);
+ int[] absoluteOffsets = (int[]) bucketAbsoluteOffsets.clone();
+ int[] relativeOffsets = (int[]) bucketRelativeOffsets.clone();
+ for ( int i = 0; i < absoluteOffsets.length; i++ )
+ absoluteOffsets[i] += sstOffset;
+ extSST.setBucketOffsets(absoluteOffsets, relativeOffsets);
+ return extSST;
+ }
+
+ /**
+ * Calculates the size in bytes of the EXTSST record as it would be if the
+ * record was serialized.
+ *
+ * @return The size of the ExtSST record in bytes.
+ */
+ public int calcExtSSTRecordSize()
+ {
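+        // 4 byte record header + 2 byte strings-per-bucket field + one
+        // 8 byte info subrecord per bucket.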
+ return 4 + 2 + ((field_3_strings.size() / SSTSerializer.DEFAULT_BUCKET_SIZE) + 1) * 8;
+ }
}
import java.util.Map;
/**
- * Used to calculate the record sizes for a particular record.
+ * Used to calculate the record sizes for a particular record.  This is
+ * unfortunate, as it largely mirrors the SST serialization code; in general
+ * the SST serialization code needs to be rewritten.
*
* @author Glen Stampoultzis (glens at apache.org)
*/
private int numUniqueStrings;
private SSTRecordHeader sstRecordHeader;
+ /** Offsets from the beginning of the SST record (even across continuations) */
+ int[] bucketAbsoluteOffsets;
+    /** Offsets relative to the start of the current SST or continue record */
+ int[] bucketRelativeOffsets;
+ int startOfSST, startOfRecord;
+    /** The default bucket size (this is used for the EXTSST record) */
+ final static int DEFAULT_BUCKET_SIZE = 8;
+
public SSTSerializer( List recordLengths, BinaryTree strings, int numStrings, int numUniqueStrings )
{
this.recordLengths = recordLengths;
this.numStrings = numStrings;
this.numUniqueStrings = numUniqueStrings;
this.sstRecordHeader = new SSTRecordHeader( numStrings, numUniqueStrings );
+
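+        // Allocate one bucket slot per DEFAULT_BUCKET_SIZE strings; the +1
+        // allows for a partial bucket at the end.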
+ this.bucketAbsoluteOffsets = new int[strings.size()/DEFAULT_BUCKET_SIZE+1];
+ this.bucketRelativeOffsets = new int[strings.size()/DEFAULT_BUCKET_SIZE+1];
}
/**
/**
* This case is chosen when an SST record does not span over to a continue record.
- *
*/
private void serializeSingleSSTRecord( byte[] data, int offset, int record_length_index )
{
for ( int k = 0; k < strings.size(); k++ )
{
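+            // Note the offset of every DEFAULT_BUCKET_SIZE'th string; with
+            // no continue records the absolute and relative offsets match.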
+ if (k % DEFAULT_BUCKET_SIZE == 0)
+ {
+ bucketAbsoluteOffsets[k / DEFAULT_BUCKET_SIZE] = pos;
+ bucketRelativeOffsets[k / DEFAULT_BUCKET_SIZE] = pos;
+ }
System.arraycopy( getUnicodeString( k ).serialize(), 0, data, pos + offset, getUnicodeString( k ).getRecordSize() );
pos += getUnicodeString( k ).getRecordSize();
}
private void serializeLargeRecord( int record_size, int record_length_index, byte[] buffer, int offset )
{
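+        // Remember where the SST record itself begins so that bucket
+        // offsets can be measured from the start of the record.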
+ startOfSST = offset;
+
byte[] stringReminant = null;
int stringIndex = 0;
boolean lastneedcontinue = false;
recordLength, numStrings, numUniqueStrings );
// write the appropriate header
+ startOfRecord = offset + totalWritten;
recordProcessor.writeRecordHeader( offset, totalWritten, recordLength, first_record );
first_record = false;
{
UnicodeString unistr = getUnicodeString( stringIndex );
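+                // Absolute offsets are measured from the start of the SST
+                // record, relative offsets from the start of the current
+                // SST or continue record.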
+ if (stringIndex % DEFAULT_BUCKET_SIZE == 0)
+ {
+ bucketAbsoluteOffsets[stringIndex / DEFAULT_BUCKET_SIZE] = offset + totalWritten + recordProcessor.getRecordOffset() - startOfSST;
+ bucketRelativeOffsets[stringIndex / DEFAULT_BUCKET_SIZE] = offset + totalWritten + recordProcessor.getRecordOffset() - startOfRecord;
+ }
+
if ( unistr.getRecordSize() <= recordProcessor.getAvailable() )
{
recordProcessor.writeWholeString( unistr, offset, totalWritten );
{
return recordLengths;
}
+
+ public int[] getBucketAbsoluteOffsets()
+ {
+ return bucketAbsoluteOffsets;
+ }
+
+ public int[] getBucketRelativeOffsets()
+ {
+ return bucketRelativeOffsets;
+ }
}