<!-- Don't forget to update status.xml too! -->
<release version="3.5-beta4" date="2008-??-??">
+ <action dev="POI-DEVELOPERS" type="fix">46033 - fixed TableCell to correctly set text type</action>
+ <action dev="POI-DEVELOPERS" type="fix">46122 - fixed Picture.draw to skip rendering if picture data was not found</action>
<action dev="POI-DEVELOPERS" type="fix">15716 - memory usage optimisation - converted Ptg arrays into Formula objects</action>
<action dev="POI-DEVELOPERS" type="add">46065 - added implementation for VALUE function</action>
<action dev="POI-DEVELOPERS" type="add">45966 - added implementation for FIND function</action>
<!-- Don't forget to update changes.xml too! -->
<changes>
<release version="3.5-beta4" date="2008-??-??">
+ <action dev="POI-DEVELOPERS" type="fix">46033 - fixed TableCell to correctly set text type</action>
+ <action dev="POI-DEVELOPERS" type="fix">46122 - fixed Picture.draw to skip rendering if picture data was not found</action>
<action dev="POI-DEVELOPERS" type="fix">15716 - memory usage optimisation - converted Ptg arrays into Formula objects</action>
<action dev="POI-DEVELOPERS" type="add">46065 - added implementation for VALUE function</action>
<action dev="POI-DEVELOPERS" type="add">45966 - added implementation for FIND function</action>
import org.apache.poi.hssf.record.NameRecord;
import org.apache.poi.hssf.record.Record;
import org.apache.poi.hssf.record.SupBookRecord;
-import org.apache.poi.hssf.record.UnicodeString;
+import org.apache.poi.hssf.record.formula.Area3DPtg;
import org.apache.poi.hssf.record.formula.NameXPtg;
+import org.apache.poi.hssf.record.formula.Ref3DPtg;
/**
* Link Table (OOO pdf reference: 4.10.3 ) <p/>
return null;
}
int shIx = _externSheetRecord.getFirstSheetIndexFromRefIndex(extRefIndex);
- UnicodeString usSheetName = ebr.getSheetNames()[shIx];
+ String usSheetName = ebr.getSheetNames()[shIx];
return new String[] {
ebr.getURL(),
- usSheetName.getString(),
+ usSheetName,
};
}
return result;
}
- private static int getSheetIndex(UnicodeString[] sheetNames, String sheetName) {
+ private static int getSheetIndex(String[] sheetNames, String sheetName) {
for (int i = 0; i < sheetNames.length; i++) {
- if (sheetNames[i].getString().equals(sheetName)) {
+ if (sheetNames[i].equals(sheetName)) {
return i;
}
package org.apache.poi.hssf.record;
-public final class DrawingSelectionRecord extends AbstractEscherHolderRecord {
- public static final short sid = 0xED;
-
- public DrawingSelectionRecord()
- {
- }
-
- public DrawingSelectionRecord( RecordInputStream in )
- {
- super( in );
- }
-
- protected String getRecordName()
- {
- return "MSODRAWINGSELECTION";
- }
-
- public short getSid()
- {
- return sid;
- }
+import org.apache.poi.util.HexDump;
+import org.apache.poi.util.LittleEndianByteArrayOutputStream;
+import org.apache.poi.util.LittleEndianInput;
+import org.apache.poi.util.LittleEndianOutput;
+
+/**
+ * MsoDrawingSelection (0x00ED)<p/>
+ * Reference:
+ * [MS-OGRAPH].pdf sec 2.4.69
+ *
+ * @author Josh Micich
+ */
+public final class DrawingSelectionRecord extends Record {
+ public static final short sid = 0x00ED;
+
+ /**
+ * From [MS-ODRAW].pdf sec 2.2.1<br/>
+ * TODO - make EscherRecordHeader {@link LittleEndianInput} aware and refactor with this
+ */
+ private static final class OfficeArtRecordHeader {
+ public static final int ENCODED_SIZE = 8;
+ /**
+ * lower 4 bits is 'version' usually 0x01 or 0x0F (for containers)<br/>
+ * upper 12 bits is 'instance'
+ */
+ private final int _verAndInstance;
+ /** value should be between 0xF000 and 0xFFFF */
+ private final int _type;
+ private final int _length;
+
+ public OfficeArtRecordHeader(LittleEndianInput in) {
+ _verAndInstance = in.readUShort();
+ _type = in.readUShort();
+ _length = in.readInt();
+ }
+
+ public void serialize(LittleEndianOutput out) {
+ out.writeShort(_verAndInstance);
+ out.writeShort(_type);
+ out.writeInt(_length);
+ }
+
+ public String debugFormatAsString() {
+ StringBuffer sb = new StringBuffer(32);
+ sb.append("ver+inst=").append(HexDump.shortToHex(_verAndInstance));
+ sb.append(" type=").append(HexDump.shortToHex(_type));
+ sb.append(" len=").append(HexDump.intToHex(_length));
+ return sb.toString();
+ }
+ }
+
+ // [MS-OGRAPH].pdf says that the data of this record is an OfficeArtFDGSL structure
+ // as described in [MS-ODRAW].pdf sec 2.2.33
+ private OfficeArtRecordHeader _header;
+ private int _cpsp;
+ /** a MSODGSLK enum value for the current selection mode */
+ private int _dgslk;
+ private int _spidFocus;
+ /** selected shape IDs (e.g. from EscherSpRecord.ShapeId) */
+ private int[] _shapeIds;
+
+ /** Reads the OfficeArtFDGSL payload: art record header, cpsp, dgslk, spidFocus, then shape IDs to end of record. */
+ public DrawingSelectionRecord(RecordInputStream in) {
+ _header = new OfficeArtRecordHeader(in);
+ _cpsp = in.readInt();
+ _dgslk = in.readInt();
+ _spidFocus = in.readInt();
+ // all remaining record data is the list of 4-byte selected shape IDs
+ int nShapes = in.available() / 4;
+ int[] shapeIds = new int[nShapes];
+ for (int i = 0; i < nShapes; i++) {
+ shapeIds[i] = in.readInt();
+ }
+ _shapeIds = shapeIds;
+ }
+
+ public short getSid() {
+ return sid;
+ }
+
+ protected int getDataSize() {
+ return OfficeArtRecordHeader.ENCODED_SIZE
+ + 12 // 3 int fields
+ + _shapeIds.length * 4;
+ }
+
+ /** Writes the BIFF header (sid, data size) followed by the FDGSL payload; returns the total number of bytes written. */
+ public int serialize(int offset, byte[] data) {
+ int dataSize = getDataSize();
+ int recSize = 4 + dataSize;
+ LittleEndianOutput out = new LittleEndianByteArrayOutputStream(data, offset, recSize);
+ out.writeShort(sid);
+ out.writeShort(dataSize);
+ _header.serialize(out);
+ out.writeInt(_cpsp);
+ out.writeInt(_dgslk);
+ out.writeInt(_spidFocus);
+ for (int i = 0; i < _shapeIds.length; i++) {
+ out.writeInt(_shapeIds[i]);
+ }
+ return recSize;
+ }
+
+ public Object clone() {
+ // currently immutable - no fields are ever written after construction, so sharing this instance is safe
+ return this;
+ }
+
+ public String toString() {
+ StringBuffer sb = new StringBuffer();
+
+ sb.append("[MSODRAWINGSELECTION]\n");
+ sb.append(" .rh =(").append(_header.debugFormatAsString()).append(")\n");
+ sb.append(" .cpsp =").append(HexDump.intToHex(_cpsp)).append('\n');
+ sb.append(" .dgslk =").append(HexDump.intToHex(_dgslk)).append('\n');
+ sb.append(" .spidFocus=").append(HexDump.intToHex(_spidFocus)).append('\n');
+ sb.append(" .shapeIds =(");
+ for (int i = 0; i < _shapeIds.length; i++) {
+ if (i > 0) {
+ sb.append(", ");
+ }
+ sb.append(HexDump.intToHex(_shapeIds[i]));
+ }
+ sb.append(")\n");
+
+ sb.append("[/MSODRAWINGSELECTION]\n");
+ return sb.toString();
+ }
}
* contains the classes for all the records we want to parse.<br/>
* Note - this most but not *every* subclass of Record.
*/
- private static final Class[] records = {
+ private static final Class[] recordClasses = {
ArrayRecord.class,
BackupRecord.class,
BlankRecord.class,
/**
* cache of the recordsToMap();
*/
- private static Map recordsMap = recordsToMap(records);
+ private static Map recordsMap = recordsToMap(recordClasses);
private static short[] _allKnownRecordSIDs;
* are returned digested into the non-mul form.
*/
public static Record [] createRecord(RecordInputStream in) {
+
+ Record record = createSingleRecord(in);
+ if (record instanceof DBCellRecord) {
+ // Not needed by POI. Regenerated from scratch by POI when spreadsheet is written
+ return new Record[] { null, };
+ }
+ if (record instanceof RKRecord) {
+ return new Record[] { convertToNumberRecord((RKRecord) record), };
+ }
+ if (record instanceof MulRKRecord) {
+ return convertRKRecords((MulRKRecord)record);
+ }
+ if (record instanceof MulBlankRecord) {
+ return convertMulBlankRecords((MulBlankRecord)record);
+ }
+ return new Record[] { record, };
+ }
+
+ private static Record createSingleRecord(RecordInputStream in) {
Constructor constructor = (Constructor) recordsMap.get(new Short(in.getSid()));
if (constructor == null) {
- return new Record[] { new UnknownRecord(in), };
+ return new UnknownRecord(in);
}
- Record retval;
-
try {
- retval = ( Record ) constructor.newInstance(new Object[] { in });
+ return (Record) constructor.newInstance(new Object[] { in });
} catch (InvocationTargetException e) {
throw new RecordFormatException("Unable to construct record instance" , e.getTargetException());
} catch (IllegalArgumentException e) {
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
-
- if (retval instanceof RKRecord) {
- // RK record is a slightly smaller alternative to NumberRecord
- // POI likes NumberRecord better
- RKRecord rk = ( RKRecord ) retval;
- NumberRecord num = new NumberRecord();
+ }
- num.setColumn(rk.getColumn());
- num.setRow(rk.getRow());
- num.setXFIndex(rk.getXFIndex());
- num.setValue(rk.getRKNumber());
- return new Record[] { num, };
- }
- if (retval instanceof DBCellRecord) {
- // Not needed by POI. Regenerated from scratch by POI when spreadsheet is written
- return new Record[] { null, };
- }
- // expand multiple records where necessary
- if (retval instanceof MulRKRecord) {
- MulRKRecord mrk = ( MulRKRecord ) retval;
+ /**
+ * RK record is a slightly smaller alternative to NumberRecord.
+ * POI likes NumberRecord better.
+ */
+ private static NumberRecord convertToNumberRecord(RKRecord rk) {
+ NumberRecord num = new NumberRecord();
+
+ num.setColumn(rk.getColumn());
+ num.setRow(rk.getRow());
+ num.setXFIndex(rk.getXFIndex());
+ num.setValue(rk.getRKNumber());
+ return num;
+ }
- Record[] mulRecs = new Record[ mrk.getNumColumns() ];
- for (int k = 0; k < mrk.getNumColumns(); k++) {
- NumberRecord nr = new NumberRecord();
+ /**
+ * Converts a {@link MulRKRecord} into an equivalent array of {@link NumberRecord}s
+ */
+ private static NumberRecord[] convertRKRecords(MulRKRecord mrk) {
- nr.setColumn(( short ) (k + mrk.getFirstColumn()));
- nr.setRow(mrk.getRow());
- nr.setXFIndex(mrk.getXFAt(k));
- nr.setValue(mrk.getRKNumberAt(k));
- mulRecs[ k ] = nr;
- }
- return mulRecs;
+ NumberRecord[] mulRecs = new NumberRecord[mrk.getNumColumns()];
+ for (int k = 0; k < mrk.getNumColumns(); k++) {
+ NumberRecord nr = new NumberRecord();
+
+ nr.setColumn((short) (k + mrk.getFirstColumn()));
+ nr.setRow(mrk.getRow());
+ nr.setXFIndex(mrk.getXFAt(k));
+ nr.setValue(mrk.getRKNumberAt(k));
+ mulRecs[k] = nr;
}
- if (retval instanceof MulBlankRecord) {
- MulBlankRecord mb = ( MulBlankRecord ) retval;
+ return mulRecs;
+ }
- Record[] mulRecs = new Record[ mb.getNumColumns() ];
- for (int k = 0; k < mb.getNumColumns(); k++) {
- BlankRecord br = new BlankRecord();
+ /**
+ * Converts a {@link MulBlankRecord} into an equivalent array of {@link BlankRecord}s
+ */
+ private static BlankRecord[] convertMulBlankRecords(MulBlankRecord mb) {
- br.setColumn(( short ) (k + mb.getFirstColumn()));
- br.setRow(mb.getRow());
- br.setXFIndex(mb.getXFAt(k));
- mulRecs[ k ] = br;
- }
- return mulRecs;
+ BlankRecord[] mulRecs = new BlankRecord[mb.getNumColumns()];
+ for (int k = 0; k < mb.getNumColumns(); k++) {
+ BlankRecord br = new BlankRecord();
+
+ br.setColumn((short) (k + mb.getFirstColumn()));
+ br.setRow(mb.getRow());
+ br.setXFIndex(mb.getXFAt(k));
+ mulRecs[k] = br;
}
- return new Record[] { retval, };
+ return mulRecs;
}
/**
// After EOF, Excel seems to pad block with zeros
continue;
}
- Record[] recs = createRecord(recStream); // handle MulRK records
+ Record record = createSingleRecord(recStream);
- if (recs.length > 1) {
- for (int k = 0; k < recs.length; k++) {
- records.add(recs[ k ]); // these will be number records
- }
+ if (record instanceof DBCellRecord) {
+ // Not needed by POI. Regenerated from scratch by POI when spreadsheet is written
continue;
}
- Record record = recs[ 0 ];
- if (record == null) {
+ if (record instanceof RKRecord) {
+ records.add(convertToNumberRecord((RKRecord) record));
+ continue;
+ }
+ if (record instanceof MulRKRecord) {
+ addAll(records, convertRKRecords((MulRKRecord)record));
+ continue;
+ }
+ if (record instanceof MulBlankRecord) {
+ addAll(records, convertMulBlankRecords((MulBlankRecord)record));
continue;
}
+
if (record.getSid() == DrawingGroupRecord.sid
&& lastRecord instanceof DrawingGroupRecord) {
DrawingGroupRecord lastDGRecord = (DrawingGroupRecord) lastRecord;
records.add(record);
} else if (lastRecord instanceof DrawingGroupRecord) {
((DrawingGroupRecord)lastRecord).processContinueRecord(contRec.getData());
- } else if (lastRecord instanceof StringRecord) {
- ((StringRecord)lastRecord).processContinueRecord(contRec.getData());
} else if (lastRecord instanceof UnknownRecord) {
//Gracefully handle records that we don't know about,
//that happen to be continued
}
return records;
}
+
+ private static void addAll(List destList, Record[] srcRecs) {
+ for (int i = 0; i < srcRecs.length; i++) {
+ destList.add(srcRecs[i]);
+ }
+ }
}
}
}
- /** Returns an excel style unicode string from the bytes reminaing in the record.
- * <i>Note:</i> Unicode strings differ from <b>normal</b> strings due to the addition of
- * formatting information.
- *
- * @return The unicode string representation of the remaining bytes.
- */
- public UnicodeString readUnicodeString() {
- return new UnicodeString(this);
- }
-
/** Returns the remaining bytes for the current record.
*
* @return The remaining bytes of the current record.
package org.apache.poi.hssf.record;
+import java.util.Iterator;
+
+import org.apache.poi.hssf.record.cont.ContinuableRecord;
+import org.apache.poi.hssf.record.cont.ContinuableRecordOutput;
import org.apache.poi.util.IntMapper;
import org.apache.poi.util.LittleEndianConsts;
-import java.util.Iterator;
-
/**
- * Title: Static String Table Record
- * <P>
+ * Title: Static String Table Record (0x00FC)<p/>
+ *
* Description: This holds all the strings for LabelSSTRecords.
* <P>
* REFERENCE: PG 389 Microsoft Excel 97 Developer's Kit (ISBN:
* @see org.apache.poi.hssf.record.LabelSSTRecord
* @see org.apache.poi.hssf.record.ContinueRecord
*/
-public final class SSTRecord extends Record {
+public final class SSTRecord extends ContinuableRecord {
public static final short sid = 0x00FC;
- private static UnicodeString EMPTY_STRING = new UnicodeString("");
-
- /** how big can an SST record be? As big as any record can be: 8228 bytes */
- static final int MAX_RECORD_SIZE = 8228;
+ private static final UnicodeString EMPTY_STRING = new UnicodeString("");
+ // TODO - move these constants to test class (the only consumer)
/** standard record overhead: two shorts (record id plus data space size)*/
- static final int STD_RECORD_OVERHEAD =
- 2 * LittleEndianConsts.SHORT_SIZE;
+ static final int STD_RECORD_OVERHEAD = 2 * LittleEndianConsts.SHORT_SIZE;
/** SST overhead: the standard record overhead, plus the number of strings and the number of unique strings -- two ints */
- static final int SST_RECORD_OVERHEAD =
- ( STD_RECORD_OVERHEAD + ( 2 * LittleEndianConsts.INT_SIZE ) );
+ static final int SST_RECORD_OVERHEAD = STD_RECORD_OVERHEAD + 2 * LittleEndianConsts.INT_SIZE;
/** how much data can we stuff into an SST record? That would be _max minus the standard SST record overhead */
- static final int MAX_DATA_SPACE = MAX_RECORD_SIZE - SST_RECORD_OVERHEAD;
-
- /** overhead for each string includes the string's character count (a short) and the flag describing its characteristics (a byte) */
- static final int STRING_MINIMAL_OVERHEAD = LittleEndianConsts.SHORT_SIZE + LittleEndianConsts.BYTE_SIZE;
+ static final int MAX_DATA_SPACE = RecordInputStream.MAX_RECORD_DATA_SIZE - 8;
/** union of strings in the SST and EXTSST */
private int field_1_num_strings;
return field_2_num_unique_strings;
}
- /**
- * USE THIS METHOD AT YOUR OWN PERIL: THE <code>addString</code>
- * METHODS MANIPULATE THE NUMBER OF STRINGS AS A SIDE EFFECT; YOUR
- * ATTEMPTS AT MANIPULATING THE STRING COUNT IS LIKELY TO BE VERY
- * WRONG AND WILL RESULT IN BAD BEHAVIOR WHEN THIS RECORD IS
- * WRITTEN OUT AND ANOTHER PROCESS ATTEMPTS TO READ THE RECORD
- *
- * @param count number of strings
- *
- */
-
- public void setNumStrings( final int count )
- {
- field_1_num_strings = count;
- }
-
- /**
- * USE THIS METHOD AT YOUR OWN PERIL: THE <code>addString</code>
- * METHODS MANIPULATE THE NUMBER OF UNIQUE STRINGS AS A SIDE
- * EFFECT; YOUR ATTEMPTS AT MANIPULATING THE UNIQUE STRING COUNT
- * IS LIKELY TO BE VERY WRONG AND WILL RESULT IN BAD BEHAVIOR WHEN
- * THIS RECORD IS WRITTEN OUT AND ANOTHER PROCESS ATTEMPTS TO READ
- * THE RECORD
- *
- * @param count number of strings
- */
-
- public void setNumUniqueStrings( final int count )
- {
- field_2_num_unique_strings = count;
- }
/**
* Get a particular string by its index
return (UnicodeString) field_3_strings.get( id );
}
- public boolean isString16bit( final int id )
- {
- UnicodeString unicodeString = ( (UnicodeString) field_3_strings.get( id ) );
- return ( ( unicodeString.getOptionFlags() & 0x01 ) == 1 );
- }
/**
* Return a debugging string representation
return field_3_strings.size();
}
- /**
- * called by the class that is responsible for writing this sucker.
- * Subclasses should implement this so that their data is passed back in a
- * byte array.
- *
- * @return size
- */
-
- public int serialize( int offset, byte[] data )
- {
- SSTSerializer serializer = new SSTSerializer(
- field_3_strings, getNumStrings(), getNumUniqueStrings() );
- int bytes = serializer.serialize( offset, data );
+ protected void serialize(ContinuableRecordOutput out) {
+ SSTSerializer serializer = new SSTSerializer(field_3_strings, getNumStrings(), getNumUniqueStrings() );
+ serializer.serialize(out);
bucketAbsoluteOffsets = serializer.getBucketAbsoluteOffsets();
bucketRelativeOffsets = serializer.getBucketRelativeOffsets();
- return bytes;
- }
-
-
- protected int getDataSize() {
- SSTRecordSizeCalculator calculator = new SSTRecordSizeCalculator(field_3_strings);
- int recordSize = calculator.getRecordSize();
- return recordSize-4;
}
SSTDeserializer getDeserializer()
+++ /dev/null
-
-/* ====================================================================
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-==================================================================== */
-
-
-package org.apache.poi.hssf.record;
-
-import org.apache.poi.util.LittleEndian;
-import org.apache.poi.util.LittleEndianConsts;
-
-/**
- * Write out an SST header record.
- *
- * @author Glen Stampoultzis (glens at apache.org)
- */
-class SSTRecordHeader
-{
- int numStrings;
- int numUniqueStrings;
-
- public SSTRecordHeader( int numStrings, int numUniqueStrings )
- {
- this.numStrings = numStrings;
- this.numUniqueStrings = numUniqueStrings;
- }
-
- /**
- * Writes out the SST record. This consists of the sid, the record size, the number of
- * strings and the number of unique strings.
- *
- * @param data The data buffer to write the header to.
- * @param bufferIndex The index into the data buffer where the header should be written.
- * @param recSize The number of records written.
- *
- * @return The bufer of bytes modified.
- */
- public int writeSSTHeader( UnicodeString.UnicodeRecordStats stats, byte[] data, int bufferIndex, int recSize )
- {
- int offset = bufferIndex;
-
- LittleEndian.putShort( data, offset, SSTRecord.sid );
- offset += LittleEndianConsts.SHORT_SIZE;
- stats.recordSize += LittleEndianConsts.SHORT_SIZE;
- stats.remainingSize -= LittleEndianConsts.SHORT_SIZE;
- //Delay writing the length
- stats.lastLengthPos = offset;
- offset += LittleEndianConsts.SHORT_SIZE;
- stats.recordSize += LittleEndianConsts.SHORT_SIZE;
- stats.remainingSize -= LittleEndianConsts.SHORT_SIZE;
- LittleEndian.putInt( data, offset, numStrings );
- offset += LittleEndianConsts.INT_SIZE;
- stats.recordSize += LittleEndianConsts.INT_SIZE;
- stats.remainingSize -= LittleEndianConsts.INT_SIZE;
- LittleEndian.putInt( data, offset, numUniqueStrings );
- offset += LittleEndianConsts.INT_SIZE;
- stats.recordSize += LittleEndianConsts.INT_SIZE;
- stats.remainingSize -= LittleEndianConsts.INT_SIZE;
-
- return offset - bufferIndex;
- }
-
-}
+++ /dev/null
-/* ====================================================================
- Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements. See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-==================================================================== */
-
-
-package org.apache.poi.hssf.record;
-
-import org.apache.poi.util.IntMapper;
-
-/**
- * Used to calculate the record sizes for a particular record. This kind of
- * sucks because it's similar to the SST serialization code. In general
- * the SST serialization code needs to be rewritten.
- *
- * @author Glen Stampoultzis (glens at apache.org)
- * @author Jason Height (jheight at apache.org)
- */
-class SSTRecordSizeCalculator
-{
- private IntMapper strings;
-
- public SSTRecordSizeCalculator(IntMapper strings)
- {
- this.strings = strings;
- }
-
- public int getRecordSize() {
- UnicodeString.UnicodeRecordStats rs = new UnicodeString.UnicodeRecordStats();
- rs.remainingSize -= SSTRecord.SST_RECORD_OVERHEAD;
- rs.recordSize += SSTRecord.SST_RECORD_OVERHEAD;
- for (int i=0; i < strings.size(); i++ )
- {
- UnicodeString unistr = ( (UnicodeString) strings.get(i));
- unistr.getRecordSize(rs);
- }
- return rs.recordSize;
- }
-}
-
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
-
package org.apache.poi.hssf.record;
+import org.apache.poi.hssf.record.cont.ContinuableRecordOutput;
import org.apache.poi.util.IntMapper;
-import org.apache.poi.util.LittleEndian;
/**
* This class handles serialization of SST records. It utilizes the record processor
*
* @author Glen Stampoultzis (glens at apache.org)
*/
-class SSTSerializer
-{
+final class SSTSerializer {
- // todo: make private again
- private IntMapper strings;
+ private final int _numStrings;
+ private final int _numUniqueStrings;
- private SSTRecordHeader sstRecordHeader;
+ private final IntMapper strings;
/** Offsets from the beginning of the SST record (even across continuations) */
- int[] bucketAbsoluteOffsets;
+ private final int[] bucketAbsoluteOffsets;
/** Offsets relative the start of the current SST or continue record */
- int[] bucketRelativeOffsets;
+ private final int[] bucketRelativeOffsets;
int startOfSST, startOfRecord;
public SSTSerializer( IntMapper strings, int numStrings, int numUniqueStrings )
{
this.strings = strings;
- this.sstRecordHeader = new SSTRecordHeader( numStrings, numUniqueStrings );
+ _numStrings = numStrings;
+ _numUniqueStrings = numUniqueStrings;
int infoRecs = ExtSSTRecord.getNumberOfInfoRecsForStrings(strings.size());
this.bucketAbsoluteOffsets = new int[infoRecs];
this.bucketRelativeOffsets = new int[infoRecs];
}
- /**
- * Create a byte array consisting of an SST record and any
- * required Continue records, ready to be written out.
- * <p>
- * If an SST record and any subsequent Continue records are read
- * in to create this instance, this method should produce a byte
- * array that is identical to the byte array produced by
- * concatenating the input records' data.
- *
- * @return the byte array
- */
- public int serialize(int offset, byte[] data )
- {
- UnicodeString.UnicodeRecordStats stats = new UnicodeString.UnicodeRecordStats();
- sstRecordHeader.writeSSTHeader( stats, data, 0 + offset, 0 );
- int pos = offset + SSTRecord.SST_RECORD_OVERHEAD;
+ public void serialize(ContinuableRecordOutput out) {
+ out.writeInt(_numStrings);
+ out.writeInt(_numUniqueStrings);
for ( int k = 0; k < strings.size(); k++ )
{
if (k % ExtSSTRecord.DEFAULT_BUCKET_SIZE == 0)
{
+ int rOff = out.getTotalSize();
int index = k/ExtSSTRecord.DEFAULT_BUCKET_SIZE;
if (index < ExtSSTRecord.MAX_BUCKETS) {
//Excel only indexes the first 128 buckets.
- bucketAbsoluteOffsets[index] = pos-offset;
- bucketRelativeOffsets[index] = pos-offset;
- }
+ bucketAbsoluteOffsets[index] = rOff;
+ bucketRelativeOffsets[index] = rOff;
+ }
}
UnicodeString s = getUnicodeString(k);
- pos += s.serialize(stats, pos, data);
- }
- //Check to see if there is a hanging continue record length
- if (stats.lastLengthPos != -1) {
- short lastRecordLength = (short)(pos - stats.lastLengthPos-2);
- if (lastRecordLength > 8224)
- throw new InternalError();
-
- LittleEndian.putShort(data, stats.lastLengthPos, lastRecordLength);
- }
- return pos - offset;
- }
+ s.serialize(out);
+ }
+ }
private UnicodeString getUnicodeString( int index )
package org.apache.poi.hssf.record;
-import org.apache.poi.util.LittleEndian;
+import org.apache.poi.hssf.record.cont.ContinuableRecord;
+import org.apache.poi.hssf.record.cont.ContinuableRecordOutput;
import org.apache.poi.util.StringUtil;
/**
- * Supports the STRING record structure. (0x0207)
+ * STRING (0x0207)<p/>
+ *
+ * Stores the cached result of a text formula
*
* @author Glen Stampoultzis (glens at apache.org)
*/
-public class StringRecord extends Record {
- public final static short sid = 0x0207;
- private int field_1_string_length;
- private byte field_2_unicode_flag;
- private String field_3_string;
+public final class StringRecord extends ContinuableRecord {
+
+ public final static short sid = 0x0207;
+
+ private boolean _is16bitUnicode;
+ private String _text;
public StringRecord()
/**
* @param in the RecordInputstream to read the record from
*/
- public StringRecord( RecordInputStream in)
- {
- field_1_string_length = in.readShort();
- field_2_unicode_flag = in.readByte();
- byte[] data = in.readRemainder();
- //Why isn't this using the in.readString methods???
- if (isUnCompressedUnicode())
- {
- field_3_string = StringUtil.getFromUnicodeLE(data, 0, field_1_string_length );
- }
- else
- {
- field_3_string = StringUtil.getFromCompressedUnicode(data, 0, field_1_string_length);
+ public StringRecord( RecordInputStream in) {
+ int field_1_string_length = in.readUShort();
+ _is16bitUnicode = in.readByte() != 0x00;
+
+ if (_is16bitUnicode){
+ _text = in.readUnicodeLEString(field_1_string_length);
+ } else {
+ _text = in.readCompressedUnicode(field_1_string_length);
}
}
-
- public void processContinueRecord(byte[] data) {
- if(isUnCompressedUnicode()) {
- field_3_string += StringUtil.getFromUnicodeLE(data, 0, field_1_string_length - field_3_string.length());
- } else {
- field_3_string += StringUtil.getFromCompressedUnicode(data, 0, field_1_string_length - field_3_string.length());
- }
- }
- private int getStringByteLength()
- {
- return isUnCompressedUnicode() ? field_1_string_length * 2 : field_1_string_length;
- }
-
- protected int getDataSize() {
- return 2 + 1 + getStringByteLength();
- }
- /**
- * is this uncompressed unicode (16bit)? Or just 8-bit compressed?
- * @return isUnicode - True for 16bit- false for 8bit
- */
- public boolean isUnCompressedUnicode()
- {
- return (field_2_unicode_flag == 1);
+ protected void serialize(ContinuableRecordOutput out) {
+ out.writeShort(_text.length());
+ out.writeStringData(_text);
}
- /**
- * called by the class that is responsible for writing this sucker.
- * Subclasses should implement this so that their data is passed back in a
- * byte array.
- *
- * @param offset to begin writing at
- * @param data byte array containing instance data
- * @return number of bytes written
- */
- public int serialize( int offset, byte[] data )
- {
- LittleEndian.putUShort(data, 0 + offset, sid);
- LittleEndian.putUShort(data, 2 + offset, 3 + getStringByteLength());
- LittleEndian.putUShort(data, 4 + offset, field_1_string_length);
- data[6 + offset] = field_2_unicode_flag;
- if (isUnCompressedUnicode())
- {
- StringUtil.putUnicodeLE(field_3_string, data, 7 + offset);
- }
- else
- {
- StringUtil.putCompressedUnicode(field_3_string, data, 7 + offset);
- }
- return getRecordSize();
- }
- /**
- * return the non static version of the id for this record.
- */
public short getSid()
{
return sid;
*/
public String getString()
{
- return field_3_string;
+ return _text;
}
- /**
- * Sets whether the string is compressed or not
- * @param unicode_flag 1 = uncompressed, 0 = compressed
- */
- public void setCompressedFlag( byte unicode_flag )
- {
- this.field_2_unicode_flag = unicode_flag;
- }
/**
* Sets the string represented by this record.
*/
- public void setString( String string )
- {
- this.field_1_string_length = string.length();
- this.field_3_string = string;
- setCompressedFlag(StringUtil.hasMultibyte(string) ? (byte)1 : (byte)0);
+ public void setString(String string) {
+ _text = string;
+ _is16bitUnicode = StringUtil.hasMultibyte(string);
}
public String toString()
buffer.append("[STRING]\n");
buffer.append(" .string = ")
- .append(field_3_string).append("\n");
+ .append(_text).append("\n");
buffer.append("[/STRING]\n");
return buffer.toString();
}
public Object clone() {
StringRecord rec = new StringRecord();
- rec.field_1_string_length = this.field_1_string_length;
- rec.field_2_unicode_flag= this.field_2_unicode_flag;
- rec.field_3_string = this.field_3_string;
+ rec._is16bitUnicode= _is16bitUnicode;
+ rec._text = _text;
return rec;
}
}
package org.apache.poi.hssf.record;
-import org.apache.poi.hssf.record.UnicodeString.UnicodeRecordStats;
-import org.apache.poi.util.LittleEndian;
+import org.apache.poi.util.LittleEndianByteArrayOutputStream;
+import org.apache.poi.util.LittleEndianOutput;
+import org.apache.poi.util.StringUtil;
/**
- * Title: Sup Book (EXTERNALBOOK) <P>
+ * Title: Sup Book - EXTERNALBOOK (0x01AE) <p/>
* Description: A External Workbook Description (Supplemental Book)
* Its only a dummy record for making new ExternSheet Record <P>
* REFERENCE: 5.38<P>
*/
public final class SupBookRecord extends Record {
- public final static short sid = 0x1AE;
+ public final static short sid = 0x01AE;
private static final short SMALL_RECORD_SIZE = 4;
private static final short TAG_INTERNAL_REFERENCES = 0x0401;
private static final short TAG_ADD_IN_FUNCTIONS = 0x3A01;
- private short field_1_number_of_sheets;
- private UnicodeString field_2_encoded_url;
- private UnicodeString[] field_3_sheet_names;
- private boolean _isAddInFunctions;
+ private short field_1_number_of_sheets;
+ private String field_2_encoded_url;
+ private String[] field_3_sheet_names;
+ private boolean _isAddInFunctions;
+
-
public static SupBookRecord createInternalReferences(short numberOfSheets) {
return new SupBookRecord(false, numberOfSheets);
}
public static SupBookRecord createAddInFunctions() {
return new SupBookRecord(true, (short)0);
}
- public static SupBookRecord createExternalReferences(UnicodeString url, UnicodeString[] sheetNames) {
+ public static SupBookRecord createExternalReferences(String url, String[] sheetNames) {
return new SupBookRecord(url, sheetNames);
}
private SupBookRecord(boolean isAddInFuncs, short numberOfSheets) {
field_3_sheet_names = null;
_isAddInFunctions = isAddInFuncs;
}
- public SupBookRecord(UnicodeString url, UnicodeString[] sheetNames) {
+ public SupBookRecord(String url, String[] sheetNames) {
field_1_number_of_sheets = (short) sheetNames.length;
field_2_encoded_url = url;
field_3_sheet_names = sheetNames;
* @param offset of the record's data (provided a big array of the file)
*/
public SupBookRecord(RecordInputStream in) {
- int recLen = in.remaining();
-
+ int recLen = in.remaining();
+
field_1_number_of_sheets = in.readShort();
-
+
if(recLen > SMALL_RECORD_SIZE) {
// 5.38.1 External References
_isAddInFunctions = false;
- field_2_encoded_url = in.readUnicodeString();
- UnicodeString[] sheetNames = new UnicodeString[field_1_number_of_sheets];
+ field_2_encoded_url = in.readString();
+ String[] sheetNames = new String[field_1_number_of_sheets];
for (int i = 0; i < sheetNames.length; i++) {
- sheetNames[i] = in.readUnicodeString();
+ sheetNames[i] = in.readString();
}
field_3_sheet_names = sheetNames;
return;
// else not 'External References'
field_2_encoded_url = null;
field_3_sheet_names = null;
-
+
short nextShort = in.readShort();
if(nextShort == TAG_INTERNAL_REFERENCES) {
// 5.38.2 'Internal References'
+ field_1_number_of_sheets + ")");
}
} else {
- throw new RuntimeException("invalid EXTERNALBOOK code ("
+ throw new RuntimeException("invalid EXTERNALBOOK code ("
+ Integer.toHexString(nextShort) + ")");
}
}
public String toString() {
StringBuffer sb = new StringBuffer();
sb.append(getClass().getName()).append(" [SUPBOOK ");
-
+
if(isExternalReferences()) {
sb.append("External References");
sb.append(" nSheets=").append(field_1_number_of_sheets);
return SMALL_RECORD_SIZE;
}
int sum = 2; // u16 number of sheets
- UnicodeRecordStats urs = new UnicodeRecordStats();
- field_2_encoded_url.getRecordSize(urs);
- sum += urs.recordSize;
-
+
+ sum += StringUtil.getEncodedSize(field_2_encoded_url);
+
for(int i=0; i<field_3_sheet_names.length; i++) {
- urs = new UnicodeRecordStats();
- field_3_sheet_names[i].getRecordSize(urs);
- sum += urs.recordSize;
+ sum += StringUtil.getEncodedSize(field_3_sheet_names[i]);
}
return sum;
}
-
/**
* called by the class that is responsible for writing this sucker.
* Subclasses should implement this so that their data is passed back in a
* @return number of bytes written
*/
public int serialize(int offset, byte [] data) {
- LittleEndian.putShort(data, 0 + offset, sid);
int dataSize = getDataSize();
- LittleEndian.putShort(data, 2 + offset, (short) dataSize);
- LittleEndian.putShort(data, 4 + offset, field_1_number_of_sheets);
-
+ int recordSize = 4 + dataSize;
+ LittleEndianOutput out = new LittleEndianByteArrayOutputStream(data, offset, recordSize);
+
+ out.writeShort(sid);
+ out.writeShort(dataSize);
+ out.writeShort(field_1_number_of_sheets);
+
if(isExternalReferences()) {
-
- int currentOffset = 6 + offset;
- UnicodeRecordStats urs = new UnicodeRecordStats();
- field_2_encoded_url.serialize(urs, currentOffset, data);
- currentOffset += urs.recordSize;
-
+ StringUtil.writeUnicodeString(out, field_2_encoded_url);
+
for(int i=0; i<field_3_sheet_names.length; i++) {
- urs = new UnicodeRecordStats();
- field_3_sheet_names[i].serialize(urs, currentOffset, data);
- currentOffset += urs.recordSize;
+ StringUtil.writeUnicodeString(out, field_3_sheet_names[i]);
}
} else {
- short field2val = _isAddInFunctions ? TAG_ADD_IN_FUNCTIONS : TAG_INTERNAL_REFERENCES;
-
- LittleEndian.putShort(data, 6 + offset, field2val);
+ int field2val = _isAddInFunctions ? TAG_ADD_IN_FUNCTIONS : TAG_INTERNAL_REFERENCES;
+
+ out.writeShort(field2val);
}
- return dataSize + 4;
+ return recordSize;
}
public void setNumberOfSheets(short number){
return sid;
}
public String getURL() {
- String encodedUrl = field_2_encoded_url.getString();
+ String encodedUrl = field_2_encoded_url;
switch(encodedUrl.charAt(0)) {
case 0: // Reference to an empty workbook name
return encodedUrl.substring(1); // will this just be empty string?
return decodeFileName(encodedUrl);
case 2: // Self-referential external reference
return encodedUrl.substring(1);
-
+
}
return encodedUrl;
}
return encodedUrl.substring(1);
// TODO the following special characters may appear in the rest of the string, and need to get interpreted
/* see "MICROSOFT OFFICE EXCEL 97-2007 BINARY FILE FORMAT SPECIFICATION"
- chVolume 1
- chSameVolume 2
+ chVolume 1
+ chSameVolume 2
chDownDir 3
- chUpDir 4
+ chUpDir 4
chLongVolume 5
chStartupDir 6
chAltStartupDir 7
chLibDir 8
-
+
*/
}
- public UnicodeString[] getSheetNames() {
- return (UnicodeString[]) field_3_sheet_names.clone();
+ public String[] getSheetNames() {
+ return (String[]) field_3_sheet_names.clone();
}
}
package org.apache.poi.hssf.record;
-import java.io.UnsupportedEncodingException;
-
+import org.apache.poi.hssf.record.cont.ContinuableRecord;
+import org.apache.poi.hssf.record.cont.ContinuableRecordOutput;
import org.apache.poi.hssf.record.formula.Ptg;
import org.apache.poi.hssf.usermodel.HSSFRichTextString;
import org.apache.poi.util.BitField;
import org.apache.poi.util.BitFieldFactory;
import org.apache.poi.util.HexDump;
-import org.apache.poi.util.LittleEndian;
-import org.apache.poi.util.LittleEndianByteArrayOutputStream;
-import org.apache.poi.util.LittleEndianOutput;
/**
* The TXO record (0x01B6) is used to define the properties of a text box. It is
*
* @author Glen Stampoultzis (glens at apache.org)
*/
-public final class TextObjectRecord extends Record {
+public final class TextObjectRecord extends ContinuableRecord {
public final static short sid = 0x01B6;
private static final int FORMAT_RUN_ENCODED_SIZE = 8; // 2 shorts and 4 bytes reserved
return sid;
}
- /**
- * Only for the current record. does not include any subsequent Continue
- * records
- */
- private int getCurrentRecordDataSize() {
- int result = 2 + 2 + 2 + 2 + 2 + 2 + 2 + 4;
- if (_linkRefPtg != null) {
- result += 2 // formula size
- + 4 // unknownInt
- +_linkRefPtg.getSize();
- if (_unknownPostFormulaByte != null) {
- result += 1;
- }
- }
- return result;
- }
-
- private int serializeTXORecord(int offset, byte[] data) {
- int dataSize = getCurrentRecordDataSize();
- int recSize = dataSize+4;
- LittleEndianOutput out = new LittleEndianByteArrayOutputStream(data, offset, recSize);
-
- out.writeShort(TextObjectRecord.sid);
- out.writeShort(dataSize);
+ private void serializeTXORecord(ContinuableRecordOutput out) {
out.writeShort(field_1_options);
out.writeShort(field_2_textOrientation);
out.writeByte(_unknownPostFormulaByte.byteValue());
}
}
- return recSize;
}
- private int serializeTrailingRecords(int offset, byte[] data) {
- byte[] textBytes;
- try {
- textBytes = _text.getString().getBytes("UTF-16LE");
- } catch (UnsupportedEncodingException e) {
- throw new RuntimeException(e.getMessage(), e);
- }
- int remainingLength = textBytes.length;
-
- int countTextBytesWritten = 0;
- int pos = offset;
- // (regardless what was read, we always serialize double-byte
- // unicode characters (UTF-16LE).
- Byte unicodeFlag = new Byte((byte)1);
- while (remainingLength > 0) {
- int chunkSize = Math.min(RecordInputStream.MAX_RECORD_DATA_SIZE - 2, remainingLength);
- remainingLength -= chunkSize;
- pos += ContinueRecord.write(data, pos, unicodeFlag, textBytes, countTextBytesWritten, chunkSize);
- countTextBytesWritten += chunkSize;
- }
-
- byte[] formatData = createFormatData(_text);
- pos += ContinueRecord.write(data, pos, null, formatData);
- return pos - offset;
+ private void serializeTrailingRecords(ContinuableRecordOutput out) {
+ out.writeContinue();
+ out.writeStringData(_text.getString());
+ out.writeContinue();
+ writeFormatData(out, _text);
}
- private int getTrailingRecordsSize() {
- if (_text.length() < 1) {
- return 0;
- }
- int encodedTextSize = 0;
- int textBytesLength = _text.length() * LittleEndian.SHORT_SIZE;
- while (textBytesLength > 0) {
- int chunkSize = Math.min(RecordInputStream.MAX_RECORD_DATA_SIZE - 2, textBytesLength);
- textBytesLength -= chunkSize;
-
- encodedTextSize += 4; // +4 for ContinueRecord sid+size
- encodedTextSize += 1+chunkSize; // +1 for compressed unicode flag,
- }
-
- int encodedFormatSize = (_text.numFormattingRuns() + 1) * FORMAT_RUN_ENCODED_SIZE
- + 4; // +4 for ContinueRecord sid+size
- return encodedTextSize + encodedFormatSize;
- }
+ protected void serialize(ContinuableRecordOutput out) {
-
- public int serialize(int offset, byte[] data) {
-
- int expectedTotalSize = getRecordSize();
- int totalSize = serializeTXORecord(offset, data);
-
+ serializeTXORecord(out);
if (_text.getString().length() > 0) {
- totalSize += serializeTrailingRecords(offset+totalSize, data);
+ serializeTrailingRecords(out);
}
-
- if (totalSize != expectedTotalSize)
- throw new RecordFormatException(totalSize
- + " bytes written but getRecordSize() reports " + expectedTotalSize);
- return totalSize;
}
- /**
- * Note - this total size includes all potential {@link ContinueRecord}s written
- * but it is not the "ushort size" value to be written at the start of the first BIFF record
- */
- protected int getDataSize() {
- return getCurrentRecordDataSize() + getTrailingRecordsSize();
- }
-
-
private int getFormattingDataLength() {
if (_text.length() < 1) {
// important - no formatting data if text is empty
return (_text.numFormattingRuns() + 1) * FORMAT_RUN_ENCODED_SIZE;
}
- private static byte[] createFormatData(HSSFRichTextString str) {
+ private static void writeFormatData(ContinuableRecordOutput out , HSSFRichTextString str) {
int nRuns = str.numFormattingRuns();
- byte[] result = new byte[(nRuns + 1) * FORMAT_RUN_ENCODED_SIZE];
- int pos = 0;
for (int i = 0; i < nRuns; i++) {
- LittleEndian.putUShort(result, pos, str.getIndexOfFormattingRun(i));
- pos += 2;
+ out.writeShort(str.getIndexOfFormattingRun(i));
int fontIndex = str.getFontOfFormattingRun(i);
- LittleEndian.putUShort(result, pos, fontIndex == str.NO_FONT ? 0 : fontIndex);
- pos += 2;
- pos += 4; // skip reserved
+ out.writeShort(fontIndex == str.NO_FONT ? 0 : fontIndex);
+ out.writeInt(0); // skip reserved
}
- LittleEndian.putUShort(result, pos, str.length());
- pos += 2;
- LittleEndian.putUShort(result, pos, 0);
- pos += 2;
- pos += 4; // skip reserved
-
- return result;
+ out.writeShort(str.length());
+ out.writeShort(0);
+ out.writeInt(0); // skip reserved
}
/**
package org.apache.poi.hssf.record;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.poi.hssf.record.cont.ContinuableRecordOutput;
import org.apache.poi.util.BitField;
import org.apache.poi.util.BitFieldFactory;
-import org.apache.poi.util.LittleEndian;
import org.apache.poi.util.HexDump;
-
-import java.util.Iterator;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Collections;
+import org.apache.poi.util.LittleEndianInput;
+import org.apache.poi.util.LittleEndianOutput;
/**
- * Title: Unicode String<P>
- * Description: Unicode String record. We implement these as a record, although
- * they are really just standard fields that are in several records.
- * It is considered more desirable then repeating it in all of them.<P>
- * REFERENCE: PG 264 Microsoft Excel 97 Developer's Kit (ISBN: 1-57231-498-2)<P>
+ * Title: Unicode String<p/>
+ * Description: Unicode String - just standard fields that are in several records.
+ * It is considered more desirable than repeating it in all of them.<p/>
+ * REFERENCE: PG 264 Microsoft Excel 97 Developer's Kit (ISBN: 1-57231-498-2)<p/>
* @author Andrew C. Oliver
* @author Marc Johnson (mjohnson at apache dot org)
* @author Glen Stampoultzis (glens at apache.org)
*/
public final class UnicodeString implements Comparable {
- private short field_1_charCount; // = 0;
- private byte field_2_optionflags; // = 0;
- private String field_3_string; // = null;
+ private short field_1_charCount;
+ private byte field_2_optionflags;
+ private String field_3_string;
private List field_4_format_runs;
private byte[] field_5_ext_rst;
- private static final BitField highByte = BitFieldFactory.getInstance(0x1);
- private static final BitField extBit = BitFieldFactory.getInstance(0x4);
- private static final BitField richText = BitFieldFactory.getInstance(0x8);
+ private static final BitField highByte = BitFieldFactory.getInstance(0x1);
+ private static final BitField extBit = BitFieldFactory.getInstance(0x4);
+ private static final BitField richText = BitFieldFactory.getInstance(0x8);
public static class FormatRun implements Comparable {
- short character;
- short fontIndex;
+ short character;
+ short fontIndex;
- public FormatRun(short character, short fontIndex) {
- this.character = character;
- this.fontIndex = fontIndex;
- }
+ public FormatRun(short character, short fontIndex) {
+ this.character = character;
+ this.fontIndex = fontIndex;
+ }
- public short getCharacterPos() {
- return character;
- }
+ public FormatRun(LittleEndianInput in) {
+ this(in.readShort(), in.readShort());
+ }
- public short getFontIndex() {
- return fontIndex;
- }
+ public short getCharacterPos() {
+ return character;
+ }
- public boolean equals(Object o) {
- if ((o == null) || (o.getClass() != this.getClass()))
- {
- return false;
+ public short getFontIndex() {
+ return fontIndex;
}
- FormatRun other = ( FormatRun ) o;
- return ((character == other.character) && (fontIndex == other.fontIndex));
- }
+ public boolean equals(Object o) {
+ if (!(o instanceof FormatRun)) {
+ return false;
+ }
+ FormatRun other = ( FormatRun ) o;
- public int compareTo(Object obj) {
- FormatRun r = (FormatRun)obj;
- if ((character == r.character) && (fontIndex == r.fontIndex))
- return 0;
- if (character == r.character)
- return fontIndex - r.fontIndex;
- else return character - r.character;
- }
+ return character == other.character && fontIndex == other.fontIndex;
+ }
- public String toString() {
- return "character="+character+",fontIndex="+fontIndex;
- }
+ public int compareTo(Object obj) {
+ FormatRun r = (FormatRun)obj;
+ if ((character == r.character) && (fontIndex == r.fontIndex))
+ return 0;
+ if (character == r.character)
+ return fontIndex - r.fontIndex;
+ else return character - r.character;
+ }
+
+ public String toString() {
+ return "character="+character+",fontIndex="+fontIndex;
+ }
+
+ public void serialize(LittleEndianOutput out) {
+ out.writeShort(character);
+ out.writeShort(fontIndex);
+ }
}
private UnicodeString() {
*/
public boolean equals(Object o)
{
- if ((o == null) || (o.getClass() != this.getClass()))
- {
+ if (!(o instanceof UnicodeString)) {
return false;
}
- UnicodeString other = ( UnicodeString ) o;
+ UnicodeString other = (UnicodeString) o;
- //Ok lets do this in stages to return a quickly, first check the actual string
+ //OK let's do this in stages to return quickly, first check the actual string
boolean eq = ((field_1_charCount == other.field_1_charCount)
&& (field_2_optionflags == other.field_2_optionflags)
&& field_3_string.equals(other.field_3_string));
if (!run1.equals(run2))
return false;
- }
+ }
//Well the format runs are equal as well!, better check the ExtRst data
//Which by the way we dont know how to decode!
boolean isCompressed = ((field_2_optionflags & 1) == 0);
if (isCompressed) {
- field_3_string = in.readCompressedUnicode(field_1_charCount);
+ field_3_string = in.readCompressedUnicode(field_1_charCount);
} else {
- field_3_string = in.readUnicodeLEString(field_1_charCount);
+ field_3_string = in.readUnicodeLEString(field_1_charCount);
}
if (isRichText() && (runCount > 0)) {
field_4_format_runs = new ArrayList(runCount);
for (int i=0;i<runCount;i++) {
- field_4_format_runs.add(new FormatRun(in.readShort(), in.readShort()));
- //read reserved
- //in.readInt();
- }
+ field_4_format_runs.add(new FormatRun(in));
+ }
}
if (isExtendedText() && (extensionLength > 0)) {
field_2_optionflags = richText.clearByte(field_2_optionflags);
}
- public byte[] getExtendedRst() {
- return this.field_5_ext_rst;
- }
- public void setExtendedRst(byte[] ext_rst) {
+ void setExtendedRst(byte[] ext_rst) {
if (ext_rst != null)
field_2_optionflags = extBit.setByte(field_2_optionflags);
else field_2_optionflags = extBit.clearByte(field_2_optionflags);
* removed / re-ordered
*/
public void swapFontUse(short oldFontIndex, short newFontIndex) {
- Iterator i = field_4_format_runs.iterator();
- while(i.hasNext()) {
- FormatRun run = (FormatRun)i.next();
- if(run.fontIndex == oldFontIndex) {
- run.fontIndex = newFontIndex;
- }
- }
+ Iterator i = field_4_format_runs.iterator();
+ while(i.hasNext()) {
+ FormatRun run = (FormatRun)i.next();
+ if(run.fontIndex == oldFontIndex) {
+ run.fontIndex = newFontIndex;
+ }
+ }
}
/**
return buffer.toString();
}
- private int writeContinueIfRequired(UnicodeRecordStats stats, final int requiredSize, int offset, byte[] data) {
- //Basic string overhead
- if (stats.remainingSize < requiredSize) {
- //Check if be are already in a continue record, if so make sure that
- //we go back and write out our length
- if (stats.lastLengthPos != -1) {
- short lastRecordLength = (short)(offset - stats.lastLengthPos - 2);
- if (lastRecordLength > 8224)
- throw new InternalError();
- LittleEndian.putShort(data, stats.lastLengthPos, lastRecordLength);
- }
-
- LittleEndian.putShort(data, offset, ContinueRecord.sid);
- offset+=2;
- //Record the location of the last continue length position, but don't write
- //anything there yet (since we don't know what it will be!)
- stats.lastLengthPos = offset;
- offset += 2;
-
- stats.recordSize += 4;
- stats.remainingSize = SSTRecord.MAX_RECORD_SIZE-4;
- }
- return offset;
- }
-
- public int serialize(UnicodeRecordStats stats, final int offset, byte [] data)
- {
- int pos = offset;
-
- //Basic string overhead
- pos = writeContinueIfRequired(stats, 3, pos, data);
- LittleEndian.putShort(data, pos, getCharCount());
- pos += 2;
- data[ pos ] = getOptionFlags();
- pos += 1;
- stats.recordSize += 3;
- stats.remainingSize-= 3;
-
- if (isRichText()) {
- if (field_4_format_runs != null) {
- pos = writeContinueIfRequired(stats, 2, pos, data);
-
- LittleEndian.putShort(data, pos, (short) field_4_format_runs.size());
- pos += 2;
- stats.recordSize += 2;
- stats.remainingSize -= 2;
- }
- }
- if ( isExtendedText() )
- {
- if (this.field_5_ext_rst != null) {
- pos = writeContinueIfRequired(stats, 4, pos, data);
-
- LittleEndian.putInt(data, pos, field_5_ext_rst.length);
- pos += 4;
- stats.recordSize += 4;
- stats.remainingSize -= 4;
- }
- }
-
- int charsize = isUncompressedUnicode() ? 2 : 1;
- int strSize = (getString().length() * charsize);
-
- byte[] strBytes = null;
- try {
- String unicodeString = getString();
- if (!isUncompressedUnicode())
- {
- strBytes = unicodeString.getBytes("ISO-8859-1");
- }
- else
- {
- strBytes = unicodeString.getBytes("UTF-16LE");
- }
- }
- catch (Exception e) {
- throw new InternalError();
- }
- if (strSize != strBytes.length)
- throw new InternalError("That shouldnt have happened!");
-
- //Check to see if the offset occurs mid string, if so then we need to add
- //the byte to start with that represents the first byte of the continue record.
- if (strSize > stats.remainingSize) {
- //OK the offset occurs half way through the string, that means that
- //we need an extra byte after the continue record ie we didnt finish
- //writing out the string the 1st time through
-
- //But hang on, how many continue records did we span? What if this is
- //a REALLY long string. We need to work this all out.
- int amountThatCantFit = strSize;
- int strPos = 0;
- while (amountThatCantFit > 0) {
- int amountWritten = Math.min(stats.remainingSize, amountThatCantFit);
- //Make sure that the amount that can't fit takes into account
- //whether we are writing double byte unicode
- if (isUncompressedUnicode()) {
- //We have the '-1' here because whether this is the first record or
- //subsequent continue records, there is always the case that the
- //number of bytes in a string on double byte boundaries is actually odd.
- if ( ( (amountWritten ) % 2) == 1)
- amountWritten--;
- }
- System.arraycopy(strBytes, strPos, data, pos, amountWritten);
- pos += amountWritten;
- strPos += amountWritten;
- stats.recordSize += amountWritten;
- stats.remainingSize -= amountWritten;
-
- //Ok lets subtract what we can write
- amountThatCantFit -= amountWritten;
-
- //Each iteration of this while loop is another continue record, unless
- //everything now fits.
- if (amountThatCantFit > 0) {
- //We know that a continue WILL be requied, but use this common method
- pos = writeContinueIfRequired(stats, amountThatCantFit, pos, data);
-
- //The first byte after a continue mid string is the extra byte to
- //indicate if this run is compressed or not.
- data[pos] = (byte) (isUncompressedUnicode() ? 0x1 : 0x0);
- pos++;
- stats.recordSize++;
- stats.remainingSize --;
- }
- }
- } else {
- if (strSize > (data.length-pos))
- System.out.println("Hmm shouldnt happen");
- //Ok the string fits nicely in the remaining size
- System.arraycopy(strBytes, 0, data, pos, strSize);
- pos += strSize;
- stats.recordSize += strSize;
- stats.remainingSize -= strSize;
- }
-
-
- if (isRichText() && (field_4_format_runs != null)) {
- int count = field_4_format_runs.size();
-
- //This will ensure that a run does not split a continue
- for (int i=0;i<count;i++) {
- pos = writeContinueIfRequired(stats, 4, pos, data);
- FormatRun r = (FormatRun)field_4_format_runs.get(i);
- LittleEndian.putShort(data, pos, r.character);
- pos += 2;
- LittleEndian.putShort(data, pos, r.fontIndex);
- pos += 2;
-
- //Each run count is four bytes
- stats.recordSize += 4;
- stats.remainingSize -=4;
+ public void serialize(ContinuableRecordOutput out) {
+ int numberOfRichTextRuns = 0;
+ int extendedDataSize = 0;
+ if (isRichText() && field_4_format_runs != null) {
+ numberOfRichTextRuns = field_4_format_runs.size();
}
- }
-
- if (isExtendedText() && (field_5_ext_rst != null)) {
- //Ok ExtRst is actually not documented, so i am going to hope
- //that we can actually continue on byte boundaries
- int ammountThatCantFit = field_5_ext_rst.length - stats.remainingSize;
- int extPos = 0;
- if (ammountThatCantFit > 0) {
- while (ammountThatCantFit > 0) {
- //So for this record we have already written
- int ammountWritten = Math.min(stats.remainingSize, ammountThatCantFit);
- System.arraycopy(field_5_ext_rst, extPos, data, pos, ammountWritten);
- pos += ammountWritten;
- extPos += ammountWritten;
- stats.recordSize += ammountWritten;
- stats.remainingSize -= ammountWritten;
-
- //Ok lets subtract what we can write
- ammountThatCantFit -= ammountWritten;
- if (ammountThatCantFit > 0) {
- pos = writeContinueIfRequired(stats, 1, pos, data);
- }
- }
- } else {
- //We can fit wholey in what remains.
- System.arraycopy(field_5_ext_rst, 0, data, pos, field_5_ext_rst.length);
- pos += field_5_ext_rst.length;
- stats.remainingSize -= field_5_ext_rst.length;
- stats.recordSize += field_5_ext_rst.length;
+ if (isExtendedText() && field_5_ext_rst != null) {
+ extendedDataSize = field_5_ext_rst.length;
}
- }
-
- return pos - offset;
- }
-
-
- public void setCompressedUnicode() {
- field_2_optionflags = highByte.setByte(field_2_optionflags);
- }
-
- public void setUncompressedUnicode() {
- field_2_optionflags = highByte.clearByte(field_2_optionflags);
- }
-
- private boolean isUncompressedUnicode()
- {
- return highByte.isSet(getOptionFlags());
- }
-
- /** Returns the size of this record, given the amount of record space
- * remaining, it will also include the size of writing a continue record.
- */
-
- public static class UnicodeRecordStats {
- public int recordSize;
- public int remainingSize = SSTRecord.MAX_RECORD_SIZE;
- public int lastLengthPos = -1;
- }
- public void getRecordSize(UnicodeRecordStats stats) {
- //Basic string overhead
- if (stats.remainingSize < 3) {
- //Needs a continue
- stats.recordSize += 4;
- stats.remainingSize = SSTRecord.MAX_RECORD_SIZE-4;
- }
- stats.recordSize += 3;
- stats.remainingSize-= 3;
-
- //Read the number of rich runs if rich text.
- if ( isRichText() )
- {
- //Run count
- if (stats.remainingSize < 2) {
- //Needs a continue
- //Reset the available space.
- stats.remainingSize = SSTRecord.MAX_RECORD_SIZE-4;
- //continue record overhead
- stats.recordSize+=4;
- }
-
- stats.recordSize += 2;
- stats.remainingSize -=2;
- }
- //Read the size of extended data if present.
- if ( isExtendedText() )
- {
- //Needs a continue
- //extension length
- if (stats.remainingSize < 4) {
- //Reset the available space.
- stats.remainingSize = SSTRecord.MAX_RECORD_SIZE-4;
- //continue record overhead
- stats.recordSize+=4;
- }
-
- stats.recordSize += 4;
- stats.remainingSize -=4;
- }
-
- int charsize = isUncompressedUnicode() ? 2 : 1;
- int strSize = (getString().length() * charsize);
- //Check to see if the offset occurs mid string, if so then we need to add
- //the byte to start with that represents the first byte of the continue record.
- if (strSize > stats.remainingSize) {
- //Ok the offset occurs half way through the string, that means that
- //we need an extra byte after the continue record ie we didnt finish
- //writing out the string the 1st time through
-
- //But hang on, how many continue records did we span? What if this is
- //a REALLY long string. We need to work this all out.
- int ammountThatCantFit = strSize;
- while (ammountThatCantFit > 0) {
- int ammountWritten = Math.min(stats.remainingSize, ammountThatCantFit);
- //Make sure that the ammount that cant fit takes into account
- //whether we are writing double byte unicode
- if (isUncompressedUnicode()) {
- //We have the '-1' here because whether this is the first record or
- //subsequent continue records, there is always the case that the
- //number of bytes in a string on doube byte boundaries is actually odd.
- if ( ( (ammountWritten) % 2) == 1)
- ammountWritten--;
- }
- stats.recordSize += ammountWritten;
- stats.remainingSize -= ammountWritten;
-
- //Ok lets subtract what we can write
- ammountThatCantFit -= ammountWritten;
-
- //Each iteration of this while loop is another continue record, unless
- //everything now fits.
- if (ammountThatCantFit > 0) {
- //Reset the available space.
- stats.remainingSize = SSTRecord.MAX_RECORD_SIZE-4;
- //continue record overhead
- stats.recordSize+=4;
-
- //The first byte after a continue mid string is the extra byte to
- //indicate if this run is compressed or not.
- stats.recordSize++;
- stats.remainingSize --;
- }
- }
- } else {
- //Ok the string fits nicely in the remaining size
- stats.recordSize += strSize;
- stats.remainingSize -= strSize;
- }
+
+ out.writeString(field_3_string, numberOfRichTextRuns, extendedDataSize);
- if (isRichText() && (field_4_format_runs != null)) {
- int count = field_4_format_runs.size();
+ if (numberOfRichTextRuns > 0) {
- //This will ensure that a run does not split a continue
- for (int i=0;i<count;i++) {
- if (stats.remainingSize < 4) {
- //Reset the available space.
- stats.remainingSize = SSTRecord.MAX_RECORD_SIZE-4;
- //continue record overhead
- stats.recordSize+=4;
+ //This will ensure that a run does not split a continue
+ for (int i=0;i<numberOfRichTextRuns;i++) {
+ if (out.getAvailableSpace() < 4) {
+ out.writeContinue();
+ }
+ FormatRun r = (FormatRun)field_4_format_runs.get(i);
+ r.serialize(out);
}
-
- //Each run count is four bytes
- stats.recordSize += 4;
- stats.remainingSize -=4;
}
- }
- if (isExtendedText() && (field_5_ext_rst != null)) {
- //Ok ExtRst is actually not documented, so i am going to hope
- //that we can actually continue on byte boundaries
- int ammountThatCantFit = field_5_ext_rst.length - stats.remainingSize;
- if (ammountThatCantFit > 0) {
- while (ammountThatCantFit > 0) {
- //So for this record we have already written
- int ammountWritten = Math.min(stats.remainingSize, ammountThatCantFit);
- stats.recordSize += ammountWritten;
- stats.remainingSize -= ammountWritten;
-
- //Ok lets subtract what we can write
- ammountThatCantFit -= ammountWritten;
- if (ammountThatCantFit > 0) {
- //Each iteration of this while loop is another continue record.
-
- //Reset the available space.
- stats.remainingSize = SSTRecord.MAX_RECORD_SIZE-4;
- //continue record overhead
- stats.recordSize += 4;
+ if (extendedDataSize > 0) {
+ // OK ExtRst is actually not documented, so I am going to hope
+ // that we can actually continue on byte boundaries
+
+ int extPos = 0;
+ while (true) {
+ int nBytesToWrite = Math.min(extendedDataSize - extPos, out.getAvailableSpace());
+ out.write(field_5_ext_rst, extPos, nBytesToWrite);
+ extPos += nBytesToWrite;
+ if (extPos >= extendedDataSize) {
+ break;
+ }
+ out.writeContinue();
}
- }
- } else {
- //We can fit wholey in what remains.
- stats.remainingSize -= field_5_ext_rst.length;
- stats.recordSize += field_5_ext_rst.length;
}
- }
}
public int compareTo(Object obj)
if (result != 0)
return result;
- //Ok string appears to be equal but now lets compare formatting runs
+ //OK string appears to be equal but now lets compare formatting runs
if ((field_4_format_runs == null) && (str.field_4_format_runs == null))
- //Strings are equal, and there are no formtting runs.
+ //Strings are equal, and there are no formatting runs.
return 0;
if ((field_4_format_runs == null) && (str.field_4_format_runs != null))
return 0;
}
- public boolean isRichText()
+ private boolean isRichText()
{
return richText.isSet(getOptionFlags());
}
- public boolean isExtendedText()
+ private boolean isExtendedText()
{
return extBit.isSet(getOptionFlags());
}
str.field_5_ext_rst = new byte[field_5_ext_rst.length];
System.arraycopy(field_5_ext_rst, 0, str.field_5_ext_rst, 0,
field_5_ext_rst.length);
- }
+ }
return str;
}
-
-
}
package org.apache.poi.hssf.record.constant;
-import org.apache.poi.hssf.record.UnicodeString;
-import org.apache.poi.hssf.record.UnicodeString.UnicodeRecordStats;
import org.apache.poi.util.LittleEndianInput;
import org.apache.poi.util.LittleEndianOutput;
import org.apache.poi.util.StringUtil;
case TYPE_NUMBER:
return new Double(in.readDouble());
case TYPE_STRING:
- return new UnicodeString(StringUtil.readUnicodeString(in));
+ return StringUtil.readUnicodeString(in);
case TYPE_BOOLEAN:
return readBoolean(in);
case TYPE_ERROR_CODE:
if(cls == Boolean.class || cls == Double.class || cls == ErrorConstant.class) {
return 8;
}
- UnicodeString strVal = (UnicodeString)object;
- UnicodeRecordStats urs = new UnicodeRecordStats();
- strVal.getRecordSize(urs);
- return urs.recordSize;
+ String strVal = (String)object;
+ return StringUtil.getEncodedSize(strVal);
}
public static void encode(LittleEndianOutput out, Object[] values) {
out.writeDouble(dVal.doubleValue());
return;
}
- if (value instanceof UnicodeString) {
- UnicodeString usVal = (UnicodeString) value;
+ if (value instanceof String) {
+ String val = (String) value;
out.writeByte(TYPE_STRING);
- StringUtil.writeUnicodeString(out, usVal.getString());
+ StringUtil.writeUnicodeString(out, val);
return;
}
if (value instanceof ErrorConstant) {
--- /dev/null
+/* ====================================================================\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements. See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to You under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License. You may obtain a copy of the License at\r
+\r
+ http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+==================================================================== */\r
+\r
+package org.apache.poi.hssf.record.cont;\r
+\r
+import org.apache.poi.hssf.record.ContinueRecord;\r
+import org.apache.poi.hssf.record.Record;\r
+import org.apache.poi.util.LittleEndianByteArrayOutputStream;\r
+import org.apache.poi.util.LittleEndianOutput;\r
+\r
+/**\r
+ * Common superclass of all records that can produce {@link ContinueRecord}s while being serialized.\r
+ * \r
+ * @author Josh Micich\r
+ */\r
+public abstract class ContinuableRecord extends Record {\r
+\r
+ protected ContinuableRecord() {\r
+ // no fields to initialise \r
+ }\r
+ /**\r
+ * Serializes this record's content to the supplied data output.<br/>\r
+ * The standard BIFF header (ushort sid, ushort size) has been handled by the superclass, so \r
+ * only BIFF data should be written by this method. Simple data types can be written with the\r
+ * standard {@link LittleEndianOutput} methods. Methods from {@link ContinuableRecordOutput} \r
+ * can be used to serialize strings (with {@link ContinueRecord}s being written as required).\r
+ * If necessary, implementors can explicitly start {@link ContinueRecord}s (regardless of the\r
+ * amount of remaining space).\r
+ * \r
+ * @param out a data output stream\r
+ */\r
+ protected abstract void serialize(ContinuableRecordOutput out);\r
+\r
+\r
+ /**\r
+ * @return four less than the total length of the encoded record(s) \r
+ * (in the case when no {@link ContinueRecord} is needed, this is the \r
+	 * same ushort value that gets encoded after the record sid)\r
+ */\r
+ protected final int getDataSize() {\r
+ ContinuableRecordOutput out = ContinuableRecordOutput.createForCountingOnly();\r
+ serialize(out);\r
+ out.terminate();\r
+ return out.getTotalSize() - 4;\r
+ }\r
+\r
+ public final int serialize(int offset, byte[] data) {\r
+\r
+ LittleEndianOutput leo = new LittleEndianByteArrayOutputStream(data, offset);\r
+ ContinuableRecordOutput out = new ContinuableRecordOutput(leo, getSid());\r
+ serialize(out);\r
+ out.terminate();\r
+ return out.getTotalSize();\r
+ }\r
+}\r
--- /dev/null
+/* ====================================================================\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements. See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to You under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License. You may obtain a copy of the License at\r
+\r
+ http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+==================================================================== */\r
+\r
+package org.apache.poi.hssf.record.cont;\r
+\r
+import org.apache.poi.hssf.record.ContinueRecord;\r
+import org.apache.poi.util.DelayableLittleEndianOutput;\r
+import org.apache.poi.util.LittleEndianOutput;\r
+import org.apache.poi.util.StringUtil;\r
+\r
+/**\r
+ * An augmented {@link LittleEndianOutput} used for serialization of {@link ContinuableRecord}s.\r
+ * This class keeps track of how much remaining space is available in the current BIFF record and\r
+ * can start new {@link ContinueRecord}s as required. \r
+ * \r
+ * @author Josh Micich\r
+ */\r
+public final class ContinuableRecordOutput implements LittleEndianOutput {\r
+ \r
+ private final LittleEndianOutput _out;\r
+ private UnknownLengthRecordOutput _ulrOutput;\r
+ private int _totalPreviousRecordsSize;\r
+\r
+ ContinuableRecordOutput(LittleEndianOutput out, int sid) {\r
+ _ulrOutput = new UnknownLengthRecordOutput(out, sid);\r
+ _out = out;\r
+ _totalPreviousRecordsSize = 0;\r
+ }\r
+ \r
+ public static ContinuableRecordOutput createForCountingOnly() {\r
+ return new ContinuableRecordOutput(NOPOutput, -777); // fake sid\r
+ }\r
+\r
+ /**\r
+ * @return total number of bytes written so far (including all BIFF headers) \r
+ */\r
+ public int getTotalSize() {\r
+ return _totalPreviousRecordsSize + _ulrOutput.getTotalSize();\r
+ }\r
+ /**\r
+ * Terminates the last record (also updates its 'ushort size' field)\r
+ */\r
+ void terminate() {\r
+ _ulrOutput.terminate();\r
+ }\r
+ /**\r
+ * @return number of remaining bytes of space in current record\r
+ */\r
+ public int getAvailableSpace() {\r
+ return _ulrOutput.getAvailableSpace();\r
+ }\r
+ \r
+ /**\r
+ * Terminates the current record and starts a new {@link ContinueRecord} (regardless\r
+ * of how much space is still available in the current record).\r
+ */\r
+ public void writeContinue() {\r
+ _ulrOutput.terminate();\r
+ _totalPreviousRecordsSize += _ulrOutput.getTotalSize();\r
+ _ulrOutput = new UnknownLengthRecordOutput(_out, ContinueRecord.sid);\r
+ }\r
+ public void writeContinueIfRequired(int requiredContinuousSize) {\r
+ if (_ulrOutput.getAvailableSpace() < requiredContinuousSize) {\r
+ writeContinue();\r
+ }\r
+ }\r
+\r
+ /**\r
+ * Writes the 'optionFlags' byte and encoded character data of a unicode string. This includes:\r
+ * <ul>\r
+ * <li>byte optionFlags</li>\r
+ * <li>encoded character data (in "ISO-8859-1" or "UTF-16LE" encoding)</li>\r
+ * </ul>\r
+ * \r
+ * Notes:\r
+ * <ul>\r
+ * <li>The value of the 'is16bitEncoded' flag is determined by the actual character data \r
+ * of <tt>text</tt></li>\r
+ * <li>The string options flag is never separated (by a {@link ContinueRecord}) from the\r
+ * first chunk of character data it refers to.</li>\r
+ * <li>The 'ushort length' field is assumed to have been explicitly written earlier. Hence, \r
+ * there may be an intervening {@link ContinueRecord}</li>\r
+ * </ul>\r
+ */\r
+ public void writeStringData(String text) {\r
+ boolean is16bitEncoded = StringUtil.hasMultibyte(text);\r
+ // calculate total size of the header and first encoded char\r
+		int keepTogetherSize = 1 + 1; // byte optionFlags, at least one character byte\r
+ int optionFlags = 0x00;\r
+ if (is16bitEncoded) {\r
+ optionFlags |= 0x01;\r
+ keepTogetherSize += 1; // one extra byte for first char\r
+ }\r
+ writeContinueIfRequired(keepTogetherSize);\r
+ writeByte(optionFlags);\r
+ writeCharacterData(text, is16bitEncoded);\r
+ }\r
+ /**\r
+ * Writes a unicode string complete with header and character data. This includes:\r
+ * <ul>\r
+ * <li>ushort length</li>\r
+ * <li>byte optionFlags</li>\r
+ * <li>ushort numberOfRichTextRuns (optional)</li>\r
+ * <li>ushort extendedDataSize (optional)</li>\r
+ * <li>encoded character data (in "ISO-8859-1" or "UTF-16LE" encoding)</li>\r
+ * </ul>\r
+ * \r
+ * The following bits of the 'optionFlags' byte will be set as appropriate:\r
+ * <table border='1'>\r
+ * <tr><th>Mask</th><th>Description</th></tr>\r
+ * <tr><td>0x01</td><td>is16bitEncoded</td></tr>\r
+ * <tr><td>0x04</td><td>hasExtendedData</td></tr>\r
+ * <tr><td>0x08</td><td>isRichText</td></tr>\r
+ * </table>\r
+ * Notes:\r
+ * <ul> \r
+ * <li>The value of the 'is16bitEncoded' flag is determined by the actual character data \r
+ * of <tt>text</tt></li>\r
+ * <li>The string header fields are never separated (by a {@link ContinueRecord}) from the\r
+ * first chunk of character data (i.e. the first character is always encoded in the same\r
+ * record as the string header).</li>\r
+ * </ul>\r
+ */\r
+ public void writeString(String text, int numberOfRichTextRuns, int extendedDataSize) {\r
+ boolean is16bitEncoded = StringUtil.hasMultibyte(text);\r
+ // calculate total size of the header and first encoded char\r
+ int keepTogetherSize = 2 + 1 + 1; // ushort len, byte optionFlags, at least one character byte\r
+ int optionFlags = 0x00;\r
+ if (is16bitEncoded) {\r
+ optionFlags |= 0x01;\r
+ keepTogetherSize += 1; // one extra byte for first char\r
+ }\r
+ if (numberOfRichTextRuns > 0) {\r
+ optionFlags |= 0x08;\r
+ keepTogetherSize += 2;\r
+ }\r
+ if (extendedDataSize > 0) {\r
+ optionFlags |= 0x04;\r
+ keepTogetherSize += 4;\r
+ }\r
+ writeContinueIfRequired(keepTogetherSize);\r
+ writeShort(text.length());\r
+ writeByte(optionFlags);\r
+ if (numberOfRichTextRuns > 0) {\r
+ writeShort(numberOfRichTextRuns);\r
+ }\r
+ if (extendedDataSize > 0) {\r
+ writeInt(extendedDataSize);\r
+ }\r
+ writeCharacterData(text, is16bitEncoded);\r
+ }\r
+\r
+\r
+ private void writeCharacterData(String text, boolean is16bitEncoded) {\r
+ int nChars = text.length();\r
+ int i=0;\r
+ if (is16bitEncoded) {\r
+ while(true) {\r
+ int nWritableChars = Math.min(nChars-i, _ulrOutput.getAvailableSpace() / 2);\r
+ for ( ; nWritableChars > 0; nWritableChars--) {\r
+ _ulrOutput.writeShort(text.charAt(i++));\r
+ }\r
+ if (i >= nChars) {\r
+ break;\r
+ }\r
+ writeContinue();\r
+ writeByte(0x01); \r
+ }\r
+ } else {\r
+ while(true) {\r
+ int nWritableChars = Math.min(nChars-i, _ulrOutput.getAvailableSpace() / 1);\r
+ for ( ; nWritableChars > 0; nWritableChars--) {\r
+ _ulrOutput.writeByte(text.charAt(i++));\r
+ }\r
+ if (i >= nChars) {\r
+ break;\r
+ }\r
+ writeContinue();\r
+ writeByte(0x00); \r
+ }\r
+ }\r
+ }\r
+\r
+ public void write(byte[] b) {\r
+ writeContinueIfRequired(b.length);\r
+ _ulrOutput.write(b);\r
+ }\r
+ public void write(byte[] b, int offset, int len) {\r
+ writeContinueIfRequired(len);\r
+ _ulrOutput.write(b, offset, len);\r
+ }\r
+ public void writeByte(int v) {\r
+ writeContinueIfRequired(1);\r
+ _ulrOutput.writeByte(v);\r
+ }\r
+ public void writeDouble(double v) {\r
+ writeContinueIfRequired(8);\r
+ _ulrOutput.writeDouble(v);\r
+ }\r
+ public void writeInt(int v) {\r
+ writeContinueIfRequired(4);\r
+ _ulrOutput.writeInt(v);\r
+ }\r
+ public void writeLong(long v) {\r
+ writeContinueIfRequired(8);\r
+ _ulrOutput.writeLong(v);\r
+ }\r
+ public void writeShort(int v) {\r
+ writeContinueIfRequired(2);\r
+ _ulrOutput.writeShort(v);\r
+ }\r
+ \r
+ /**\r
+ * Allows optimised usage of {@link ContinuableRecordOutput} for sizing purposes only.\r
+ */\r
+ private static final LittleEndianOutput NOPOutput = new DelayableLittleEndianOutput() {\r
+\r
+ public LittleEndianOutput createDelayedOutput(int size) {\r
+ return this;\r
+ }\r
+ public void write(byte[] b) {\r
+ // does nothing\r
+ }\r
+ public void write(byte[] b, int offset, int len) {\r
+ // does nothing\r
+ }\r
+ public void writeByte(int v) {\r
+ // does nothing\r
+ }\r
+ public void writeDouble(double v) {\r
+ // does nothing\r
+ }\r
+ public void writeInt(int v) {\r
+ // does nothing\r
+ }\r
+ public void writeLong(long v) {\r
+ // does nothing\r
+ }\r
+ public void writeShort(int v) {\r
+ // does nothing\r
+ }\r
+ };\r
+}\r
--- /dev/null
+/* ====================================================================\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements. See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to You under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License. You may obtain a copy of the License at\r
+\r
+ http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+==================================================================== */\r
+\r
+package org.apache.poi.hssf.record.cont;\r
+\r
+import org.apache.poi.hssf.record.RecordInputStream;\r
+import org.apache.poi.util.DelayableLittleEndianOutput;\r
+import org.apache.poi.util.LittleEndianByteArrayOutputStream;\r
+import org.apache.poi.util.LittleEndianOutput;\r
+/**\r
+ * Allows the writing of BIFF records when the 'ushort size' header field is not known in advance.\r
+ * When the client is finished writing data, it calls {@link #terminate()}, at which point this \r
+ * class updates the 'ushort size' with its final value. \r
+ * \r
+ * @author Josh Micich\r
+ */\r
+final class UnknownLengthRecordOutput implements LittleEndianOutput {\r
+ private static final int MAX_DATA_SIZE = RecordInputStream.MAX_RECORD_DATA_SIZE;\r
+\r
+ private final LittleEndianOutput _originalOut;\r
+ /** for writing the 'ushort size' field once its value is known */\r
+ private final LittleEndianOutput _dataSizeOutput;\r
+ private final byte[] _byteBuffer;\r
+ private LittleEndianOutput _out;\r
+ private int _size;\r
+ \r
+ public UnknownLengthRecordOutput(LittleEndianOutput out, int sid) {\r
+ _originalOut = out;\r
+ out.writeShort(sid);\r
+ if (out instanceof DelayableLittleEndianOutput) {\r
+ // optimisation\r
+ DelayableLittleEndianOutput dleo = (DelayableLittleEndianOutput) out;\r
+ _dataSizeOutput = dleo.createDelayedOutput(2);\r
+ _byteBuffer = null;\r
+ _out = out;\r
+ } else {\r
+ // otherwise temporarily write all subsequent data to a buffer\r
+ _dataSizeOutput = out;\r
+ _byteBuffer = new byte[RecordInputStream.MAX_RECORD_DATA_SIZE];\r
+ _out = new LittleEndianByteArrayOutputStream(_byteBuffer, 0);\r
+ }\r
+ }\r
+ /**\r
+ * includes 4 byte header\r
+ */\r
+ public int getTotalSize() {\r
+ return 4 + _size;\r
+ }\r
+ public int getAvailableSpace() {\r
+ if (_out == null) {\r
+ throw new IllegalStateException("Record already terminated");\r
+ }\r
+ return MAX_DATA_SIZE - _size;\r
+ }\r
+ /**\r
+ * Finishes writing the current record and updates 'ushort size' field.<br/>\r
+ * After this method is called, only {@link #getTotalSize()} may be called.\r
+ */\r
+ public void terminate() {\r
+ if (_out == null) {\r
+ throw new IllegalStateException("Record already terminated");\r
+ }\r
+ _dataSizeOutput.writeShort(_size);\r
+ if (_byteBuffer != null) {\r
+ _originalOut.write(_byteBuffer, 0, _size);\r
+ _out = null;\r
+ return;\r
+ }\r
+ _out = null;\r
+ }\r
+ \r
+ public void write(byte[] b) {\r
+ _out.write(b);\r
+ _size += b.length;\r
+ }\r
+ public void write(byte[] b, int offset, int len) {\r
+ _out.write(b, offset, len);\r
+ _size += len;\r
+ }\r
+ public void writeByte(int v) {\r
+ _out.writeByte(v);\r
+ _size += 1;\r
+ }\r
+ public void writeDouble(double v) {\r
+ _out.writeDouble(v);\r
+ _size += 8;\r
+ }\r
+ public void writeInt(int v) {\r
+ _out.writeInt(v);\r
+ _size += 4;\r
+ }\r
+ public void writeLong(long v) {\r
+ _out.writeLong(v);\r
+ _size += 8;\r
+ }\r
+ public void writeShort(int v) {\r
+ _out.writeShort(v);\r
+ _size += 2;\r
+ }\r
+}\r
if (o == null) {
throw new RuntimeException("Array item cannot be null");
}
- if (o instanceof UnicodeString) {
- return "\"" + ((UnicodeString)o).getString() + "\"";
+ if (o instanceof String) {
+ return "\"" + (String)o + "\"";
}
if (o instanceof Double) {
return ((Double)o).toString();
import org.apache.poi.hssf.record.ObjRecord;
import org.apache.poi.hssf.record.Record;
import org.apache.poi.hssf.record.RecordBase;
-import org.apache.poi.hssf.record.StringRecord;
import org.apache.poi.hssf.record.SubRecord;
import org.apache.poi.hssf.record.TextObjectRecord;
import org.apache.poi.hssf.record.UnicodeString;
}
public int getColumnIndex() {
- return record.getColumn() & 0xFFFF;
+ return record.getColumn() & 0xFFFF;
}
/**
break;
case CELL_TYPE_STRING :
- LabelSSTRecord lrec = null;
+ LabelSSTRecord lrec;
- if (cellType != this.cellType)
- {
+ if (cellType == this.cellType) {
+ lrec = (LabelSSTRecord) record;
+ } else {
lrec = new LabelSSTRecord();
+ lrec.setColumn(col);
+ lrec.setRow(row);
+ lrec.setXFIndex(styleIndex);
}
- else
- {
- lrec = ( LabelSSTRecord ) record;
- }
- lrec.setColumn(col);
- lrec.setRow(row);
- lrec.setXFIndex(styleIndex);
- if (setValue)
- {
- if ((getStringCellValue() != null)
- && (!getStringCellValue().equals("")))
- {
- int sst = 0;
-
- UnicodeString str = getRichStringCellValue().getUnicodeString();
-//jmh if (encoding == ENCODING_COMPRESSED_UNICODE)
-//jmh {
-// jmh str.setCompressedUnicode();
-// jmh } else if (encoding == ENCODING_UTF_16)
-// jmh {
-// jmh str.setUncompressedUnicode();
-// jmh }
- sst = book.getWorkbook().addSSTString(str);
- lrec.setSSTIndex(sst);
- getRichStringCellValue().setUnicodeString(book.getWorkbook().getSSTString(sst));
- }
+ if (setValue) {
+ String str = convertCellValueToString();
+ int sstIndex = book.getWorkbook().addSSTString(new UnicodeString(str));
+ lrec.setSSTIndex(sstIndex);
+ UnicodeString us = book.getWorkbook().getSSTString(sstIndex);
+ stringValue = new HSSFRichTextString();
+ stringValue.setUnicodeString(us);
}
record = lrec;
break;
case CELL_TYPE_BOOLEAN:
return (( BoolErrRecord ) record).getBooleanValue();
case CELL_TYPE_STRING:
- return Boolean.valueOf(((StringRecord)record).getString()).booleanValue();
+ int sstIndex = ((LabelSSTRecord)record).getSSTIndex();
+ String text = book.getWorkbook().getSSTString(sstIndex).getString();
+ return Boolean.valueOf(text).booleanValue();
case CELL_TYPE_NUMERIC:
return ((NumberRecord)record).getValue() != 0;
}
throw new RuntimeException("Unexpected cell type (" + cellType + ")");
}
+ private String convertCellValueToString() {
+
+ switch (cellType) {
+ case CELL_TYPE_BLANK:
+ return "";
+ case CELL_TYPE_BOOLEAN:
+ return ((BoolErrRecord) record).getBooleanValue() ? "TRUE" : "FALSE";
+ case CELL_TYPE_STRING:
+ int sstIndex = ((LabelSSTRecord)record).getSSTIndex();
+ return book.getWorkbook().getSSTString(sstIndex).getString();
+ case CELL_TYPE_NUMERIC:
+ return String.valueOf(((NumberRecord)record).getValue());
+ case CELL_TYPE_ERROR:
+ return HSSFErrorConstants.getText(((BoolErrRecord) record).getErrorValue());
+ case CELL_TYPE_FORMULA:
+ // should really evaluate, but HSSFCell can't call HSSFFormulaEvaluator
+ return "";
+ }
+ throw new RuntimeException("Unexpected cell type (" + cellType + ")");
+ }
/**
* get the value of the cell as a boolean. For strings, numbers, and errors, we throw an exception.
--- /dev/null
+/* ====================================================================\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements. See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to You under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License. You may obtain a copy of the License at\r
+\r
+ http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+==================================================================== */\r
+\r
+package org.apache.poi.util;\r
+/**\r
+ * Implementors of this interface allow client code to 'delay' writing to a certain section of a \r
+ * data output stream.<br/>\r
+ * A typical application is for writing BIFF records when the size is not known until well after\r
+ * the header has been written. The client code can call {@link #createDelayedOutput(int)}\r
+ * to reserve two bytes of the output for the 'ushort size' header field. The delayed output can\r
+ * be written at any stage. \r
+ * \r
+ * @author Josh Micich\r
+ */\r
+public interface DelayableLittleEndianOutput extends LittleEndianOutput {\r
+ /**\r
+ * Creates an output stream intended for outputting a sequence of <tt>size</tt> bytes.\r
+ */\r
+ LittleEndianOutput createDelayedOutput(int size);\r
+}\r
*
* @author Josh Micich
*/
-public final class LittleEndianByteArrayOutputStream implements LittleEndianOutput {
+public final class LittleEndianByteArrayOutputStream implements LittleEndianOutput, DelayableLittleEndianOutput {
private final byte[] _buf;
private final int _endIndex;
private int _writeIndex;
public int getWriteIndex() {
return _writeIndex;
}
+ public LittleEndianOutput createDelayedOutput(int size) {
+ checkPosition(size);
+ LittleEndianOutput result = new LittleEndianByteArrayOutputStream(_buf, _writeIndex, _writeIndex+size);
+ _writeIndex += size;
+ return result;
+ }
}
}
}
+ /**
+ * @return the number of bytes that would be written by {@link #writeUnicodeString(LittleEndianOutput, String)}
+ */
+ public static int getEncodedSize(String value) {
+ int result = 2 + 1;
+ result += value.length() * (StringUtil.hasMultibyte(value) ? 2 : 1);
+ return result;
+ }
+
/**
* Takes a unicode (java) string, and returns it as 8 bit data (in ISO-8859-1
* codepage).
Document doc = ppt.getDocumentRecord();
EscherContainerRecord dggContainer = doc.getPPDrawingGroup().getDggContainer();
EscherContainerRecord bstore = (EscherContainerRecord)Shape.getEscherChild(dggContainer, EscherContainerRecord.BSTORE_CONTAINER);
-
+ if(bstore == null) {
+ logger.log(POILogger.DEBUG, "EscherContainerRecord.BSTORE_CONTAINER was not found ");
+ return null;
+ }
List lst = bstore.getChildRecords();
int idx = getPictureIndex();
if (idx == 0){
+ logger.log(POILogger.DEBUG, "picture index was not found, returning ");
return null;
} else {
return (EscherBSERecord)lst.get(idx-1);
ShapePainter.paint(this, graphics);
PictureData data = getPictureData();
- data.draw(graphics, this);
+ if(data != null) data.draw(graphics, this);
graphics.setTransform(at);
}
super(parent);\r
\r
setShapeType(ShapeTypes.Rectangle);\r
- _txtrun.setRunType(TextHeaderAtom.HALF_BODY_TYPE);\r
- _txtrun.getRichTextRuns()[0].setFlag(false, 0, false);\r
+ //_txtrun.setRunType(TextHeaderAtom.HALF_BODY_TYPE);\r
+ //_txtrun.getRichTextRuns()[0].setFlag(false, 0, false);\r
}\r
\r
protected EscherContainerRecord createSpContainer(boolean isChild){\r
\r
import java.io.FileOutputStream;\r
import java.io.File;\r
+import java.io.IOException;\r
import java.awt.*;\r
+import java.awt.image.BufferedImage;\r
\r
import org.apache.poi.hslf.usermodel.SlideShow;\r
+import org.apache.poi.hslf.usermodel.PictureData;\r
import org.apache.poi.hslf.HSLFSlideShow;\r
import org.apache.poi.ddf.EscherBSERecord;\r
\r
\r
}\r
\r
+ /**\r
+ * Picture#getEscherBSERecord threw NullPointerException if EscherContainerRecord.BSTORE_CONTAINER\r
+ * was not found. The correct behaviour is to return null.\r
+ */\r
+ public void test46122() throws IOException {\r
+ SlideShow ppt = new SlideShow();\r
+ Slide slide = ppt.createSlide();\r
+\r
+ Picture pict = new Picture(-1); //index to non-existing picture data\r
+ pict.setSheet(slide);\r
+ PictureData data = pict.getPictureData();\r
+ assertNull(data);\r
+\r
+ BufferedImage img = new BufferedImage(100, 100, BufferedImage.TYPE_INT_RGB);\r
+ Graphics2D graphics = img.createGraphics();\r
+ pict.draw(graphics);\r
+\r
+ assertTrue("no errors rendering Picture with null data", true);\r
+ }\r
+\r
}\r
\r
import org.apache.poi.hslf.usermodel.SlideShow;\r
import org.apache.poi.hslf.HSLFSlideShow;\r
+import org.apache.poi.hslf.record.TextHeaderAtom;\r
\r
/**\r
* Test <code>Table</code> object.\r
Table tbl = new Table(2, 5);\r
slide.addShape(tbl);\r
\r
+ TableCell cell = tbl.getCell(0, 0);\r
+ //table cells have type=TextHeaderAtom.OTHER_TYPE, see bug #46033\r
+ assertEquals(TextHeaderAtom.OTHER_TYPE, cell.getTextRun().getRunType());\r
+\r
assertTrue(slide.getShapes()[0] instanceof Table);\r
Table tbl2 = (Table)slide.getShapes()[0];\r
assertEquals(tbl.getNumberOfColumns(), tbl2.getNumberOfColumns());\r
byte[] data = {
0, 6, 5, 0, -2, 28, -51, 7, -55, 64, 0, 0, 6, 1, 0, 0
};
- short size = 16;
Record[] record = RecordFactory.createRecord(TestcaseRecordInputStream.create(recType, data));
assertEquals(BOFRecord.class.getName(),
assertEquals(5, bofRecord.getType());
assertEquals(1536, bofRecord.getVersion());
recType = MMSRecord.sid;
- size = 2;
data = new byte[]
{
0, 0
byte[] data = {
0, 0, 0, 0, 21, 0, 0, 0, 0, 0
};
- short size = 10;
Record[] record = RecordFactory.createRecord(TestcaseRecordInputStream.create(recType, data));
assertEquals(NumberRecord.class.getName(),
*/
public void testMixedContinue() throws Exception {
/**
- * Taken from a real test sample file 39512.xls. See Bug 39512 for details.
+ * Adapted from a real test sample file 39512.xls (Offset 0x4854).
+ * See Bug 39512 for details.
*/
String dump =
//OBJ
- "5D, 00, 48, 00, 15, 00, 12, 00, 0C, 00, 3C, 00, 11, 00, A0, 2E, 03, 01, CC, 42, " +
- "CF, 00, 00, 00, 00, 00, 0A, 00, 0C, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, " +
- "03, 00, 0B, 00, 06, 00, 28, 01, 03, 01, 00, 00, 12, 00, 08, 00, 00, 00, 00, 00, " +
- "00, 00, 03, 00, 11, 00, 04, 00, 3D, 00, 00, 00, 00, 00, 00, 00, " +
+ "5D 00 48 00 15 00 12 00 0C 00 3C 00 11 00 A0 2E 03 01 CC 42 " +
+ "CF 00 00 00 00 00 0A 00 0C 00 00 00 00 00 00 00 00 00 00 00 " +
+ "03 00 0B 00 06 00 28 01 03 01 00 00 12 00 08 00 00 00 00 00 " +
+ "00 00 03 00 11 00 04 00 3D 00 00 00 00 00 00 00 " +
//MSODRAWING
- "EC, 00, 08, 00, 00, 00, 0D, F0, 00, 00, 00, 00, " +
- //TXO
- "B6, 01, 12, 00, 22, 02, 00, 00, 00, 00, 00, 00, 00, 00, 10, 00, 10, 00, 00, 00, " +
- "00, 00, 3C, 00, 21, 00, 01, 4F, 00, 70, 00, 74, 00, 69, 00, 6F, 00, 6E, 00, 20, " +
- "00, 42, 00, 75, 00, 74, 00, 74, 00, 6F, 00, 6E, 00, 20, 00, 33, 00, 39, 00, 3C, " +
- "00, 10, 00, 00, 00, 05, 00, 00, 00, 00, 00, 10, 00, 00, 00, 00, 00, 00, 00, " +
- //CONTINUE
- "3C, 00, 7E, 00, 0F, 00, 04, F0, 7E, 00, 00, 00, 92, 0C, 0A, F0, 08, 00, 00, 00, " +
- "3D, 04, 00, 00, 00, 0A, 00, 00, A3, 00, 0B, F0, 3C, 00, 00, 00, 7F, 00, 00, 01, " +
- "00, 01, 80, 00, 8C, 01, 03, 01, 85, 00, 01, 00, 00, 00, 8B, 00, 02, 00, 00, 00, " +
- "BF, 00, 08, 00, 1A, 00, 7F, 01, 29, 00, 29, 00, 81, 01, 41, 00, 00, 08, BF, 01, " +
- "00, 00, 10, 00, C0, 01, 40, 00, 00, 08, FF, 01, 00, 00, 08, 00, 00, 00, 10, F0, " +
- "12, 00, 00, 00, 02, 00, 02, 00, A0, 03, 18, 00, B5, 00, 04, 00, 30, 02, 1A, 00, " +
- "00, 00, 00, 00, 11, F0, 00, 00, 00, 00, " +
+ "EC 00 08 00 00 00 0D F0 00 00 00 00 " +
+ //TXO (and 2 trailing CONTINUE records)
+ "B6 01 12 00 22 02 00 00 00 00 00 00 00 00 10 00 10 00 00 00 00 00 " +
+ "3C 00 11 00 00 4F 70 74 69 6F 6E 20 42 75 74 74 6F 6E 20 33 39 " +
+ "3C 00 10 00 00 00 05 00 00 00 00 00 10 00 00 00 00 00 00 00 " +
+ // another CONTINUE
+ "3C 00 7E 00 0F 00 04 F0 7E 00 00 00 92 0C 0A F0 08 00 00 00 " +
+ "3D 04 00 00 00 0A 00 00 A3 00 0B F0 3C 00 00 00 7F 00 00 01 " +
+ "00 01 80 00 8C 01 03 01 85 00 01 00 00 00 8B 00 02 00 00 00 " +
+ "BF 00 08 00 1A 00 7F 01 29 00 29 00 81 01 41 00 00 08 BF 01 " +
+ "00 00 10 00 C0 01 40 00 00 08 FF 01 00 00 08 00 00 00 10 F0 " +
+ "12 00 00 00 02 00 02 00 A0 03 18 00 B5 00 04 00 30 02 1A 00 " +
+ "00 00 00 00 11 F0 00 00 00 00 " +
//OBJ
- "5D, 00, 48, 00, 15, 00, 12, 00, 0C, 00, 3D, 00, 11, 00, 8C, 01, 03, 01, C8, 59, CF, 00, 00, " +
- "00, 00, 00, 0A, 00, 0C, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 03, 00, 0B, 00, 06, 00, " +
- "7C, 16, 03, 01, 00, 00, 12, 00, 08, 00, 00, 00, 00, 00, 00, 00, 03, 00, 11, 00, 04, 00, 01, " +
- "00, 00, 00, 00, 00, 00, 00";
+ "5D 00 48 00 15 00 12 00 0C 00 3D 00 11 00 8C 01 03 01 C8 59 CF 00 00 " +
+ "00 00 00 0A 00 0C 00 00 00 00 00 00 00 00 00 00 00 03 00 0B 00 06 00 " +
+ "7C 16 03 01 00 00 12 00 08 00 00 00 00 00 00 00 03 00 11 00 04 00 01 " +
+ "00 00 00 00 00 00 00";
byte[] data = HexRead.readFromString(dump);
List records = RecordFactory.createRecords(new ByteArrayInputStream(data));
-
/* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
-
package org.apache.poi.hssf.record;
import junit.framework.TestCase;
+import org.apache.poi.hssf.record.cont.ContinuableRecordOutput;
import org.apache.poi.util.IntMapper;
/**
* Tests that records size calculates correctly.
- *
+ *
* @author Glen Stampoultzis (glens at apache.org)
*/
-public class TestSSTRecordSizeCalculator
- extends TestCase
-{
- private static final String SMALL_STRING = "Small string";
- private static final int COMPRESSED_PLAIN_STRING_OVERHEAD = 3;
-// private List recordLengths;
- private IntMapper strings;
- private static final int OPTION_FIELD_SIZE = 1;
-
- public TestSSTRecordSizeCalculator( String s )
- {
- super( s );
- }
-
- public void testBasic()
- throws Exception
- {
- strings.add(makeUnicodeString(SMALL_STRING));
- SSTRecordSizeCalculator calculator = new SSTRecordSizeCalculator(strings);
- assertEquals(SSTRecord.SST_RECORD_OVERHEAD + COMPRESSED_PLAIN_STRING_OVERHEAD + SMALL_STRING.length(),
- calculator.getRecordSize());
- }
-
- public void testBigStringAcrossUnicode()
- throws Exception
- {
- String bigString = new String(new char[SSTRecord.MAX_DATA_SPACE + 100]);
- strings.add(makeUnicodeString(bigString));
- SSTRecordSizeCalculator calculator = new SSTRecordSizeCalculator(strings);
- assertEquals(SSTRecord.SST_RECORD_OVERHEAD
- + COMPRESSED_PLAIN_STRING_OVERHEAD
- + SSTRecord.MAX_DATA_SPACE
- + SSTRecord.STD_RECORD_OVERHEAD
- + OPTION_FIELD_SIZE
- + 100,
- calculator.getRecordSize());
- }
-
- public void testPerfectFit()
- throws Exception
- {
- String perfectFit = new String(new char[SSTRecord.MAX_DATA_SPACE - COMPRESSED_PLAIN_STRING_OVERHEAD]);
- strings.add(makeUnicodeString(perfectFit));
- SSTRecordSizeCalculator calculator = new SSTRecordSizeCalculator(strings);
- assertEquals(SSTRecord.SST_RECORD_OVERHEAD
- + COMPRESSED_PLAIN_STRING_OVERHEAD
- + perfectFit.length(),
- calculator.getRecordSize());
- }
-
- public void testJustOversized()
- throws Exception
- {
- String tooBig = new String(new char[SSTRecord.MAX_DATA_SPACE - COMPRESSED_PLAIN_STRING_OVERHEAD + 1]);
- strings.add(makeUnicodeString(tooBig));
- SSTRecordSizeCalculator calculator = new SSTRecordSizeCalculator(strings);
- assertEquals(SSTRecord.SST_RECORD_OVERHEAD
- + COMPRESSED_PLAIN_STRING_OVERHEAD
- + tooBig.length() - 1
- // continue record
- + SSTRecord.STD_RECORD_OVERHEAD
- + OPTION_FIELD_SIZE
- + 1,
- calculator.getRecordSize());
-
- }
-
- public void testSecondStringStartsOnNewContinuation()
- throws Exception
- {
- String perfectFit = new String(new char[SSTRecord.MAX_DATA_SPACE - COMPRESSED_PLAIN_STRING_OVERHEAD]);
- strings.add(makeUnicodeString(perfectFit));
- strings.add(makeUnicodeString(SMALL_STRING));
- SSTRecordSizeCalculator calculator = new SSTRecordSizeCalculator(strings);
- assertEquals(SSTRecord.SST_RECORD_OVERHEAD
- + SSTRecord.MAX_DATA_SPACE
- // second string
- + SSTRecord.STD_RECORD_OVERHEAD
- + COMPRESSED_PLAIN_STRING_OVERHEAD
- + SMALL_STRING.length(),
- calculator.getRecordSize());
- }
-
- public void testHeaderCrossesNormalContinuePoint()
- throws Exception
- {
- String almostPerfectFit = new String(new char[SSTRecord.MAX_DATA_SPACE - COMPRESSED_PLAIN_STRING_OVERHEAD - 2]);
- strings.add(makeUnicodeString(almostPerfectFit));
- String oneCharString = new String(new char[1]);
- strings.add(makeUnicodeString(oneCharString));
- SSTRecordSizeCalculator calculator = new SSTRecordSizeCalculator(strings);
- assertEquals(SSTRecord.SST_RECORD_OVERHEAD
- + COMPRESSED_PLAIN_STRING_OVERHEAD
- + almostPerfectFit.length()
- // second string
- + SSTRecord.STD_RECORD_OVERHEAD
- + COMPRESSED_PLAIN_STRING_OVERHEAD
- + oneCharString.length(),
- calculator.getRecordSize());
-
- }
-
-
- public void setUp()
- {
- strings = new IntMapper();
- }
-
-
- private UnicodeString makeUnicodeString( String s )
- {
- UnicodeString st = new UnicodeString(s);
- st.setOptionFlags((byte)0);
- return st;
- }
-
+public final class TestSSTRecordSizeCalculator extends TestCase {
+ private static final String SMALL_STRING = "Small string";
+ private static final int COMPRESSED_PLAIN_STRING_OVERHEAD = 3;
+ private static final int OPTION_FIELD_SIZE = 1;
+
+ private final IntMapper strings = new IntMapper();
+
+
+ private void confirmSize(int expectedSize) {
+ ContinuableRecordOutput cro = ContinuableRecordOutput.createForCountingOnly();
+ SSTSerializer ss = new SSTSerializer(strings, 0, 0);
+ ss.serialize(cro);
+ assertEquals(expectedSize, cro.getTotalSize());
+ }
+
+ public void testBasic() {
+ strings.add(makeUnicodeString(SMALL_STRING));
+ confirmSize(SSTRecord.SST_RECORD_OVERHEAD
+ + COMPRESSED_PLAIN_STRING_OVERHEAD
+ + SMALL_STRING.length());
+ }
+
+ public void testBigStringAcrossUnicode() {
+ int bigString = SSTRecord.MAX_DATA_SPACE + 100;
+ strings.add(makeUnicodeString(bigString));
+ confirmSize(SSTRecord.SST_RECORD_OVERHEAD
+ + COMPRESSED_PLAIN_STRING_OVERHEAD
+ + SSTRecord.MAX_DATA_SPACE
+ + SSTRecord.STD_RECORD_OVERHEAD
+ + OPTION_FIELD_SIZE
+ + 100);
+ }
+
+ public void testPerfectFit() {
+ int perfectFit = SSTRecord.MAX_DATA_SPACE - COMPRESSED_PLAIN_STRING_OVERHEAD;
+ strings.add(makeUnicodeString(perfectFit));
+ confirmSize(SSTRecord.SST_RECORD_OVERHEAD
+ + COMPRESSED_PLAIN_STRING_OVERHEAD
+ + perfectFit);
+ }
+
+ public void testJustOversized() {
+ int tooBig = SSTRecord.MAX_DATA_SPACE - COMPRESSED_PLAIN_STRING_OVERHEAD + 1;
+ strings.add(makeUnicodeString(tooBig));
+ confirmSize(SSTRecord.SST_RECORD_OVERHEAD
+ + COMPRESSED_PLAIN_STRING_OVERHEAD
+ + tooBig - 1
+ // continue record
+ + SSTRecord.STD_RECORD_OVERHEAD
+ + OPTION_FIELD_SIZE + 1);
+
+ }
+
+ public void testSecondStringStartsOnNewContinuation() {
+ int perfectFit = SSTRecord.MAX_DATA_SPACE - COMPRESSED_PLAIN_STRING_OVERHEAD;
+ strings.add(makeUnicodeString(perfectFit));
+ strings.add(makeUnicodeString(SMALL_STRING));
+ confirmSize(SSTRecord.SST_RECORD_OVERHEAD
+ + SSTRecord.MAX_DATA_SPACE
+ // second string
+ + SSTRecord.STD_RECORD_OVERHEAD
+ + COMPRESSED_PLAIN_STRING_OVERHEAD
+ + SMALL_STRING.length());
+ }
+
+ public void testHeaderCrossesNormalContinuePoint() {
+ int almostPerfectFit = SSTRecord.MAX_DATA_SPACE - COMPRESSED_PLAIN_STRING_OVERHEAD - 2;
+ strings.add(makeUnicodeString(almostPerfectFit));
+ String oneCharString = new String(new char[1]);
+ strings.add(makeUnicodeString(oneCharString));
+ confirmSize(SSTRecord.SST_RECORD_OVERHEAD
+ + COMPRESSED_PLAIN_STRING_OVERHEAD
+ + almostPerfectFit
+ // second string
+ + SSTRecord.STD_RECORD_OVERHEAD
+ + COMPRESSED_PLAIN_STRING_OVERHEAD
+ + oneCharString.length());
+
+ }
+ private static UnicodeString makeUnicodeString(int size) {
+ String s = new String(new char[size]);
+ return makeUnicodeString(s);
+ }
+
+ private static UnicodeString makeUnicodeString(String s) {
+ UnicodeString st = new UnicodeString(s);
+ st.setOptionFlags((byte) 0);
+ return st;
+ }
}
package org.apache.poi.hssf.record;
+import org.apache.poi.util.HexRead;
+import org.apache.poi.util.LittleEndian;
+import org.apache.poi.util.LittleEndianByteArrayInputStream;
+import org.apache.poi.util.LittleEndianInput;
+
+import junit.framework.AssertionFailedError;
import junit.framework.TestCase;
/**
* @author Glen Stampoultzis (glens at apache.org)
*/
public final class TestStringRecord extends TestCase {
- byte[] data = new byte[] {
- (byte)0x0B,(byte)0x00, // length
- (byte)0x00, // option
- // string
- (byte)0x46,(byte)0x61,(byte)0x68,(byte)0x72,(byte)0x7A,(byte)0x65,(byte)0x75,(byte)0x67,(byte)0x74,(byte)0x79,(byte)0x70
- };
-
- public void testLoad() {
-
- StringRecord record = new StringRecord(TestcaseRecordInputStream.create(0x207, data));
- assertEquals( "Fahrzeugtyp", record.getString());
-
- assertEquals( 18, record.getRecordSize() );
- }
-
- public void testStore()
- {
- StringRecord record = new StringRecord();
- record.setString("Fahrzeugtyp");
-
- byte [] recordBytes = record.serialize();
- assertEquals(recordBytes.length - 4, data.length);
- for (int i = 0; i < data.length; i++)
- assertEquals("At offset " + i, data[i], recordBytes[i+4]);
- }
+ private static final byte[] data = HexRead.readFromString(
+ "0B 00 " + // length
+ "00 " + // option
+ // string
+ "46 61 68 72 7A 65 75 67 74 79 70"
+ );
+
+ public void testLoad() {
+
+ StringRecord record = new StringRecord(TestcaseRecordInputStream.create(0x207, data));
+ assertEquals( "Fahrzeugtyp", record.getString());
+
+ assertEquals( 18, record.getRecordSize() );
+ }
+
+ public void testStore() {
+ StringRecord record = new StringRecord();
+ record.setString("Fahrzeugtyp");
+
+ byte [] recordBytes = record.serialize();
+ assertEquals(recordBytes.length - 4, data.length);
+ for (int i = 0; i < data.length; i++)
+ assertEquals("At offset " + i, data[i], recordBytes[i+4]);
+ }
+
+ public void testContinue() {
+ int MAX_BIFF_DATA = RecordInputStream.MAX_RECORD_DATA_SIZE;
+ int TEXT_LEN = MAX_BIFF_DATA + 1000; // deliberately over-size
+ String textChunk = "ABCDEFGHIJKLMNOP"; // 16 chars
+ StringBuffer sb = new StringBuffer(16384);
+ while (sb.length() < TEXT_LEN) {
+ sb.append(textChunk);
+ }
+ sb.setLength(TEXT_LEN);
+
+ StringRecord sr = new StringRecord();
+ sr.setString(sb.toString());
+ byte[] ser = sr.serialize();
+ assertEquals(StringRecord.sid, LittleEndian.getUShort(ser, 0));
+ if (LittleEndian.getUShort(ser, 2) > MAX_BIFF_DATA) {
+ throw new AssertionFailedError(
+ "StringRecord should have been split with a continue record");
+ }
+ // Confirm expected size of first record, and ushort strLen.
+ assertEquals(MAX_BIFF_DATA, LittleEndian.getUShort(ser, 2));
+ assertEquals(TEXT_LEN, LittleEndian.getUShort(ser, 4));
+
+ // Confirm first few bytes of ContinueRecord
+ LittleEndianInput crIn = new LittleEndianByteArrayInputStream(ser, (MAX_BIFF_DATA + 4));
+ int nCharsInFirstRec = MAX_BIFF_DATA - (2 + 1); // strLen, optionFlags
+ int nCharsInSecondRec = TEXT_LEN - nCharsInFirstRec;
+ assertEquals(ContinueRecord.sid, crIn.readUShort());
+ assertEquals(1 + nCharsInSecondRec, crIn.readUShort());
+ assertEquals(0, crIn.readUByte());
+ assertEquals('N', crIn.readUByte());
+ assertEquals('O', crIn.readUByte());
+
+ // re-read and make sure string value is the same
+ RecordInputStream in = TestcaseRecordInputStream.create(ser);
+ StringRecord sr2 = new StringRecord(in);
+ assertEquals(sb.toString(), sr2.getString());
+ }
}
assertEquals( 34, record.getRecordSize() ); //sid+size+data
assertEquals("testURL", record.getURL());
- UnicodeString[] sheetNames = record.getSheetNames();
+ String[] sheetNames = record.getSheetNames();
assertEquals(2, sheetNames.length);
- assertEquals("Sheet1", sheetNames[0].getString());
- assertEquals("Sheet2", sheetNames[1].getString());
+ assertEquals("Sheet1", sheetNames[0]);
+ assertEquals("Sheet2", sheetNames[1]);
}
/**
}
public void testStoreER() {
- UnicodeString url = new UnicodeString("testURL");
- UnicodeString[] sheetNames = {
- new UnicodeString("Sheet1"),
- new UnicodeString("Sheet2"),
- };
+ String url = "testURL";
+ String[] sheetNames = { "Sheet1", "Sheet2", };
SupBookRecord record = SupBookRecord.createExternalReferences(url, sheetNames);
TestcaseRecordInputStream.confirmRecordEncoding(0x01AE, dataER, record.serialize());
"00 00" +
"00 00 " +
"3C 00 " + // ContinueRecord.sid
- "05 00 " + // size 5
- "01 " + // unicode uncompressed
- "41 00 42 00 " + // 'AB'
+ "03 00 " + // size 3
+ "00 " + // unicode compressed
+ "41 42 " + // 'AB'
"3C 00 " + // ContinueRecord.sid
"10 00 " + // size 16
"00 00 18 00 00 00 00 00 " +
assertEquals(true, record.isTextLocked());
assertEquals(TextObjectRecord.TEXT_ORIENTATION_ROT_RIGHT, record.getTextOrientation());
- assertEquals(51, record.getRecordSize() );
+ assertEquals(49, record.getRecordSize() );
}
public void testStore()
public final class TestTextObjectRecord extends TestCase {\r
\r
private static final byte[] simpleData = HexRead.readFromString(\r
- "B6 01 12 00 " +\r
- "12 02 00 00 00 00 00 00" +\r
- "00 00 0D 00 08 00 00 00" +\r
- "00 00 " +\r
- "3C 00 1B 00 " +\r
- "01 48 00 65 00 6C 00 6C 00 6F 00 " +\r
- "2C 00 20 00 57 00 6F 00 72 00 6C " +\r
- "00 64 00 21 00 " + \r
- "3C 00 08 " +\r
- "00 0D 00 00 00 00 00 00 00"\r
+ "B6 01 12 00 " +\r
+ "12 02 00 00 00 00 00 00" +\r
+ "00 00 0D 00 08 00 00 00" +\r
+ "00 00 " +\r
+ "3C 00 0E 00 " +\r
+ "00 48 65 6C 6C 6F 2C 20 57 6F 72 6C 64 21 " +\r
+ "3C 00 08 " +\r
+ "00 0D 00 00 00 00 00 00 00"\r
);\r
\r
\r
record.setStr(str);\r
\r
byte [] ser = record.serialize();\r
- \r
+\r
int formatDataLen = LittleEndian.getUShort(ser, 16);\r
assertEquals("formatDataLength", 0, formatDataLen);\r
\r
assertEquals(22, ser.length); // just the TXO record\r
- \r
+\r
//read again\r
RecordInputStream is = TestcaseRecordInputStream.create(ser);\r
record = new TextObjectRecord(is);\r
byte[] cln = cloned.serialize();\r
assertTrue(Arrays.equals(src, cln));\r
}\r
- \r
- /** similar to {@link #simpleData} but with link formula at end of TXO rec*/ \r
+\r
+ /** similar to {@link #simpleData} but with link formula at end of TXO rec*/\r
private static final byte[] linkData = HexRead.readFromString(\r
- "B6 01 " + // TextObjectRecord.sid\r
- "1E 00 " + // size 18\r
- "44 02 02 00 00 00 00 00" +\r
- "00 00 " +\r
- "02 00 " + // strLen 2\r
- "10 00 " + // 16 bytes for 2 format runs\r
- "00 00 00 00 " +\r
+ "B6 01 " + // TextObjectRecord.sid\r
+ "1E 00 " + // size 30\r
+ "44 02 02 00 00 00 00 00" +\r
+ "00 00 " +\r
+ "02 00 " + // strLen 2\r
+ "10 00 " + // 16 bytes for 2 format runs\r
+ "00 00 00 00 " +\r
\r
"05 00 " + // formula size\r
"D4 F0 8A 03 " + // unknownInt\r
"24 01 00 13 C0 " + //tRef(T2)\r
"13 " + // ??\r
\r
- "3C 00 " + // ContinueRecord.sid\r
- "05 00 " + // size 5\r
- "01 " + // unicode uncompressed\r
- "41 00 42 00 " + // 'AB'\r
- "3C 00 " + // ContinueRecord.sid\r
- "10 00 " + // size 16 \r
- "00 00 18 00 00 00 00 00 " +\r
- "02 00 00 00 00 00 00 00 " \r
+ "3C 00 " + // ContinueRecord.sid\r
+ "03 00 " + // size 3\r
+ "00 " + // unicode compressed\r
+ "41 42 " + // 'AB'\r
+ "3C 00 " + // ContinueRecord.sid\r
+ "10 00 " + // size 16\r
+ "00 00 18 00 00 00 00 00 " +\r
+ "02 00 00 00 00 00 00 00 "\r
);\r
- \r
- \r
+\r
+\r
public void testLinkFormula() {\r
RecordInputStream is = new RecordInputStream(new ByteArrayInputStream(linkData));\r
is.nextRecord();\r
TextObjectRecord rec = new TextObjectRecord(is);\r
- \r
+\r
Ptg ptg = rec.getLinkRefPtg();\r
assertNotNull(ptg);\r
assertEquals(RefPtg.class, ptg.getClass());\r
byte [] data2 = rec.serialize();\r
assertEquals(linkData.length, data2.length);\r
assertTrue(Arrays.equals(linkData, data2));\r
- }\r
- \r
+ }\r
+\r
}\r
limitations under the License.
==================================================================== */
-
package org.apache.poi.hssf.record;
-import org.apache.poi.util.HexRead;
-
import junit.framework.TestCase;
+import org.apache.poi.hssf.record.cont.ContinuableRecordOutput;
+
/**
- * Tests that records size calculates correctly.
+ * Tests that {@link UnicodeString} record size calculates correctly. The record size
+ * is used when serializing {@link SSTRecord}s.
*
* @author Jason Height (jheight at apache.org)
*/
public final class TestUnicodeString extends TestCase {
+ private static final int MAX_DATA_SIZE = RecordInputStream.MAX_RECORD_DATA_SIZE;
+ /** a 4 character string requiring 16 bit encoding */
+ private static final String STR_16_BIT = "A\u591A\u8A00\u8A9E";
+
+ private static void confirmSize(int expectedSize, UnicodeString s) {
+ confirmSize(expectedSize, s, 0);
+ }
+ /**
+ * Note - a value of zero for <tt>amountUsedInCurrentRecord</tt> would only ever occur just
+ * after a {@link ContinueRecord} had been started. In the initial {@link SSTRecord} this
+ * value starts at 8 (for the first {@link UnicodeString} written). In general, it can be
+ * any value between 0 and {@link #MAX_DATA_SIZE}
+ */
+ private static void confirmSize(int expectedSize, UnicodeString s, int amountUsedInCurrentRecord) {
+ ContinuableRecordOutput out = ContinuableRecordOutput.createForCountingOnly();
+ out.writeContinue();
+ for(int i=amountUsedInCurrentRecord; i>0; i--) {
+ out.writeByte(0);
+ }
+ int size0 = out.getTotalSize();
+ s.serialize(out);
+ int size1 = out.getTotalSize();
+ int actualSize = size1-size0;
+ assertEquals(expectedSize, actualSize);
+ }
public void testSmallStringSize() {
//Test a basic string
UnicodeString s = makeUnicodeString("Test");
- UnicodeString.UnicodeRecordStats stats = new UnicodeString.UnicodeRecordStats();
- s.getRecordSize(stats);
- assertEquals(7, stats.recordSize);
+ confirmSize(7, s);
//Test a small string that is uncompressed
+ s = makeUnicodeString(STR_16_BIT);
s.setOptionFlags((byte)0x01);
- stats = new UnicodeString.UnicodeRecordStats();
- s.getRecordSize(stats);
- assertEquals(11, stats.recordSize);
+ confirmSize(11, s);
//Test a compressed small string that has rich text formatting
+ s.setString("Test");
s.setOptionFlags((byte)0x8);
UnicodeString.FormatRun r = new UnicodeString.FormatRun((short)0,(short)1);
s.addFormatRun(r);
UnicodeString.FormatRun r2 = new UnicodeString.FormatRun((short)2,(short)2);
s.addFormatRun(r2);
- stats = new UnicodeString.UnicodeRecordStats();
- s.getRecordSize(stats);
- assertEquals(17, stats.recordSize);
+ confirmSize(17, s);
//Test a uncompressed small string that has rich text formatting
+ s.setString(STR_16_BIT);
s.setOptionFlags((byte)0x9);
- stats = new UnicodeString.UnicodeRecordStats();
- s.getRecordSize(stats);
- assertEquals(21, stats.recordSize);
+ confirmSize(21, s);
//Test a compressed small string that has rich text and extended text
+ s.setString("Test");
s.setOptionFlags((byte)0xC);
s.setExtendedRst(new byte[]{(byte)0x1,(byte)0x2,(byte)0x3,(byte)0x4,(byte)0x5});
- stats = new UnicodeString.UnicodeRecordStats();
- s.getRecordSize(stats);
- assertEquals(26, stats.recordSize);
+ confirmSize(26, s);
//Test a uncompressed small string that has rich text and extended text
+ s.setString(STR_16_BIT);
s.setOptionFlags((byte)0xD);
- stats = new UnicodeString.UnicodeRecordStats();
- s.getRecordSize(stats);
- assertEquals(30, stats.recordSize);
+ confirmSize(30, s);
}
public void testPerfectStringSize() {
//Test a basic string
- UnicodeString s = makeUnicodeString(SSTRecord.MAX_RECORD_SIZE-2-1);
- UnicodeString.UnicodeRecordStats stats = new UnicodeString.UnicodeRecordStats();
- s.getRecordSize(stats);
- assertEquals(SSTRecord.MAX_RECORD_SIZE, stats.recordSize);
+ UnicodeString s = makeUnicodeString(MAX_DATA_SIZE-2-1);
+ confirmSize(MAX_DATA_SIZE, s);
//Test an uncompressed string
//Note that we can only ever get to a maximim size of 8227 since an uncompressed
//string is writing double bytes.
- s = makeUnicodeString((SSTRecord.MAX_RECORD_SIZE-2-1)/2);
+ s = makeUnicodeString((MAX_DATA_SIZE-2-1)/2, true);
s.setOptionFlags((byte)0x1);
- stats = new UnicodeString.UnicodeRecordStats();
- s.getRecordSize(stats);
- assertEquals(SSTRecord.MAX_RECORD_SIZE-1, stats.recordSize);
+ confirmSize(MAX_DATA_SIZE-1, s);
}
public void testPerfectRichStringSize() {
//Test a rich text string
- UnicodeString s = makeUnicodeString(SSTRecord.MAX_RECORD_SIZE-2-1-8-2);
+ UnicodeString s = makeUnicodeString(MAX_DATA_SIZE-2-1-8-2);
s.addFormatRun(new UnicodeString.FormatRun((short)1,(short)0));
s.addFormatRun(new UnicodeString.FormatRun((short)2,(short)1));
- UnicodeString.UnicodeRecordStats stats = new UnicodeString.UnicodeRecordStats();
s.setOptionFlags((byte)0x8);
- s.getRecordSize(stats);
- assertEquals(SSTRecord.MAX_RECORD_SIZE, stats.recordSize);
+ confirmSize(MAX_DATA_SIZE, s);
//Test an uncompressed rich text string
- //Note that we can only ever get to a maximim size of 8227 since an uncompressed
+ //Note that we can only ever get to a maximum size of 8227 since an uncompressed
//string is writing double bytes.
- s = makeUnicodeString((SSTRecord.MAX_RECORD_SIZE-2-1-8-2)/2);
+ s = makeUnicodeString((MAX_DATA_SIZE-2-1-8-2)/2, true);
s.addFormatRun(new UnicodeString.FormatRun((short)1,(short)0));
s.addFormatRun(new UnicodeString.FormatRun((short)2,(short)1));
s.setOptionFlags((byte)0x9);
- stats = new UnicodeString.UnicodeRecordStats();
- s.getRecordSize(stats);
- assertEquals(SSTRecord.MAX_RECORD_SIZE-1, stats.recordSize);
+ confirmSize(MAX_DATA_SIZE-1, s);
}
public void testContinuedStringSize() {
//Test a basic string
- UnicodeString s = makeUnicodeString(SSTRecord.MAX_RECORD_SIZE-2-1+20);
- UnicodeString.UnicodeRecordStats stats = new UnicodeString.UnicodeRecordStats();
- s.getRecordSize(stats);
- assertEquals(SSTRecord.MAX_RECORD_SIZE+4+1+20, stats.recordSize);
+ UnicodeString s = makeUnicodeString(MAX_DATA_SIZE-2-1+20);
+ confirmSize(MAX_DATA_SIZE+4+1+20, s);
}
/** Tests that a string size calculation that fits neatly in two records, the second being a continue*/
public void testPerfectContinuedStringSize() {
//Test a basic string
- int strSize = SSTRecord.MAX_RECORD_SIZE*2;
+ int strSize = MAX_DATA_SIZE*2;
//String overhead
strSize -= 3;
//Continue Record overhead
//Continue Record additional byte overhead
strSize -= 1;
UnicodeString s = makeUnicodeString(strSize);
- UnicodeString.UnicodeRecordStats stats = new UnicodeString.UnicodeRecordStats();
- s.getRecordSize(stats);
- assertEquals(SSTRecord.MAX_RECORD_SIZE*2, stats.recordSize);
+ confirmSize(MAX_DATA_SIZE*2, s);
}
-
-
- private static UnicodeString makeUnicodeString( String s )
- {
+ private static UnicodeString makeUnicodeString(String s) {
UnicodeString st = new UnicodeString(s);
st.setOptionFlags((byte)0);
return st;
}
- private static UnicodeString makeUnicodeString( int numChars) {
+ private static UnicodeString makeUnicodeString(int numChars) {
+ return makeUnicodeString(numChars, false);
+ }
+ /**
+ * @param is16Bit if <code>true</code> the created string will have characters > 0x00FF
+ * @return a string of the specified number of characters
+ */
+ private static UnicodeString makeUnicodeString(int numChars, boolean is16Bit) {
StringBuffer b = new StringBuffer(numChars);
+ int charBase = is16Bit ? 0x8A00 : 'A';
for (int i=0;i<numChars;i++) {
- b.append(i%10);
+ char ch = (char) ((i%16)+charBase);
+ b.append(ch);
}
return makeUnicodeString(b.toString());
}
import junit.framework.TestCase;
import org.apache.poi.hssf.record.TestcaseRecordInputStream;
-import org.apache.poi.hssf.record.UnicodeString;
import org.apache.poi.hssf.usermodel.HSSFErrorConstants;
import org.apache.poi.util.HexRead;
import org.apache.poi.util.LittleEndianByteArrayOutputStream;
Boolean.TRUE,
null,
new Double(1.1),
- new UnicodeString("Sample text"),
+ "Sample text",
ErrorConstant.valueOf(HSSFErrorConstants.ERROR_DIV_0),
};
private static final byte[] SAMPLE_ENCODING = HexRead.readFromString(
assertEquals(Boolean.TRUE, values[0][0]);
- assertEquals(new UnicodeString("ABCD"), values[0][1]);
+ assertEquals("ABCD", values[0][1]);
assertEquals(new Double(0), values[1][0]);
assertEquals(Boolean.FALSE, values[1][1]);
- assertEquals(new UnicodeString("FG"), values[1][2]);
+ assertEquals("FG", values[1][2]);
byte[] outBuf = new byte[ENCODED_CONSTANT_DATA.length];
ptg.writeTokenValueBytes(new LittleEndianByteArrayOutputStream(outBuf, 0));
import org.apache.poi.hssf.util.HSSFColor;
/**
- * Tests various functionity having to do with HSSFCell. For instance support for
- * paticular datatypes, etc.
+ * Tests various functionality having to do with {@link HSSFCell}. For instance support for
+ * particular datatypes, etc.
* @author Andrew C. Oliver (andy at superlinksoftware dot com)
* @author Dan Sherman (dsherman at isisph.com)
* @author Alex Jacoby (ajacoby at gmail.com)
}
}
- /**
- * Test to ensure we can only assign cell styles that belong
- * to our workbook, and not those from other workbooks.
- */
- public void testCellStyleWorkbookMatch() throws Exception {
- HSSFWorkbook wbA = new HSSFWorkbook();
- HSSFWorkbook wbB = new HSSFWorkbook();
-
- HSSFCellStyle styA = wbA.createCellStyle();
- HSSFCellStyle styB = wbB.createCellStyle();
-
- styA.verifyBelongsToWorkbook(wbA);
- styB.verifyBelongsToWorkbook(wbB);
- try {
- styA.verifyBelongsToWorkbook(wbB);
- fail();
- } catch(IllegalArgumentException e) {}
- try {
- styB.verifyBelongsToWorkbook(wbA);
- fail();
- } catch(IllegalArgumentException e) {}
-
- HSSFCell cellA = wbA.createSheet().createRow(0).createCell(0);
- HSSFCell cellB = wbB.createSheet().createRow(0).createCell(0);
-
- cellA.setCellStyle(styA);
- cellB.setCellStyle(styB);
- try {
- cellA.setCellStyle(styB);
- fail();
- } catch(IllegalArgumentException e) {}
- try {
- cellB.setCellStyle(styA);
- fail();
- } catch(IllegalArgumentException e) {}
- }
+ /**
+ * Test to ensure we can only assign cell styles that belong
+ * to our workbook, and not those from other workbooks.
+ */
+ public void testCellStyleWorkbookMatch() {
+ HSSFWorkbook wbA = new HSSFWorkbook();
+ HSSFWorkbook wbB = new HSSFWorkbook();
+
+ HSSFCellStyle styA = wbA.createCellStyle();
+ HSSFCellStyle styB = wbB.createCellStyle();
+
+ styA.verifyBelongsToWorkbook(wbA);
+ styB.verifyBelongsToWorkbook(wbB);
+ try {
+ styA.verifyBelongsToWorkbook(wbB);
+ fail();
+ } catch (IllegalArgumentException e) {}
+ try {
+ styB.verifyBelongsToWorkbook(wbA);
+ fail();
+ } catch (IllegalArgumentException e) {}
+
+ HSSFCell cellA = wbA.createSheet().createRow(0).createCell(0);
+ HSSFCell cellB = wbB.createSheet().createRow(0).createCell(0);
+
+ cellA.setCellStyle(styA);
+ cellB.setCellStyle(styB);
+ try {
+ cellA.setCellStyle(styB);
+ fail();
+ } catch (IllegalArgumentException e) {}
+ try {
+ cellB.setCellStyle(styA);
+ fail();
+ } catch (IllegalArgumentException e) {}
+ }
+
+ public void testChangeTypeStringToBool() {
+ HSSFCell cell = new HSSFWorkbook().createSheet("Sheet1").createRow(0).createCell(0);
+
+ cell.setCellValue(new HSSFRichTextString("TRUE"));
+ assertEquals(HSSFCell.CELL_TYPE_STRING, cell.getCellType());
+ try {
+ cell.setCellType(HSSFCell.CELL_TYPE_BOOLEAN);
+ } catch (ClassCastException e) {
+ throw new AssertionFailedError(
+ "Identified bug in conversion of cell from text to boolean");
+ }
+
+ assertEquals(HSSFCell.CELL_TYPE_BOOLEAN, cell.getCellType());
+ assertEquals(true, cell.getBooleanCellValue());
+ cell.setCellType(HSSFCell.CELL_TYPE_STRING);
+ assertEquals("TRUE", cell.getRichStringCellValue().getString());
+
+ // 'false' text to bool and back
+ cell.setCellValue(new HSSFRichTextString("FALSE"));
+ cell.setCellType(HSSFCell.CELL_TYPE_BOOLEAN);
+ assertEquals(HSSFCell.CELL_TYPE_BOOLEAN, cell.getCellType());
+ assertEquals(false, cell.getBooleanCellValue());
+ cell.setCellType(HSSFCell.CELL_TYPE_STRING);
+ assertEquals("FALSE", cell.getRichStringCellValue().getString());
+ }
+
+ public void testChangeTypeBoolToString() {
+ HSSFCell cell = new HSSFWorkbook().createSheet("Sheet1").createRow(0).createCell(0);
+ cell.setCellValue(true);
+ try {
+ cell.setCellType(HSSFCell.CELL_TYPE_STRING);
+ } catch (IllegalStateException e) {
+ if (e.getMessage().equals("Cannot get a text value from a boolean cell")) {
+ throw new AssertionFailedError(
+ "Identified bug in conversion of cell from boolean to text");
+ }
+ throw e;
+ }
+ assertEquals("TRUE", cell.getRichStringCellValue().getString());
+ }
}