Otherwise, please follow the instructions found here:
http://xmlgraphics.apache.org/fop/stable/compiling.html
++[TEMPORARY]
++
++Until the above referenced instructions are updated, one of the following
++may be used to build FOP:
++
++Building with Maven (preferred)
++
++% mvn clean install
++
++Building with Ant (deprecated)
++
++% ant -f fop/build.xml clean all
How do I run FOP?
-----------------
For more details, see:
http://xmlgraphics.apache.org/fop/stable/running.html
++[TEMPORARY]
++
++Note that FOP command line scripts and build results from ant will be found
++under the ./fop sub-directory.
++
==============================================================================
RELEASE NOTES
==============================================================================
++Version 2.2 (Forthcoming)
++=========================
++
++Major Changes in Version 2.2
++----------------------------
++
++* Transition from Ant to Maven Build Process
++
++This release also contains a number of bug fixes.
++
Version 2.1
===========
--- /dev/null
- OpenFont otf = (isCFF) ? new OTFFile() : new TTFFile(useKerning, useAdvanced);
+ /*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ /* $Id$ */
+
+ package org.apache.fop.fonts.truetype;
+
+ import java.awt.Rectangle;
+ import java.io.IOException;
+ import java.io.InputStream;
+ import java.net.URI;
+ import java.util.Map;
+ import java.util.Set;
+
+ import org.apache.commons.io.IOUtils;
+
+ import org.apache.fop.apps.io.InternalResourceResolver;
+ import org.apache.fop.fonts.CIDFontType;
+ import org.apache.fop.fonts.CMapSegment;
+ import org.apache.fop.fonts.EmbeddingMode;
+ import org.apache.fop.fonts.EncodingMode;
+ import org.apache.fop.fonts.FontLoader;
+ import org.apache.fop.fonts.FontType;
+ import org.apache.fop.fonts.MultiByteFont;
+ import org.apache.fop.fonts.NamedCharacter;
+ import org.apache.fop.fonts.SingleByteFont;
+ import org.apache.fop.fonts.truetype.OpenFont.PostScriptVersion;
+ import org.apache.fop.util.HexEncoder;
+
+ /**
+ * Loads a TrueType font into memory directly from the original font file.
+ */
+ public class OFFontLoader extends FontLoader {
+
+ private MultiByteFont multiFont;
+ private SingleByteFont singleFont;
+ private final String subFontName;
+ private EncodingMode encodingMode;
+ private EmbeddingMode embeddingMode;
+
+ /**
+ * Default constructor
+ * @param fontFileURI the URI representing the font file
+ * @param resourceResolver the resource resolver for font URI resolution
+ */
+ public OFFontLoader(URI fontFileURI, InternalResourceResolver resourceResolver) {
+ this(fontFileURI, null, true, EmbeddingMode.AUTO, EncodingMode.AUTO, true, true, resourceResolver);
+ }
+
+ /**
+ * Additional constructor for TrueType Collections.
+ * @param fontFileURI the URI representing the font file
+ * @param subFontName the sub-fontname of a font in a TrueType Collection (or null for normal
+ * TrueType fonts)
+ * @param embedded indicates whether the font is embedded or referenced
+ * @param embeddingMode the embedding mode of the font
+ * @param encodingMode the requested encoding mode
+ * @param useKerning true to enable loading kerning info if available, false to disable
+ * @param useAdvanced true to enable loading advanced info if available, false to disable
+ * @param resolver the FontResolver for font URI resolution
+ */
+ public OFFontLoader(URI fontFileURI, String subFontName, boolean embedded,
+ EmbeddingMode embeddingMode, EncodingMode encodingMode, boolean useKerning,
+ boolean useAdvanced, InternalResourceResolver resolver) {
+ super(fontFileURI, embedded, useKerning, useAdvanced, resolver);
+ this.subFontName = subFontName;
+ this.encodingMode = encodingMode;
+ this.embeddingMode = embeddingMode;
+ if (this.encodingMode == EncodingMode.AUTO) {
+ this.encodingMode = EncodingMode.CID; //Default to CID mode for TrueType
+ }
+ if (this.embeddingMode == EmbeddingMode.AUTO) {
+ this.embeddingMode = EmbeddingMode.SUBSET;
+ }
+ }
+
+ /** {@inheritDoc} */
+ protected void read() throws IOException {
+ read(this.subFontName);
+ }
+
+ /**
+ * Reads a TrueType font.
+ * @param ttcFontName the TrueType sub-font name of TrueType Collection (may be null for
+ * normal TrueType fonts)
+ * @throws IOException if an I/O error occurs
+ */
+ private void read(String ttcFontName) throws IOException {
+ InputStream in = resourceResolver.getResource(this.fontFileURI);
+ try {
+ FontFileReader reader = new FontFileReader(in);
+ String header = readHeader(reader);
+ boolean isCFF = header.equals("OTTO");
++ OpenFont otf = (isCFF) ? new OTFFile(useKerning, useAdvanced) : new TTFFile(useKerning, useAdvanced);
+ boolean supported = otf.readFont(reader, header, ttcFontName);
+ if (!supported) {
+ throw new IOException("The font does not have a Unicode cmap table: " + fontFileURI);
+ }
+ buildFont(otf, ttcFontName);
+ loaded = true;
+ } finally {
+ IOUtils.closeQuietly(in);
+ }
+ }
+
+ public static String readHeader(FontFileReader fontFile) throws IOException {
+ if (fontFile != null) {
+ fontFile.seekSet(0);
+ return fontFile.readTTFString(4); // TTF_FIXED_SIZE (4 bytes)
+ }
+ return null;
+ }
+
+ private void buildFont(OpenFont otf, String ttcFontName) {
+ boolean isCid = this.embedded;
+ if (this.encodingMode == EncodingMode.SINGLE_BYTE) {
+ isCid = false;
+ }
+
+ if (isCid) {
+ multiFont = new MultiByteFont(resourceResolver, embeddingMode);
+ multiFont.setIsOTFFile(otf instanceof OTFFile);
+ returnFont = multiFont;
+ multiFont.setTTCName(ttcFontName);
+ } else {
+ singleFont = new SingleByteFont(resourceResolver, embeddingMode);
+ returnFont = singleFont;
+ }
+
+ returnFont.setFontURI(fontFileURI);
+ returnFont.setFontName(otf.getPostScriptName());
+ returnFont.setFullName(otf.getFullName());
+ returnFont.setFamilyNames(otf.getFamilyNames());
+ returnFont.setFontSubFamilyName(otf.getSubFamilyName());
+ returnFont.setCapHeight(otf.getCapHeight());
+ returnFont.setXHeight(otf.getXHeight());
+ returnFont.setAscender(otf.getLowerCaseAscent());
+ returnFont.setDescender(otf.getLowerCaseDescent());
+ returnFont.setFontBBox(otf.getFontBBox());
+ returnFont.setUnderlinePosition(otf.getUnderlinePosition() - otf.getUnderlineThickness() / 2);
+ returnFont.setUnderlineThickness(otf.getUnderlineThickness());
+ returnFont.setStrikeoutPosition(otf.getStrikeoutPosition() - otf.getStrikeoutThickness() / 2);
+ returnFont.setStrikeoutThickness(otf.getStrikeoutThickness());
+ returnFont.setFlags(otf.getFlags());
+ returnFont.setStemV(Integer.parseInt(otf.getStemV())); //not used for TTF
+ returnFont.setItalicAngle(Integer.parseInt(otf.getItalicAngle()));
+ returnFont.setMissingWidth(0);
+ returnFont.setWeight(otf.getWeightClass());
+ returnFont.setEmbeddingMode(this.embeddingMode);
+ if (isCid) {
+ if (otf instanceof OTFFile) {
+ multiFont.setCIDType(CIDFontType.CIDTYPE0);
+ } else {
+ multiFont.setCIDType(CIDFontType.CIDTYPE2);
+ }
+ multiFont.setWidthArray(otf.getWidths());
+ multiFont.setBBoxArray(otf.getBoundingBoxes());
+ } else {
+ singleFont.setFontType(FontType.TRUETYPE);
+ singleFont.setEncoding(otf.getCharSetName());
+ returnFont.setFirstChar(otf.getFirstChar());
+ returnFont.setLastChar(otf.getLastChar());
+ singleFont.setTrueTypePostScriptVersion(otf.getPostScriptVersion());
+ copyGlyphMetricsSingleByte(otf);
+ }
+ returnFont.setCMap(getCMap(otf));
+
+ if (otf.getKerning() != null && useKerning) {
+ copyKerning(otf, isCid);
+ }
+ if (useAdvanced) {
+ copyAdvanced(otf);
+ }
+ if (this.embedded) {
+ if (otf.isEmbeddable()) {
+ returnFont.setEmbedURI(this.fontFileURI);
+ } else {
+ String msg = "The font " + this.fontFileURI + " is not embeddable due to a"
+ + " licensing restriction.";
+ throw new RuntimeException(msg);
+ }
+ }
+ }
+
+ private CMapSegment[] getCMap(OpenFont otf) {
+ CMapSegment[] array = new CMapSegment[otf.getCMaps().size()];
+ return otf.getCMaps().toArray(array);
+ }
+
+ private void copyGlyphMetricsSingleByte(OpenFont otf) {
+ int[] wx = otf.getWidths();
+ Rectangle[] bboxes = otf.getBoundingBoxes();
+ for (int i = singleFont.getFirstChar(); i <= singleFont.getLastChar(); i++) {
+ singleFont.setWidth(i, otf.getCharWidth(i));
+ int[] bbox = otf.getBBox(i);
+ singleFont.setBoundingBox(i,
+ new Rectangle(bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]));
+ }
+
+ for (CMapSegment segment : otf.getCMaps()) {
+ if (segment.getUnicodeStart() < 0xFFFE) {
+ for (char u = (char)segment.getUnicodeStart(); u <= segment.getUnicodeEnd(); u++) {
+ int codePoint = singleFont.getEncoding().mapChar(u);
+ if (codePoint <= 0) {
+ int glyphIndex = segment.getGlyphStartIndex() + u - segment.getUnicodeStart();
+ String glyphName = otf.getGlyphName(glyphIndex);
+ if (glyphName.length() == 0 && otf.getPostScriptVersion() != PostScriptVersion.V2) {
+ glyphName = "u" + HexEncoder.encode(u);
+ }
+ if (glyphName.length() > 0) {
+ String unicode = Character.toString(u);
+ NamedCharacter nc = new NamedCharacter(glyphName, unicode);
+ singleFont.addUnencodedCharacter(nc, wx[glyphIndex], bboxes[glyphIndex]);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Copy kerning information.
+ */
+ private void copyKerning(OpenFont otf, boolean isCid) {
+
+ // Get kerning
+ Set<Integer> kerningSet;
+ if (isCid) {
+ kerningSet = otf.getKerning().keySet();
+ } else {
+ kerningSet = otf.getAnsiKerning().keySet();
+ }
+
+ for (Integer kpx1 : kerningSet) {
+ Map<Integer, Integer> h2;
+ if (isCid) {
+ h2 = otf.getKerning().get(kpx1);
+ } else {
+ h2 = otf.getAnsiKerning().get(kpx1);
+ }
+ returnFont.putKerningEntry(kpx1, h2);
+ }
+ }
+
+ /**
+ * Copy advanced typographic information.
+ */
+ private void copyAdvanced(OpenFont otf) {
+ if (returnFont instanceof MultiByteFont) {
+ MultiByteFont mbf = (MultiByteFont) returnFont;
+ mbf.setGDEF(otf.getGDEF());
+ mbf.setGSUB(otf.getGSUB());
+ mbf.setGPOS(otf.getGPOS());
+ }
+ }
+
+ }
--- /dev/null
+ /*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ /* $Id$ */
+
+ package org.apache.fop.fonts.truetype;
+
+ import java.io.IOException;
+ import java.util.ArrayList;
+ import java.util.Collection;
+ import java.util.HashMap;
+ import java.util.Iterator;
+ import java.util.List;
+ import java.util.Map;
+
+ import org.apache.fontbox.cff.CFFDataInput;
+ import org.apache.fontbox.cff.CFFFont;
+ import org.apache.fontbox.cff.CFFParser;
+ import org.apache.fontbox.cff.charset.CFFCharset;
+
+ public class OTFFile extends OpenFont {
+
+ protected CFFFont fileFont;
+
+ public OTFFile() throws IOException {
++ this(true, false);
++ }
++
++ public OTFFile(boolean useKerning, boolean useAdvanced) throws IOException {
++ super(useKerning, useAdvanced);
+ checkForFontbox();
+ }
+
+ private void checkForFontbox() throws IOException {
+ try {
+ Class.forName("org.apache.fontbox.cff.CFFFont");
+ } catch (ClassNotFoundException ex) {
+ throw new IOException("The Fontbox jar was not found in the classpath. This is "
+ + "required for OTF CFF support.");
+ }
+ }
+
+ @Override
+ protected void updateBBoxAndOffset() throws IOException {
+ List<Mapping> gidMappings = getGIDMappings(fileFont);
+ Map<Integer, String> sidNames = constructNameMap(gidMappings);
+ UnicodeMapping[] mappings = unicodeMappings.toArray(new UnicodeMapping[unicodeMappings.size()]);
+ for (int i = 0; i < mappings.length; i++) {
+ int glyphIdx = mappings[i].getGlyphIndex();
+ Mapping m = gidMappings.get(glyphIdx);
+ String name = sidNames.get(m.getSID());
+ mtxTab[glyphIdx].setName(name);
+ }
+ }
+
+ private List<Mapping> getGIDMappings(CFFFont font) {
+ List<Mapping> gidMappings = new ArrayList<Mapping>();
+ Mapping notdef = new Mapping();
+ gidMappings.add(notdef);
+ for (CFFCharset.Entry entry : font.getCharset().getEntries()) {
+ String name = entry.getName();
+ byte[] bytes = font.getCharStringsDict().get(name);
+ if (bytes == null) {
+ continue;
+ }
+ Mapping mapping = new Mapping();
+ mapping.setSID(entry.getSID());
+ mapping.setName(name);
+ mapping.setBytes(bytes);
+ gidMappings.add(mapping);
+ }
+ return gidMappings;
+ }
+
+ private Map<Integer, String> constructNameMap(Collection<Mapping> mappings) {
+ Map<Integer, String> sidNames = new HashMap<Integer, String>();
+ Iterator<Mapping> it = mappings.iterator();
+ while (it.hasNext()) {
+ Mapping mapping = it.next();
+ sidNames.put(mapping.getSID(), mapping.getName());
+ }
+ return sidNames;
+ }
+
+ private static class Mapping {
+ private int sid;
+ private String name;
+ private byte[] bytes;
+
+ public void setSID(int sid) {
+ this.sid = sid;
+ }
+
+ public int getSID() {
+ return sid;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public void setBytes(byte[] bytes) {
+ this.bytes = bytes;
+ }
+
+ public byte[] getBytes() {
+ return bytes;
+ }
+ }
+
+
+ @Override
+ protected void initializeFont(FontFileReader in) throws IOException {
+ fontFile = in;
+ fontFile.seekSet(0);
+ CFFParser parser = new CFFParser();
+ fileFont = parser.parse(in.getAllBytes()).get(0);
+ }
+
+ protected void readName() throws IOException {
+ Object familyName = fileFont.getProperty("FamilyName");
+ if (familyName != null && !familyName.equals("")) {
+ familyNames.add(familyName.toString());
+ fullName = familyName.toString();
+ } else {
+ fullName = fileFont.getName();
+ familyNames.add(fullName);
+ }
+ }
+
+ /**
+ * Reads the CFFData from a given font file
+ * @param fontFile The font file being read
+ * @return The byte data found in the CFF table
+ */
+ public static byte[] getCFFData(FontFileReader fontFile) throws IOException {
+ byte[] cff = fontFile.getAllBytes();
+ CFFDataInput input = new CFFDataInput(fontFile.getAllBytes());
+ input.readBytes(4); //OTTO
+ short numTables = input.readShort();
+ input.readShort(); //searchRange
+ input.readShort(); //entrySelector
+ input.readShort(); //rangeShift
+
+ for (int q = 0; q < numTables; q++) {
+ String tagName = new String(input.readBytes(4));
+ readLong(input); //Checksum
+ long offset = readLong(input);
+ long length = readLong(input);
+ if (tagName.equals("CFF ")) {
+ cff = new byte[(int)length];
+ System.arraycopy(fontFile.getAllBytes(), (int)offset, cff, 0, cff.length);
+ break;
+ }
+ }
+ return cff;
+ }
+
+ private static long readLong(CFFDataInput input) throws IOException {
+ return (input.readCard16() << 16) | input.readCard16();
+ }
+ }
--- /dev/null
- * @param name Name to be checked for in the font file
+ /*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ /* $Id$ */
+
+ package org.apache.fop.fonts.truetype;
+
+ import java.io.IOException;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collections;
+ import java.util.Comparator;
+ import java.util.HashMap;
+ import java.util.LinkedHashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Map.Entry;
+ import java.util.Set;
+
++import org.apache.commons.logging.Log;
++import org.apache.commons.logging.LogFactory;
++
+ import org.apache.fontbox.cff.CFFStandardString;
+ import org.apache.fontbox.cff.encoding.CFFEncoding;
+
+ import org.apache.fop.fonts.MultiByteFont;
+ import org.apache.fop.fonts.cff.CFFDataReader;
+ import org.apache.fop.fonts.cff.CFFDataReader.CFFIndexData;
+ import org.apache.fop.fonts.cff.CFFDataReader.DICTEntry;
+ import org.apache.fop.fonts.cff.CFFDataReader.FDSelect;
+ import org.apache.fop.fonts.cff.CFFDataReader.FontDict;
+ import org.apache.fop.fonts.cff.CFFDataReader.Format0FDSelect;
+ import org.apache.fop.fonts.cff.CFFDataReader.Format3FDSelect;
+
+ /**
+ * Reads an OpenType CFF file and generates a subset
+ * The OpenType specification can be found at the Microsoft
+ * Typography site: http://www.microsoft.com/typography/otspec/
+ */
+ public class OTFSubSetFile extends OTFFile {
+
+ protected byte[] output;
+ protected int currentPos;
+ private int realSize;
+
+ /** A map containing each glyph to be included in the subset
+ * with their existing and new GID's **/
+ protected LinkedHashMap<Integer, Integer> subsetGlyphs = new LinkedHashMap<Integer, Integer>();
+
+ /** A map of the new GID to SID used to construct the charset table **/
+ protected LinkedHashMap<Integer, Integer> gidToSID;
+
+ protected CFFIndexData localIndexSubr;
+ protected CFFIndexData globalIndexSubr;
+
+ /** List of subroutines to write to the local / global indexes in the subset font **/
+ protected List<byte[]> subsetLocalIndexSubr;
+ protected List<byte[]> subsetGlobalIndexSubr;
+
+ /** For fonts which have an FDSelect or ROS flag in Top Dict, this is used to store the
+ * local subroutine indexes for each group as opposed to the above subsetLocalIndexSubr */
+ private ArrayList<List<byte[]>> fdSubrs;
+
+ /** The subset FD Select table used to store the mappings between glyphs and their
+ * associated FDFont object which point to a private dict and local subroutines. */
+ private LinkedHashMap<Integer, FDIndexReference> subsetFDSelect;
+
+ /** A list of unique subroutines from the global / local subroutine indexes */
+ protected List<Integer> localUniques;
+ protected List<Integer> globalUniques;
+
+ /** A store of the number of subroutines each global / local subroutine will store **/
+ protected int subsetLocalSubrCount;
+ protected int subsetGlobalSubrCount;
+
+ /** A list of char string data for each glyph to be stored in the subset font **/
+ protected List<byte[]> subsetCharStringsIndex;
+
+ /** The embedded name to change in the name table **/
+ protected String embeddedName;
+
+ /** An array used to hold the string index data for the subset font **/
+ protected List<byte[]> stringIndexData = new ArrayList<byte[]>();
+
+ /** The CFF reader object used to read data and offsets from the original font file */
+ protected CFFDataReader cffReader;
+
+ /** The class used to represent this font **/
+ private MultiByteFont mbFont;
+
+ /** The number of standard strings in CFF **/
+ public static final int NUM_STANDARD_STRINGS = 391;
+ /** The operator used to identify a local subroutine reference */
+ private static final int LOCAL_SUBROUTINE = 10;
+ /** The operator used to identify a global subroutine reference */
+ private static final int GLOBAL_SUBROUTINE = 29;
++ /** The parser used to parse type2 charstring */
++ private Type2Parser type2Parser;
+
+ public OTFSubSetFile() throws IOException {
+ super();
+ }
+
+ public void readFont(FontFileReader in, String embeddedName, String header,
+ MultiByteFont mbFont) throws IOException {
+ this.mbFont = mbFont;
+ readFont(in, embeddedName, header, mbFont.getUsedGlyphs());
+ }
+
+ /**
+ * Reads and creates a subset of the font.
+ *
+ * @param in FontFileReader to read from
- * @param glyphs Map of glyphs (glyphs has old index as (Integer) key and
++ * @param embeddedName Name to be checked for in the font file
+ * @param header The header of the font file
- for (int i = 0; i < uniqueNewRefs.size(); i++) {
- FontDict fdFont = fdFonts.get(uniqueNewRefs.get(i));
++ * @param usedGlyphs Map of glyphs (glyphs has old index as (Integer) key and
+ * new index as (Integer) value)
+ * @throws IOException in case of an I/O problem
+ */
+ void readFont(FontFileReader in, String embeddedName, String header,
+ Map<Integer, Integer> usedGlyphs) throws IOException {
+ fontFile = in;
+
+ currentPos = 0;
+ realSize = 0;
+
+ this.embeddedName = embeddedName;
+
+ //Sort by the new GID and store in a LinkedHashMap
+ subsetGlyphs = sortByValue(usedGlyphs);
+
+ output = new byte[in.getFileSize()];
+
+ initializeFont(in);
+
+ cffReader = new CFFDataReader(fontFile);
+
+ //Create the CIDFontType0C data
+ createCFF();
+ }
+
+ private LinkedHashMap<Integer, Integer> sortByValue(Map<Integer, Integer> map) {
+ List<Entry<Integer, Integer>> list = new ArrayList<Entry<Integer, Integer>>(map.entrySet());
+ Collections.sort(list, new Comparator<Entry<Integer, Integer>>() {
+ public int compare(Entry<Integer, Integer> o1, Entry<Integer, Integer> o2) {
+ return ((Comparable<Integer>) o1.getValue()).compareTo(o2.getValue());
+ }
+ });
+
+ LinkedHashMap<Integer, Integer> result = new LinkedHashMap<Integer, Integer>();
+ for (Entry<Integer, Integer> entry : list) {
+ result.put(entry.getKey(), entry.getValue());
+ }
+ return result;
+ }
+
+ protected void createCFF() throws IOException {
+ //Header
+ writeBytes(cffReader.getHeader());
+
+ //Name Index
+ writeIndex(Arrays.asList(embeddedName.getBytes()));
+
+ //Keep offset of the topDICT so it can be updated once all data has been written
+ int topDictOffset = currentPos;
+ //Top DICT Index and Data
+ byte[] topDictIndex = cffReader.getTopDictIndex().getByteData();
+ int offSize = topDictIndex[2];
+ writeBytes(topDictIndex, 0, 3 + (offSize * 2));
+ int topDictDataOffset = currentPos;
+ writeTopDICT();
+
+ //Create the char string index data and related local / global subroutines
+ if (cffReader.getFDSelect() == null) {
+ createCharStringData();
+ } else {
+ createCharStringDataCID();
+ }
+
+ //If it is a CID-Keyed font, store each FD font and add each SID
+ List<Integer> fontNameSIDs = null;
+ List<Integer> subsetFDFonts = null;
+ if (cffReader.getFDSelect() != null) {
+ subsetFDFonts = getUsedFDFonts();
+ fontNameSIDs = storeFDStrings(subsetFDFonts);
+ }
+
+ //String index
+ writeStringIndex();
+
+ //Global subroutine index
+ writeIndex(subsetGlobalIndexSubr);
+
+ //Encoding
+ int encodingOffset = currentPos;
+ writeEncoding(fileFont.getEncoding());
+
+ //Charset table
+ int charsetOffset = currentPos;
+ writeCharsetTable(cffReader.getFDSelect() != null);
+
+ //FDSelect table
+ int fdSelectOffset = currentPos;
+ if (cffReader.getFDSelect() != null) {
+ writeFDSelect();
+ }
+
+ //Char Strings Index
+ int charStringOffset = currentPos;
+ writeIndex(subsetCharStringsIndex);
+
+ if (cffReader.getFDSelect() == null) {
+ //Keep offset to modify later with the local subroutine index offset
+ int privateDictOffset = currentPos;
+ writePrivateDict();
+
+ //Local subroutine index
+ int localIndexOffset = currentPos;
+ writeIndex(subsetLocalIndexSubr);
+
+ //Update the offsets
+ updateOffsets(topDictOffset, charsetOffset, charStringOffset, privateDictOffset,
+ localIndexOffset, encodingOffset);
+ } else {
+ List<Integer> privateDictOffsets = writeCIDDictsAndSubrs(subsetFDFonts);
+ int fdArrayOffset = writeFDArray(subsetFDFonts, privateDictOffsets, fontNameSIDs);
+
+ updateCIDOffsets(topDictDataOffset, fdArrayOffset, fdSelectOffset, charsetOffset,
+ charStringOffset, encodingOffset);
+ }
+ }
+
+ protected List<Integer> storeFDStrings(List<Integer> uniqueNewRefs) throws IOException {
+ ArrayList<Integer> fontNameSIDs = new ArrayList<Integer>();
+ List<FontDict> fdFonts = cffReader.getFDFonts();
- for (int i = 0; i < out.length; i++) {
- writeByte(out[i]);
++ for (Integer uniqueNewRef : uniqueNewRefs) {
++ FontDict fdFont = fdFonts.get(uniqueNewRef);
+ byte[] fdFontByteData = fdFont.getByteData();
+ Map<String, DICTEntry> fdFontDict = cffReader.parseDictData(fdFontByteData);
+ fontNameSIDs.add(stringIndexData.size() + NUM_STANDARD_STRINGS);
+ stringIndexData.add(cffReader.getStringIndex().getValue(fdFontDict.get("FontName")
+ .getOperands().get(0).intValue() - NUM_STANDARD_STRINGS));
+ }
+ return fontNameSIDs;
+ }
+
+ protected void writeBytes(byte[] out) {
- + dictEntry.getOperandLengths().get(1), dictEntry.getOperandLengths().get(2), 139);
++ for (byte anOut : out) {
++ writeByte(anOut);
+ }
+ }
+
+ protected void writeBytes(byte[] out, int offset, int length) {
+ for (int i = offset; i < offset + length; i++) {
+ output[currentPos++] = out[i];
+ realSize++;
+ }
+ }
+
+ private void writeEncoding(CFFEncoding encoding) throws IOException {
+ LinkedHashMap<String, DICTEntry> topDICT = cffReader.getTopDictEntries();
+ DICTEntry encodingEntry = topDICT.get("Encoding");
+ if (encodingEntry != null && encodingEntry.getOperands().get(0).intValue() != 0
+ && encodingEntry.getOperands().get(0).intValue() != 1) {
+ writeByte(0);
+ writeByte(gidToSID.size());
+ for (int gid : gidToSID.keySet()) {
+ int code = encoding.getCode(gidToSID.get(gid));
+ writeByte(code);
+ }
+ }
+ }
+
+ protected void writeTopDICT() throws IOException {
+ LinkedHashMap<String, DICTEntry> topDICT = cffReader.getTopDictEntries();
+ List<String> topDictStringEntries = Arrays.asList("version", "Notice", "Copyright",
+ "FullName", "FamilyName", "Weight", "PostScript");
+ for (Map.Entry<String, DICTEntry> dictEntry : topDICT.entrySet()) {
+ String dictKey = dictEntry.getKey();
+ DICTEntry entry = dictEntry.getValue();
+ //If the value is an SID, update the reference but keep the size the same
+ if (dictKey.equals("ROS")) {
+ writeROSEntry(entry);
+ } else if (dictKey.equals("CIDCount")) {
+ writeCIDCount(entry);
+ } else if (topDictStringEntries.contains(dictKey)) {
+ writeTopDictStringEntry(entry);
+ } else {
+ writeBytes(entry.getByteData());
+ }
+ }
+ }
+
+ private void writeROSEntry(DICTEntry dictEntry) throws IOException {
+ int sidA = dictEntry.getOperands().get(0).intValue();
+ if (sidA > 390) {
+ stringIndexData.add(cffReader.getStringIndex().getValue(sidA - NUM_STANDARD_STRINGS));
+ }
+ int sidAStringIndex = stringIndexData.size() + 390;
+ int sidB = dictEntry.getOperands().get(1).intValue();
+ if (sidB > 390) {
+ stringIndexData.add("Identity".getBytes());
+ }
+ int sidBStringIndex = stringIndexData.size() + 390;
+ byte[] cidEntryByteData = dictEntry.getByteData();
+ cidEntryByteData = updateOffset(cidEntryByteData, 0, dictEntry.getOperandLengths().get(0),
+ sidAStringIndex);
+ cidEntryByteData = updateOffset(cidEntryByteData, dictEntry.getOperandLengths().get(0),
+ dictEntry.getOperandLengths().get(1), sidBStringIndex);
+ cidEntryByteData = updateOffset(cidEntryByteData, dictEntry.getOperandLengths().get(0)
- dictEntry.getOperandLength());
++ + dictEntry.getOperandLengths().get(1), dictEntry.getOperandLengths().get(2), 0);
+ writeBytes(cidEntryByteData);
+ }
+
+ protected void writeCIDCount(DICTEntry dictEntry) throws IOException {
+ byte[] cidCountByteData = dictEntry.getByteData();
+ cidCountByteData = updateOffset(cidCountByteData, 0, dictEntry.getOperandLengths().get(0),
+ subsetGlyphs.size());
+ writeBytes(cidCountByteData);
+ }
+
+ private void writeTopDictStringEntry(DICTEntry dictEntry) throws IOException {
+ int sid = dictEntry.getOperands().get(0).intValue();
+ if (sid > 391) {
+ stringIndexData.add(cffReader.getStringIndex().getValue(sid - 391));
+ }
+
+ byte[] newDictEntry = createNewRef(stringIndexData.size() + 390, dictEntry.getOperator(),
- if (index <= cffReader.getStringIndex().getNumObjects()) {
++ dictEntry.getOperandLength(), true);
+ writeBytes(newDictEntry);
+ }
+
+ private void writeStringIndex() throws IOException {
+ Map<String, DICTEntry> topDICT = cffReader.getTopDictEntries();
+ int charsetOffset = topDICT.get("charset").getOperands().get(0).intValue();
+
+ gidToSID = new LinkedHashMap<Integer, Integer>();
+
+ for (int gid : subsetGlyphs.keySet()) {
+ int sid = cffReader.getSIDFromGID(charsetOffset, gid);
+ //Check whether the SID falls into the standard string set
+ if (sid < NUM_STANDARD_STRINGS) {
+ gidToSID.put(subsetGlyphs.get(gid), sid);
+ if (mbFont != null) {
+ mbFont.mapUsedGlyphName(subsetGlyphs.get(gid),
+ CFFStandardString.getName(sid));
+ }
+ } else {
+ int index = sid - NUM_STANDARD_STRINGS;
- for (int i = 0; i < uniqueGroups.size(); i++) {
++ //index is 0 based, should use < not <=
++ if (index < cffReader.getStringIndex().getNumObjects()) {
+ if (mbFont != null) {
+ mbFont.mapUsedGlyphName(subsetGlyphs.get(gid),
+ new String(cffReader.getStringIndex().getValue(index)));
+ }
+ gidToSID.put(subsetGlyphs.get(gid), stringIndexData.size() + 391);
+ stringIndexData.add(cffReader.getStringIndex().getValue(index));
+ } else {
+ if (mbFont != null) {
+ mbFont.mapUsedGlyphName(subsetGlyphs.get(gid), ".notdef");
+ }
+ gidToSID.put(subsetGlyphs.get(gid), index);
+ }
+ }
+ }
+ //Write the String Index
+ writeIndex(stringIndexData);
+ }
+
+ protected void createCharStringDataCID() throws IOException {
+ CFFIndexData charStringsIndex = cffReader.getCharStringIndex();
+
+ FDSelect fontDictionary = cffReader.getFDSelect();
+ if (fontDictionary instanceof Format0FDSelect) {
+ throw new UnsupportedOperationException("OTF CFF CID Format0 currently not implemented");
+ } else if (fontDictionary instanceof Format3FDSelect) {
+ Format3FDSelect fdSelect = (Format3FDSelect)fontDictionary;
+ Map<Integer, Integer> subsetGroups = new HashMap<Integer, Integer>();
+
+ List<Integer> uniqueGroups = new ArrayList<Integer>();
+ for (int gid : subsetGlyphs.keySet()) {
+ Set<Integer> rangeKeys = fdSelect.getRanges().keySet();
+ Integer[] ranges = rangeKeys.toArray(new Integer[rangeKeys.size()]);
+ for (int i = 0; i < ranges.length; i++) {
+ int nextRange = -1;
+ if (i < ranges.length - 1) {
+ nextRange = ranges[i + 1];
+ } else {
+ nextRange = fdSelect.getSentinelGID();
+ }
+ if (gid >= ranges[i] && gid < nextRange) {
+ subsetGroups.put(gid, fdSelect.getRanges().get(ranges[i]));
+ if (!uniqueGroups.contains(fdSelect.getRanges().get(ranges[i]))) {
+ uniqueGroups.add(fdSelect.getRanges().get(ranges[i]));
+ }
+ }
+ }
+ }
+
+ //Prepare resources
+ globalIndexSubr = cffReader.getGlobalIndexSubr();
+
+ //Create the new char string index
+ subsetCharStringsIndex = new ArrayList<byte[]>();
+
+ globalUniques = new ArrayList<Integer>();
+
+ subsetFDSelect = new LinkedHashMap<Integer, FDIndexReference>();
+
+ List<List<Integer>> foundLocalUniques = new ArrayList<List<Integer>>();
- for (int l = 0; l < foundLocalUniques.size(); l++) {
++ for (Integer uniqueGroup1 : uniqueGroups) {
+ foundLocalUniques.add(new ArrayList<Integer>());
+ }
++ Map<Integer, Integer> gidHintMaskLengths = new HashMap<Integer, Integer>();
+ for (int gid : subsetGlyphs.keySet()) {
+ int group = subsetGroups.get(gid);
+ localIndexSubr = cffReader.getFDFonts().get(group).getLocalSubrData();
+ localUniques = foundLocalUniques.get(uniqueGroups.indexOf(subsetGroups.get(gid)));
++ type2Parser = new Type2Parser();
+
+ FDIndexReference newFDReference = new FDIndexReference(
+ uniqueGroups.indexOf(subsetGroups.get(gid)), subsetGroups.get(gid));
+ subsetFDSelect.put(subsetGlyphs.get(gid), newFDReference);
+ byte[] data = charStringsIndex.getValue(gid);
+ preScanForSubsetIndexSize(data);
++ gidHintMaskLengths.put(gid, type2Parser.getMaskLength());
+ }
+
+ //Create the two lists which are to store the local and global subroutines
+ subsetGlobalIndexSubr = new ArrayList<byte[]>();
+
+ fdSubrs = new ArrayList<List<byte[]>>();
+ subsetGlobalSubrCount = globalUniques.size();
+ globalUniques.clear();
+ localUniques = null;
+
- for (int k = 0; k < uniqueGroups.size(); k++) {
++ for (List<Integer> foundLocalUnique : foundLocalUniques) {
+ fdSubrs.add(new ArrayList<byte[]>());
+ }
+ List<List<Integer>> foundLocalUniquesB = new ArrayList<List<Integer>>();
- int encodingValue = 0;
- if (fdPrivateDict.get("Subrs").getOperandLength() == 1) {
- encodingValue = 139;
- }
++ for (Integer uniqueGroup : uniqueGroups) {
+ foundLocalUniquesB.add(new ArrayList<Integer>());
+ }
+ for (Integer gid : subsetGlyphs.keySet()) {
+ int group = subsetGroups.get(gid);
+ localIndexSubr = cffReader.getFDFonts().get(group).getLocalSubrData();
+ localUniques = foundLocalUniquesB.get(subsetFDSelect.get(subsetGlyphs.get(gid)).getNewFDIndex());
+ byte[] data = charStringsIndex.getValue(gid);
+ subsetLocalIndexSubr = fdSubrs.get(subsetFDSelect.get(subsetGlyphs.get(gid)).getNewFDIndex());
+ subsetLocalSubrCount = foundLocalUniques.get(subsetFDSelect.get(subsetGlyphs.get(gid))
+ .getNewFDIndex()).size();
++ type2Parser = new Type2Parser();
++ type2Parser.setMaskLength(gidHintMaskLengths.get(gid));
+ data = readCharStringData(data, subsetLocalSubrCount);
+ subsetCharStringsIndex.add(data);
+ }
+ }
+ }
+
+ protected void writeFDSelect() {
+ writeByte(0); //Format
+ for (Integer gid : subsetFDSelect.keySet()) {
+ writeByte(subsetFDSelect.get(gid).getNewFDIndex());
+ }
+ }
+
+ protected List<Integer> getUsedFDFonts() {
+ List<Integer> uniqueNewRefs = new ArrayList<Integer>();
+ for (int gid : subsetFDSelect.keySet()) {
+ int fdIndex = subsetFDSelect.get(gid).getOldFDIndex();
+ if (!uniqueNewRefs.contains(fdIndex)) {
+ uniqueNewRefs.add(fdIndex);
+ }
+ }
+ return uniqueNewRefs;
+ }
+
+ protected List<Integer> writeCIDDictsAndSubrs(List<Integer> uniqueNewRefs)
+ throws IOException {
+ List<Integer> privateDictOffsets = new ArrayList<Integer>();
+ List<FontDict> fdFonts = cffReader.getFDFonts();
+ for (int i = 0; i < uniqueNewRefs.size(); i++) {
+ FontDict curFDFont = fdFonts.get(uniqueNewRefs.get(i));
+ HashMap<String, DICTEntry> fdPrivateDict = cffReader.parseDictData(
+ curFDFont.getPrivateDictData());
+ int privateDictOffset = currentPos;
+ privateDictOffsets.add(privateDictOffset);
+ byte[] fdPrivateDictByteData = curFDFont.getPrivateDictData();
+ if (fdPrivateDict.get("Subrs") != null) {
- fdPrivateDictByteData.length + encodingValue);
+ fdPrivateDictByteData = updateOffset(fdPrivateDictByteData, fdPrivateDict.get("Subrs").getOffset(),
+ fdPrivateDict.get("Subrs").getOperandLength(),
- for (int i = 0; i < uniqueNewRefs.size(); i++) {
- FontDict fdFont = fdFonts.get(uniqueNewRefs.get(i));
++ fdPrivateDictByteData.length);
+ }
+ writeBytes(fdPrivateDictByteData);
+ writeIndex(fdSubrs.get(i));
+ }
+ return privateDictOffsets;
+ }
+
+ protected int writeFDArray(List<Integer> uniqueNewRefs, List<Integer> privateDictOffsets,
+ List<Integer> fontNameSIDs)
+ throws IOException {
+ int offset = currentPos;
+ List<FontDict> fdFonts = cffReader.getFDFonts();
+
+ writeCard16(uniqueNewRefs.size());
+ writeByte(1); //Offset size
+ writeByte(1); //First offset
+
+ int count = 1;
-
++ for (Integer uniqueNewRef : uniqueNewRefs) {
++ FontDict fdFont = fdFonts.get(uniqueNewRef);
+ count += fdFont.getByteData().length;
+ writeByte(count);
+ }
+
+ for (int i = 0; i < uniqueNewRefs.size(); i++) {
+ FontDict fdFont = fdFonts.get(uniqueNewRefs.get(i));
+ byte[] fdFontByteData = fdFont.getByteData();
+ Map<String, DICTEntry> fdFontDict = cffReader.parseDictData(fdFontByteData);
+ //Update the SID to the FontName
+ fdFontByteData = updateOffset(fdFontByteData, fdFontDict.get("FontName").getOffset() - 1,
+ fdFontDict.get("FontName").getOperandLengths().get(0),
+ fontNameSIDs.get(i));
+ //Update the Private dict reference
+ fdFontByteData = updateOffset(fdFontByteData, fdFontDict.get("Private").getOffset()
+ + fdFontDict.get("Private").getOperandLengths().get(0),
+ fdFontDict.get("Private").getOperandLengths().get(1),
+ privateDictOffsets.get(i));
+ writeBytes(fdFontByteData);
+ }
+ return offset;
+ }
+
+ private class FDIndexReference {
+ private int newFDIndex;
+ private int oldFDIndex;
+
+ public FDIndexReference(int newFDIndex, int oldFDIndex) {
+ this.newFDIndex = newFDIndex;
+ this.oldFDIndex = oldFDIndex;
+ }
+
+ public int getNewFDIndex() {
+ return newFDIndex;
+ }
+
+ public int getOldFDIndex() {
+ return oldFDIndex;
+ }
+ }
+
+ private void createCharStringData() throws IOException {
+ Map<String, DICTEntry> topDICT = cffReader.getTopDictEntries();
+
+ CFFIndexData charStringsIndex = cffReader.getCharStringIndex();
+
+ DICTEntry privateEntry = topDICT.get("Private");
+ if (privateEntry != null) {
+ int privateOffset = privateEntry.getOperands().get(1).intValue();
+ Map<String, DICTEntry> privateDICT = cffReader.getPrivateDict(privateEntry);
+
+ if (privateDICT.get("Subrs") != null) {
+ int localSubrOffset = privateOffset + privateDICT.get("Subrs").getOperands().get(0).intValue();
+ localIndexSubr = cffReader.readIndex(localSubrOffset);
+ } else {
+ localIndexSubr = cffReader.readIndex(null);
+ }
+ }
+
+ globalIndexSubr = cffReader.getGlobalIndexSubr();
+
+ //Create the two lists which are to store the local and global subroutines
+ subsetLocalIndexSubr = new ArrayList<byte[]>();
+ subsetGlobalIndexSubr = new ArrayList<byte[]>();
+
+ //Create the new char string index
+ subsetCharStringsIndex = new ArrayList<byte[]>();
+
+ localUniques = new ArrayList<Integer>();
+ globalUniques = new ArrayList<Integer>();
- BytesNumber operand = new BytesNumber(-1, -1);
++ Map<Integer, Integer> gidHintMaskLengths = new HashMap<Integer, Integer>();
+ for (int gid : subsetGlyphs.keySet()) {
++ type2Parser = new Type2Parser();
+ byte[] data = charStringsIndex.getValue(gid);
+ preScanForSubsetIndexSize(data);
++ gidHintMaskLengths.put(gid, type2Parser.getMaskLength());
+ }
+
+ //Store the size of each subset index and clear the unique arrays
+ subsetLocalSubrCount = localUniques.size();
+ subsetGlobalSubrCount = globalUniques.size();
+ localUniques.clear();
+ globalUniques.clear();
+
+ for (int gid : subsetGlyphs.keySet()) {
+ byte[] data = charStringsIndex.getValue(gid);
++ type2Parser = new Type2Parser();
+ //Retrieve modified char string data and fill local / global subroutine arrays
++ type2Parser.setMaskLength(gidHintMaskLengths.get(gid));
+ data = readCharStringData(data, subsetLocalSubrCount);
+ subsetCharStringsIndex.add(data);
+ }
+ }
+
++ static class Type2Parser {
++ /**
++ * logging instance
++ */
++ protected Log log = LogFactory.getLog(Type2Parser.class);
++
++ private ArrayList<BytesNumber> stack = new ArrayList<BytesNumber>();
++ private int hstemCount;
++ private int vstemCount;
++ private int lastOp = -1;
++ private int maskLength = -1;
++
++ public void pushOperand(BytesNumber v) {
++ stack.add(v);
++ }
++
++ public BytesNumber popOperand() {
++ return stack.remove(stack.size() - 1);
++ }
++
++ public void clearStack() {
++ stack.clear();
++ }
++
++ public int[] getOperands(int numbers) {
++ int[] ret = new int[numbers];
++ while (numbers > 0) {
++ numbers--;
++ ret[numbers] = this.popOperand().getNumber();
++ }
++ return ret;
++ }
++
++ public void setMaskLength(int maskLength) {
++ this.maskLength = maskLength;
++ }
++
++ public int getMaskLength() {
++ // The number of data bytes for mask is exactly the number needed, one
++ // bit per hint, to reference the number of stem hints declared
++ // at the beginning of the charstring program.
++ if (maskLength > 0) {
++ return maskLength;
++ }
++ return 1 + (hstemCount + vstemCount - 1) / 8;
++ }
++
++ public int exec(int b0, byte[] data, int dataPos) {
++ int posDelta = 0;
++ if ((b0 >= 0 && b0 <= 27) || (b0 >= 29 && b0 <= 31)) {
++ if (b0 == 12) {
++ dataPos += 1;
++ log.warn("May not guess the operand count correctly.");
++ posDelta = 1;
++ } else if (b0 == 1 || b0 == 18) {
++ // hstem(hm) operator
++ hstemCount += stack.size() / 2;
++ clearStack();
++ } else if (b0 == 19 || b0 == 20) {
++ if (lastOp == 1 || lastOp == 18) {
++ //If hstem and vstem hints are both declared at the beginning of
++ //a charstring, and this sequence is followed directly by the
++ //hintmask or cntrmask operators, the vstem hint operator need
++ //not be included.
++ vstemCount += stack.size() / 2;
++ }
++ clearStack();
++ posDelta = getMaskLength();
++ } else if (b0 == 3 || b0 == 23) {
++ // vstem(hm) operator
++ vstemCount += stack.size() / 2;
++ clearStack();
++ }
++ if (b0 != 11 && b0 != 12) {
++ lastOp = b0;
++ }
++ } else if (b0 == 28 || (b0 >= 32 && b0 <= 255)) {
++ BytesNumber operand = readNumber(b0, data, dataPos);
++ pushOperand(operand);
++ posDelta = operand.getNumBytes() - 1;
++ } else {
++ throw new UnsupportedOperationException("Operator:" + b0 + " is not supported");
++ }
++ return posDelta;
++ }
++
++ private BytesNumber readNumber(int b0, byte[] input, int curPos) {
++ if (b0 == 28) {
++ int b1 = input[curPos + 1] & 0xff;
++ int b2 = input[curPos + 2] & 0xff;
++ return new BytesNumber((int) (short) (b1 << 8 | b2), 3);
++ } else if (b0 >= 32 && b0 <= 246) {
++ return new BytesNumber(b0 - 139, 1);
++ } else if (b0 >= 247 && b0 <= 250) {
++ int b1 = input[curPos + 1] & 0xff;
++ return new BytesNumber((b0 - 247) * 256 + b1 + 108, 2);
++ } else if (b0 >= 251 && b0 <= 254) {
++ int b1 = input[curPos + 1] & 0xff;
++ return new BytesNumber(-(b0 - 251) * 256 - b1 - 108, 2);
++ } else if (b0 == 255) {
++ int b1 = input[curPos + 1] & 0xff;
++ int b2 = input[curPos + 2] & 0xff;
++ int b3 = input[curPos + 3] & 0xff;
++ int b4 = input[curPos + 4] & 0xff;
++ return new BytesNumber((b1 << 24 | b2 << 16 | b3 << 8 | b4), 5);
++ } else {
++ throw new IllegalArgumentException();
++ }
++ }
++ }
+ private void preScanForSubsetIndexSize(byte[] data) throws IOException {
+ boolean hasLocalSubroutines = localIndexSubr != null && localIndexSubr.getNumObjects() > 0;
+ boolean hasGlobalSubroutines = globalIndexSubr != null && globalIndexSubr.getNumObjects() > 0;
- int subrNumber = getSubrNumber(localIndexSubr.getNumObjects(), operand.getNumber());
-
+ for (int dataPos = 0; dataPos < data.length; dataPos++) {
+ int b0 = data[dataPos] & 0xff;
+ if (b0 == LOCAL_SUBROUTINE && hasLocalSubroutines) {
- operand.clearNumber();
++ int subrNumber = getSubrNumber(localIndexSubr.getNumObjects(), type2Parser.popOperand().getNumber());
+ if (!localUniques.contains(subrNumber) && subrNumber < localIndexSubr.getNumObjects()) {
+ localUniques.add(subrNumber);
++ }
++ if (subrNumber < localIndexSubr.getNumObjects()) {
+ byte[] subr = localIndexSubr.getValue(subrNumber);
+ preScanForSubsetIndexSize(subr);
++ } else {
++ throw new IllegalArgumentException("callsubr out of range");
+ }
- int subrNumber = getSubrNumber(globalIndexSubr.getNumObjects(), operand.getNumber());
-
+ } else if (b0 == GLOBAL_SUBROUTINE && hasGlobalSubroutines) {
- operand.clearNumber();
- } else if ((b0 >= 0 && b0 <= 27) || (b0 >= 29 && b0 <= 31)) {
- operand.clearNumber();
- if (b0 == 19 || b0 == 20) {
- dataPos += 1;
- }
- } else if (b0 == 28 || (b0 >= 32 && b0 <= 255)) {
- operand = readNumber(b0, data, dataPos);
- dataPos += operand.getNumBytes() - 1;
++ int subrNumber = getSubrNumber(globalIndexSubr.getNumObjects(), type2Parser.popOperand().getNumber());
+ if (!globalUniques.contains(subrNumber) && subrNumber < globalIndexSubr.getNumObjects()) {
+ globalUniques.add(subrNumber);
++ }
++ if (subrNumber < globalIndexSubr.getNumObjects()) {
+ byte[] subr = globalIndexSubr.getValue(subrNumber);
+ preScanForSubsetIndexSize(subr);
++ } else {
++ throw new IllegalArgumentException("callgsubr out of range");
+ }
- BytesNumber operand = new BytesNumber(-1, -1);
++ } else {
++ dataPos += type2Parser.exec(b0, data, dataPos);
+ }
+ }
+ }
+
+ private int getSubrNumber(int numSubroutines, int operand) {
+ int bias = getBias(numSubroutines);
+ return bias + operand;
+ }
+
+ private byte[] readCharStringData(byte[] data, int subsetLocalSubrCount) throws IOException {
+ boolean hasLocalSubroutines = localIndexSubr != null && localIndexSubr.getNumObjects() > 0;
+ boolean hasGlobalSubroutines = globalIndexSubr != null && globalIndexSubr.getNumObjects() > 0;
-
- operand.clearNumber();
+ for (int dataPos = 0; dataPos < data.length; dataPos++) {
+ int b0 = data[dataPos] & 0xff;
+ if (b0 == 10 && hasLocalSubroutines) {
++ BytesNumber operand = type2Parser.popOperand();
+ int subrNumber = getSubrNumber(localIndexSubr.getNumObjects(), operand.getNumber());
+
+ int newRef = getNewRefForReference(subrNumber, localUniques, localIndexSubr, subsetLocalIndexSubr,
+ subsetLocalSubrCount);
+
+ if (newRef != -1) {
+ byte[] newData = constructNewRefData(dataPos, data, operand, subsetLocalSubrCount,
+ newRef, new int[] {10});
+ dataPos -= data.length - newData.length;
+ data = newData;
+ }
-
- operand.clearNumber();
- } else if ((b0 >= 0 && b0 <= 27) || (b0 >= 29 && b0 <= 31)) {
- operand.clearNumber();
- if (b0 == 19) {
- dataPos += 1;
- }
- } else if (b0 == 28 || (b0 >= 32 && b0 <= 255)) {
- operand = readNumber(b0, data, dataPos);
- dataPos += operand.getNumBytes() - 1;
+ } else if (b0 == 29 && hasGlobalSubroutines) {
++ BytesNumber operand = type2Parser.popOperand();
+ int subrNumber = getSubrNumber(globalIndexSubr.getNumObjects(), operand.getNumber());
+
+ int newRef = getNewRefForReference(subrNumber, globalUniques, globalIndexSubr, subsetGlobalIndexSubr,
+ subsetGlobalSubrCount);
+
+ if (newRef != -1) {
+ byte[] newData = constructNewRefData(dataPos, data, operand, subsetGlobalSubrCount,
+ newRef, new int[] {29});
+ dataPos -= (data.length - newData.length);
+ data = newData;
+ }
- int newRef = -1;
++ } else {
++ dataPos += type2Parser.exec(b0, data, dataPos);
+ }
+ }
+
+ //Return the data with the modified references to our arrays
+ return data;
+ }
+
+ private int getNewRefForReference(int subrNumber, List<Integer> uniquesArray,
+ CFFIndexData indexSubr, List<byte[]> subsetIndexSubr, int subrCount) throws IOException {
- if (!uniquesArray.contains(subrNumber)) {
- uniquesArray.add(subrNumber);
- subsetIndexSubr.add(subr);
- newRef = subsetIndexSubr.size() - 1;
- } else {
- newRef = uniquesArray.indexOf(subrNumber);
- }
++ int newRef;
+ if (!uniquesArray.contains(subrNumber)) {
+ if (subrNumber < indexSubr.getNumObjects()) {
+ byte[] subr = indexSubr.getValue(subrNumber);
+ subr = readCharStringData(subr, subrCount);
- byte[] newRefBytes = createNewRef(newRef, operatorCode, -1);
++ uniquesArray.add(subrNumber);
++ subsetIndexSubr.add(subr);
++ newRef = subsetIndexSubr.size() - 1;
++ } else {
++ throw new IllegalArgumentException("subrNumber out of range");
+ }
+ } else {
+ newRef = uniquesArray.indexOf(subrNumber);
+ }
+ return newRef;
+ }
+
+ private int getBias(int subrCount) {
+ if (subrCount < 1240) {
+ return 107;
+ } else if (subrCount < 33900) {
+ return 1131;
+ } else {
+ return 32768;
+ }
+ }
+
+ private byte[] constructNewRefData(int curDataPos, byte[] currentData, BytesNumber operand,
+ int fullSubsetIndexSize, int curSubsetIndexSize, int[] operatorCode) {
+ //Create the new array with the modified reference
+ byte[] newData;
+ int startRef = curDataPos - operand.getNumBytes();
+ int length = operand.getNumBytes() + 1;
+ byte[] preBytes = new byte[startRef];
+ System.arraycopy(currentData, 0, preBytes, 0, startRef);
+ int newBias = getBias(fullSubsetIndexSize);
+ int newRef = curSubsetIndexSize - newBias;
- public static byte[] createNewRef(int newRef, int[] operatorCode, int forceLength) {
++ byte[] newRefBytes = createNewRef(newRef, operatorCode, -1, false);
+ newData = concatArray(preBytes, newRefBytes);
+ byte[] postBytes = new byte[currentData.length - (startRef + length)];
+ System.arraycopy(currentData, startRef + length, postBytes, 0,
+ currentData.length - (startRef + length));
+ return concatArray(newData, postBytes);
+ }
+
- if ((forceLength == -1 && newRef <= 107) || forceLength == 1) {
++ public static byte[] createNewRef(int newRef, int[] operatorCode, int forceLength, boolean isDict) {
+ byte[] newRefBytes;
+ int sizeOfOperator = operatorCode.length;
- } else if ((forceLength == -1 && newRef <= 1131) || forceLength == 2) {
++ if ((forceLength == -1 && newRef >= -107 && newRef <= 107) || forceLength == 1) {
+ newRefBytes = new byte[1 + sizeOfOperator];
+ //The index values are 0 indexed
+ newRefBytes[0] = (byte)(newRef + 139);
+ for (int i = 0; i < operatorCode.length; i++) {
+ newRefBytes[1 + i] = (byte)operatorCode[i];
+ }
- if (newRef <= 363) {
++ } else if ((forceLength == -1 && newRef >= -1131 && newRef <= 1131) || forceLength == 2) {
+ newRefBytes = new byte[2 + sizeOfOperator];
- newRefBytes[1] = (byte)(newRef - 108);
++ if (newRef <= -876) {
++ newRefBytes[0] = (byte)254;
++ } else if (newRef <= -620) {
++ newRefBytes[0] = (byte)253;
++ } else if (newRef <= -364) {
++ newRefBytes[0] = (byte)252;
++ } else if (newRef <= -108) {
++ newRefBytes[0] = (byte)251;
++ } else if (newRef <= 363) {
+ newRefBytes[0] = (byte)247;
+ } else if (newRef <= 619) {
+ newRefBytes[0] = (byte)248;
+ } else if (newRef <= 875) {
+ newRefBytes[0] = (byte)249;
+ } else {
+ newRefBytes[0] = (byte)250;
+ }
- } else if ((forceLength == -1 && newRef <= 32767) || forceLength == 3) {
++ if (newRef > 0) {
++ newRefBytes[1] = (byte)(newRef - 108);
++ } else {
++ newRefBytes[1] = (byte)(-newRef - 108);
++ }
+ for (int i = 0; i < operatorCode.length; i++) {
+ newRefBytes[2 + i] = (byte)operatorCode[i];
+ }
- newRefBytes[0] = 29;
++ } else if ((forceLength == -1 && newRef >= -32768 && newRef <= 32767) || forceLength == 3) {
+ newRefBytes = new byte[3 + sizeOfOperator];
+ newRefBytes[0] = 28;
+ newRefBytes[1] = (byte)(newRef >> 8);
+ newRefBytes[2] = (byte)newRef;
+ for (int i = 0; i < operatorCode.length; i++) {
+ newRefBytes[3 + i] = (byte)operatorCode[i];
+ }
+ } else {
+ newRefBytes = new byte[5 + sizeOfOperator];
- int totLength = 0;
- for (int i = 0; i < dataArray.size(); i++) {
- totLength += dataArray.get(i).length;
++ if (isDict) {
++ newRefBytes[0] = 29;
++ } else {
++ newRefBytes[0] = (byte)255;
++ }
+ newRefBytes[1] = (byte)(newRef >> 24);
+ newRefBytes[2] = (byte)(newRef >> 16);
+ newRefBytes[3] = (byte)(newRef >> 8);
+ newRefBytes[4] = (byte)newRef;
+ for (int i = 0; i < operatorCode.length; i++) {
+ newRefBytes[5 + i] = (byte)operatorCode[i];
+ }
+ }
+ return newRefBytes;
+ }
+
+ public static byte[] concatArray(byte[] a, byte[] b) {
+ int aLen = a.length;
+ int bLen = b.length;
+ byte[] c = new byte[aLen + bLen];
+ System.arraycopy(a, 0, c, 0, aLen);
+ System.arraycopy(b, 0, c, aLen, bLen);
+ return c;
+ }
+
+ protected int writeIndex(List<byte[]> dataArray) {
+ int hdrTotal = 3;
+ //2 byte number of items
+ this.writeCard16(dataArray.size());
+ //Offset Size: 1 byte = 256, 2 bytes = 65536 etc.
- if (totLength <= (1 << 8)) {
++ //Offsets in the offset array are relative to the byte that precedes the object data.
++ //Therefore the first element of the offset array is always 1.
++ int totLength = 1;
++ for (byte[] aDataArray1 : dataArray) {
++ totLength += aDataArray1.length;
+ }
+ int offSize = 1;
- } else if (totLength <= (1 << 16)) {
++ if (totLength < (1 << 8)) {
+ offSize = 1;
- } else if (totLength <= (1 << 24)) {
++ } else if (totLength < (1 << 16)) {
+ offSize = 2;
- for (int i = 0; i < dataArray.size(); i++) {
- writeBytes(dataArray.get(i));
++ } else if (totLength < (1 << 24)) {
+ offSize = 3;
+ } else {
+ offSize = 4;
+ }
+ this.writeByte(offSize);
+ //Count the first offset 1
+ hdrTotal += offSize;
+ int total = 0;
+ for (int i = 0; i < dataArray.size(); i++) {
+ hdrTotal += offSize;
+ int length = dataArray.get(i).length;
+ switch (offSize) {
+ case 1:
+ if (i == 0) {
+ writeByte(1);
+ }
+ total += length;
+ writeByte(total + 1);
+ break;
+ case 2:
+ if (i == 0) {
+ writeCard16(1);
+ }
+ total += length;
+ writeCard16(total + 1);
+ break;
+ case 3:
+ if (i == 0) {
+ writeThreeByteNumber(1);
+ }
+ total += length;
+ writeThreeByteNumber(total + 1);
+ break;
+ case 4:
+ if (i == 0) {
+ writeULong(1);
+ }
+ total += length;
+ writeULong(total + 1);
+ break;
+ default:
+ throw new AssertionError("Offset Size was not an expected value.");
+ }
+ }
- private BytesNumber readNumber(int b0, byte[] input, int curPos) throws IOException {
- if (b0 == 28) {
- int b1 = input[curPos + 1] & 0xff;
- int b2 = input[curPos + 2] & 0xff;
- return new BytesNumber(Integer.valueOf((short) (b1 << 8 | b2)), 3);
- } else if (b0 >= 32 && b0 <= 246) {
- return new BytesNumber(Integer.valueOf(b0 - 139), 1);
- } else if (b0 >= 247 && b0 <= 250) {
- int b1 = input[curPos + 1] & 0xff;
- return new BytesNumber(Integer.valueOf((b0 - 247) * 256 + b1 + 108), 2);
- } else if (b0 >= 251 && b0 <= 254) {
- int b1 = input[curPos + 1] & 0xff;
- return new BytesNumber(Integer.valueOf(-(b0 - 251) * 256 - b1 - 108), 2);
- } else if (b0 == 255) {
- int b1 = input[curPos + 1] & 0xff;
- int b2 = input[curPos + 2] & 0xff;
- return new BytesNumber(Integer.valueOf((short)(b1 << 8 | b2)), 5);
- } else {
- throw new IllegalArgumentException();
- }
- }
-
++ for (byte[] aDataArray : dataArray) {
++ writeBytes(aDataArray);
+ }
+ return hdrTotal + total;
+ }
+
- //Value needs to be converted to -139 etc.
- int encodeValue = 0;
- if (subroutines.getOperandLength() == 1) {
- encodeValue = 139;
- }
+ /**
+ * A class used to store the last number operand and also its size in bytes
+ */
+ static class BytesNumber {
+ private int number;
+ private int numBytes;
+
+ public BytesNumber(int number, int numBytes) {
+ this.number = number;
+ this.numBytes = numBytes;
+ }
+
+ public int getNumber() {
+ return this.number;
+ }
+
+ public int getNumBytes() {
+ return this.numBytes;
+ }
+
+ public void clearNumber() {
+ this.number = -1;
+ this.numBytes = -1;
+ }
+
+ public String toString() {
+ return Integer.toString(number);
+ }
+
+ @Override
+ public boolean equals(Object entry) {
+ assert entry instanceof BytesNumber;
+ BytesNumber bnEntry = (BytesNumber)entry;
+ return this.number == bnEntry.getNumber()
+ && this.numBytes == bnEntry.getNumBytes();
+ }
+
+ @Override
+ public int hashCode() {
+ int hash = 1;
+ hash = hash * 17 + number;
+ hash = hash * 31 + numBytes;
+ return hash;
+ }
+ }
+
+ private void writeCharsetTable(boolean cidFont) throws IOException {
+ writeByte(0);
+ for (int gid : gidToSID.keySet()) {
+ if (cidFont && gid == 0) {
+ continue;
+ }
+ writeCard16((cidFont) ? gid : gidToSID.get(gid));
+ }
+ }
+
+ protected void writePrivateDict() throws IOException {
+ Map<String, DICTEntry> topDICT = cffReader.getTopDictEntries();
+
+ DICTEntry privateEntry = topDICT.get("Private");
+ if (privateEntry != null) {
+ writeBytes(cffReader.getPrivateDictBytes(privateEntry));
+ }
+ }
+
+ protected void updateOffsets(int topDictOffset, int charsetOffset, int charStringOffset,
+ int privateDictOffset, int localIndexOffset, int encodingOffset)
+ throws IOException {
+ Map<String, DICTEntry> topDICT = cffReader.getTopDictEntries();
+ Map<String, DICTEntry> privateDICT = null;
+
+ DICTEntry privateEntry = topDICT.get("Private");
+ if (privateEntry != null) {
+ privateDICT = cffReader.getPrivateDict(privateEntry);
+ }
+
+ int dataPos = 3 + (cffReader.getTopDictIndex().getOffSize()
+ * cffReader.getTopDictIndex().getOffsets().length);
+ int dataTopDictOffset = topDictOffset + dataPos;
+
+ updateFixedOffsets(topDICT, dataTopDictOffset, charsetOffset, charStringOffset, encodingOffset);
+
+ if (privateDICT != null) {
+ //Private index offset in the top dict
+ int oldPrivateOffset = dataTopDictOffset + privateEntry.getOffset();
+ output = updateOffset(output, oldPrivateOffset + privateEntry.getOperandLengths().get(0),
+ privateEntry.getOperandLengths().get(1), privateDictOffset);
+
+ //Update the local subroutine index offset in the private dict
+ DICTEntry subroutines = privateDICT.get("Subrs");
+ if (subroutines != null) {
+ int oldLocalSubrOffset = privateDictOffset + subroutines.getOffset();
- (localIndexOffset - privateDictOffset) + encodeValue);
+ output = updateOffset(output, oldLocalSubrOffset, subroutines.getOperandLength(),
- out[position] = (byte)(replacement & 0xFF);
++ (localIndexOffset - privateDictOffset));
+ }
+ }
+ }
+
+ protected void updateFixedOffsets(Map<String, DICTEntry> topDICT, int dataTopDictOffset,
+ int charsetOffset, int charStringOffset, int encodingOffset) {
+ //Charset offset in the top dict
+ DICTEntry charset = topDICT.get("charset");
+ int oldCharsetOffset = dataTopDictOffset + charset.getOffset();
+ output = updateOffset(output, oldCharsetOffset, charset.getOperandLength(), charsetOffset);
+
+ //Char string index offset in the private dict
+ DICTEntry charString = topDICT.get("CharStrings");
+ int oldCharStringOffset = dataTopDictOffset + charString.getOffset();
+ output = updateOffset(output, oldCharStringOffset, charString.getOperandLength(), charStringOffset);
+
+ DICTEntry encodingEntry = topDICT.get("Encoding");
+ if (encodingEntry != null && encodingEntry.getOperands().get(0).intValue() != 0
+ && encodingEntry.getOperands().get(0).intValue() != 1) {
+ int oldEncodingOffset = dataTopDictOffset + encodingEntry.getOffset();
+ output = updateOffset(output, oldEncodingOffset, encodingEntry.getOperandLength(), encodingOffset);
+ }
+ }
+
+ protected void updateCIDOffsets(int topDictDataOffset, int fdArrayOffset, int fdSelectOffset,
+ int charsetOffset, int charStringOffset, int encodingOffset) {
+ LinkedHashMap<String, DICTEntry> topDict = cffReader.getTopDictEntries();
+
+ DICTEntry fdArrayEntry = topDict.get("FDArray");
+ if (fdArrayEntry != null) {
+ output = updateOffset(output, topDictDataOffset + fdArrayEntry.getOffset() - 1,
+ fdArrayEntry.getOperandLength(), fdArrayOffset);
+ }
+
+ DICTEntry fdSelect = topDict.get("FDSelect");
+ if (fdSelect != null) {
+ output = updateOffset(output, topDictDataOffset + fdSelect.getOffset() - 1,
+ fdSelect.getOperandLength(), fdSelectOffset);
+ }
+
+ updateFixedOffsets(topDict, topDictDataOffset, charsetOffset, charStringOffset, encodingOffset);
+ }
+
+ protected byte[] updateOffset(byte[] out, int position, int length, int replacement) {
+ switch (length) {
+ case 1:
- if (replacement <= 363) {
++ out[position] = (byte)(replacement + 139);
+ break;
+ case 2:
- out[position + 1] = (byte)(replacement - 108);
++ if (replacement <= -876) {
++ out[position] = (byte)254;
++ } else if (replacement <= -620) {
++ out[position] = (byte)253;
++ } else if (replacement <= -364) {
++ out[position] = (byte)252;
++ } else if (replacement <= -108) {
++ out[position] = (byte)251;
++ } else if (replacement <= 363) {
+ out[position] = (byte)247;
+ } else if (replacement <= 619) {
+ out[position] = (byte)248;
+ } else if (replacement <= 875) {
+ out[position] = (byte)249;
+ } else {
+ out[position] = (byte)250;
+ }
++ if (replacement > 0) {
++ out[position + 1] = (byte)(replacement - 108);
++ } else {
++ out[position + 1] = (byte)(-replacement - 108);
++ }
+ break;
+ case 3:
+ out[position] = (byte)28;
+ out[position + 1] = (byte)((replacement >> 8) & 0xFF);
+ out[position + 2] = (byte)(replacement & 0xFF);
+ break;
+ case 5:
+ out[position] = (byte)29;
+ out[position + 1] = (byte)((replacement >> 24) & 0xFF);
+ out[position + 2] = (byte)((replacement >> 16) & 0xFF);
+ out[position + 3] = (byte)((replacement >> 8) & 0xFF);
+ out[position + 4] = (byte)(replacement & 0xFF);
+ break;
+ default:
+ }
+ return out;
+ }
+
+ /**
+ * Appends a byte to the output array,
+ * updates currentPos and realSize
+ */
+ protected void writeByte(int b) {
+ output[currentPos++] = (byte)b;
+ realSize++;
+ }
+
+ /**
+ * Appends a USHORT to the output array,
+ * updates currentPos and realSize
+ */
+ protected void writeCard16(int s) {
+ byte b1 = (byte)((s >> 8) & 0xff);
+ byte b2 = (byte)(s & 0xff);
+ writeByte(b1);
+ writeByte(b2);
+ }
+
+ private void writeThreeByteNumber(int s) {
+ byte b1 = (byte)((s >> 16) & 0xFF);
+ byte b2 = (byte)((s >> 8) & 0xFF);
+ byte b3 = (byte)(s & 0xFF);
+ writeByte(b1);
+ writeByte(b2);
+ writeByte(b3);
+ }
+
+ /**
+ * Appends a ULONG to the output array,
+ * updating currentPos and realSize
+ */
+ private void writeULong(int s) {
+ byte b1 = (byte)((s >> 24) & 0xff);
+ byte b2 = (byte)((s >> 16) & 0xff);
+ byte b3 = (byte)((s >> 8) & 0xff);
+ byte b4 = (byte)(s & 0xff);
+ writeByte(b1);
+ writeByte(b2);
+ writeByte(b3);
+ writeByte(b4);
+ }
+
+ /**
+ * Returns a subset of the fonts (readFont() MUST be called first in order to create the
+ * subset).
+ * @return byte array
+ */
+ public byte[] getFontSubset() {
+ byte[] ret = new byte[realSize];
+ System.arraycopy(output, 0, ret, 0, realSize);
+ return ret;
+ }
+
+ /**
+ * Returns the parsed CFF data for the original font.
+ * @return The CFFDataReader containing the parsed data
+ */
+ public CFFDataReader getCFFReader() {
+ return cffReader;
+ }
+ }
--- /dev/null
- if (log.isWarnEnabled()) {
- int lack = difference + bestActiveNode.availableShrink;
- // if this LLM is nested inside a BlockContainerLayoutManager that is constraining
- // the available width and thus responsible for the overflow then we do not issue
- // warning event here and instead let the BCLM handle that at a later stage
- if (lack < 0 && !handleOverflow(-lack)) {
- InlineLevelEventProducer eventProducer
- = InlineLevelEventProducer.Provider.get(
- getFObj().getUserAgent().getEventBroadcaster());
- if (curChildLM.getFObj() == null) {
- eventProducer.lineOverflows(this, getFObj().getName(), bestActiveNode.line,
- -lack, getFObj().getLocator());
- } else {
- eventProducer.lineOverflows(this, curChildLM.getFObj().getName(), bestActiveNode.line,
- -lack, curChildLM.getFObj().getLocator());
- }
+ /*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ /* $Id$ */
+
+ package org.apache.fop.layoutmgr.inline;
+
+ import java.util.ArrayList;
+ import java.util.Collections;
+ import java.util.Iterator;
+ import java.util.LinkedList;
+ import java.util.List;
+ import java.util.ListIterator;
+
+ import org.apache.commons.logging.Log;
+ import org.apache.commons.logging.LogFactory;
+
+ import org.apache.fop.area.Area;
+ import org.apache.fop.area.LineArea;
+ import org.apache.fop.area.Trait;
+ import org.apache.fop.area.inline.InlineArea;
+ import org.apache.fop.complexscripts.bidi.BidiResolver;
+ import org.apache.fop.datatypes.Length;
+ import org.apache.fop.datatypes.Numeric;
+ import org.apache.fop.fo.Constants;
+ import org.apache.fop.fo.flow.Block;
+ import org.apache.fop.fo.properties.CommonHyphenation;
+ import org.apache.fop.fo.properties.KeepProperty;
+ import org.apache.fop.fonts.Font;
+ import org.apache.fop.fonts.FontInfo;
+ import org.apache.fop.fonts.FontTriplet;
+ import org.apache.fop.hyphenation.Hyphenation;
+ import org.apache.fop.hyphenation.Hyphenator;
+ import org.apache.fop.layoutmgr.Adjustment;
+ import org.apache.fop.layoutmgr.BlockLayoutManager;
+ import org.apache.fop.layoutmgr.BlockLevelLayoutManager;
+ import org.apache.fop.layoutmgr.BreakElement;
+ import org.apache.fop.layoutmgr.BreakingAlgorithm;
+ import org.apache.fop.layoutmgr.ElementListObserver;
+ import org.apache.fop.layoutmgr.FloatContentLayoutManager;
+ import org.apache.fop.layoutmgr.FootenoteUtil;
+ import org.apache.fop.layoutmgr.FootnoteBodyLayoutManager;
+ import org.apache.fop.layoutmgr.InlineKnuthSequence;
+ import org.apache.fop.layoutmgr.Keep;
+ import org.apache.fop.layoutmgr.KnuthBlockBox;
+ import org.apache.fop.layoutmgr.KnuthBox;
+ import org.apache.fop.layoutmgr.KnuthElement;
+ import org.apache.fop.layoutmgr.KnuthGlue;
+ import org.apache.fop.layoutmgr.KnuthPenalty;
+ import org.apache.fop.layoutmgr.KnuthPossPosIter;
+ import org.apache.fop.layoutmgr.KnuthSequence;
+ import org.apache.fop.layoutmgr.LayoutContext;
+ import org.apache.fop.layoutmgr.LayoutManager;
+ import org.apache.fop.layoutmgr.LeafPosition;
+ import org.apache.fop.layoutmgr.ListElement;
+ import org.apache.fop.layoutmgr.NonLeafPosition;
+ import org.apache.fop.layoutmgr.Position;
+ import org.apache.fop.layoutmgr.PositionIterator;
+ import org.apache.fop.layoutmgr.SpaceSpecifier;
+ import org.apache.fop.traits.MinOptMax;
+
+ /**
+ * LayoutManager for lines. It builds one or more lines containing
+ * inline areas generated by its sub layout managers.
+ * A break is found for each line which may contain one or more
+ * breaks from the child layout managers.
+ * Once a break is found, it is returned for the parent layout
+ * manager to handle.
+ * When the areas are being added to the page this manager
+ * creates a line area to contain the inline areas added by the
+ * child layout managers.
+ */
+ public class LineLayoutManager extends InlineStackingLayoutManager
+ implements BlockLevelLayoutManager {
+
+ /**
+ * this constant is used to create elements when text-align is center:
+ * every TextLM descendant of LineLM must use the same value,
+ * otherwise the line breaking algorithm does not find the right
+ * break point
+ */
+ public static final int DEFAULT_SPACE_WIDTH = 3336;
+
+ /**
+ * logging instance
+ */
+ private static Log log = LogFactory.getLog(LineLayoutManager.class);
+
+ private final Block fobj;
+ private boolean isFirstInBlock;
+
+ /**
+ * Private class to store information about inline breaks.
+ * Each value holds the start and end indexes into a List of
+ * inline break positions.
+ */
+ static class LineBreakPosition extends LeafPosition {
+ private final int parIndex; // index of the Paragraph this Position refers to
+ private final int startIndex; //index of the first element this Position refers to
+ private final int availableShrink;
+ private final int availableStretch;
+ private final int difference;
+ private final double dAdjust; // Percentage to adjust (stretch or shrink)
+ private final double ipdAdjust; // Percentage to adjust (stretch or shrink)
+ private final int startIndent;
+ private final int endIndent;
+ private final int lineHeight;
+ private final int lineWidth;
+ private final int spaceBefore;
+ private final int spaceAfter;
+ private final int baseline;
+
+ LineBreakPosition(LayoutManager lm, int index, int startIndex, int breakIndex,
+ int shrink, int stretch, int diff, double ipdA, double adjust, int si,
+ int ei, int lh, int lw, int sb, int sa, int bl) {
+ super(lm, breakIndex);
+ availableShrink = shrink;
+ availableStretch = stretch;
+ difference = diff;
+ parIndex = index;
+ this.startIndex = startIndex;
+ ipdAdjust = ipdA;
+ dAdjust = adjust;
+ startIndent = si;
+ endIndent = ei;
+ lineHeight = lh;
+ lineWidth = lw;
+ spaceBefore = sb;
+ spaceAfter = sa;
+ baseline = bl;
+ }
+
+ }
+
+
+ private int bidiLevel = -1;
+ private int textAlignment = EN_JUSTIFY;
+ private int textAlignmentLast;
+ private int effectiveAlignment;
+ private Length textIndent;
+ private Length lastLineEndIndent;
+ private CommonHyphenation hyphenationProperties;
+ private Numeric hyphenationLadderCount;
+ private int wrapOption = EN_WRAP;
+ private int whiteSpaceTreament;
+ //private LayoutProps layoutProps;
+
+ private final Length lineHeight;
+ private final int lead;
+ private final int follow;
+ private AlignmentContext alignmentContext;
+
+ private int baselineOffset = -1;
+
+ private List<KnuthSequence> knuthParagraphs;
+
+ private LineLayoutPossibilities lineLayouts;
+ private LineLayoutPossibilities[] lineLayoutsList;
+ private int ipd;
+ /**
+ * When layout must be re-started due to a change of IPD, there is no need
+ * to perform hyphenation on the remaining Knuth sequence once again.
+ */
+ private boolean hyphenationPerformed;
+
+ /**
+ * This class is used to remember
+ * which was the first element in the paragraph
+ * returned by each LM.
+ */
+ private final class Update {
+ private final InlineLevelLayoutManager inlineLM;
+ private final int firstIndex;
+
+ private Update(InlineLevelLayoutManager lm, int index) {
+ inlineLM = lm;
+ firstIndex = index;
+ }
+ }
+
+ // this class represents a paragraph
+ private static class Paragraph extends InlineKnuthSequence {
+
+ private static final long serialVersionUID = 5862072380375189105L;
+
+ /** Number of elements to ignore at the beginning of the list. */
+ private int ignoreAtStart;
+ /** Number of elements to ignore at the end of the list. */
+ private int ignoreAtEnd;
+
+ // space at the end of the last line (in millipoints)
+ private MinOptMax lineFiller;
+ private final int textAlignment;
+ private final int textAlignmentLast;
+ private final int textIndent;
+ private final int lastLineEndIndent;
+ // the LM which created the paragraph
+ private final LineLayoutManager layoutManager;
+
+ Paragraph(LineLayoutManager llm, int alignment, int alignmentLast,
+ int indent, int endIndent) {
+ super();
+ layoutManager = llm;
+ textAlignment = alignment;
+ textAlignmentLast = alignmentLast;
+ textIndent = indent;
+ lastLineEndIndent = endIndent;
+ }
+
+ @Override
+ public void startSequence() {
+ // set the minimum amount of empty space at the end of the
+ // last line
+ if (textAlignment == EN_CENTER) {
+ lineFiller = MinOptMax.getInstance(lastLineEndIndent);
+ } else {
+ lineFiller = MinOptMax.getInstance(lastLineEndIndent, lastLineEndIndent,
+ layoutManager.ipd);
+ }
+
+ // add auxiliary elements at the beginning of the paragraph
+ if (textAlignment == EN_CENTER && textAlignmentLast != EN_JUSTIFY) {
+ this.add(new KnuthGlue(0, 3 * DEFAULT_SPACE_WIDTH, 0,
+ null, false));
+ ignoreAtStart++;
+ }
+
+ // add the element representing text indentation
+ // at the beginning of the first paragraph
+ if (layoutManager.isFirstInBlock && layoutManager.knuthParagraphs.size() == 0
+ && textIndent != 0) {
+ this.add(new KnuthInlineBox(textIndent, null,
+ null, false));
+ ignoreAtStart++;
+ }
+ }
+
+ public void endParagraph() {
+ KnuthSequence finishedPar = this.endSequence();
+ if (finishedPar != null) {
+ layoutManager.knuthParagraphs.add(finishedPar);
+ }
+ }
+
+ @Override
+ public KnuthSequence endSequence() {
+ if (this.size() > ignoreAtStart) {
+ if (textAlignment == EN_CENTER
+ && textAlignmentLast != EN_JUSTIFY) {
+ this.add(new KnuthGlue(0, 3 * DEFAULT_SPACE_WIDTH, 0,
+ null, false));
+ this.add(new KnuthPenalty(lineFiller.getOpt(), -KnuthElement.INFINITE,
+ false, null, false));
+ ignoreAtEnd = 2;
+ } else if (textAlignmentLast != EN_JUSTIFY) {
+ // add the elements representing the space
+ // at the end of the last line
+ // and the forced break
+ this.add(new KnuthPenalty(0, KnuthElement.INFINITE,
+ false, null, false));
+ this.add(new KnuthGlue(0,
+ lineFiller.getStretch(),
+ lineFiller.getShrink(), null, false));
+ this.add(new KnuthPenalty(lineFiller.getOpt(), -KnuthElement.INFINITE,
+ false, null, false));
+ ignoreAtEnd = 3;
+ } else {
+ // add only the element representing the forced break
+ this.add(new KnuthPenalty(lineFiller.getOpt(), -KnuthElement.INFINITE,
+ false, null, false));
+ ignoreAtEnd = 1;
+ }
+ return this;
+ } else {
+ this.clear();
+ return null;
+ }
+ }
+
+ /**
+ * @return true if the sequence contains a box
+ */
+ public boolean containsBox() {
+ for (int i = 0; i < this.size(); i++) {
+ KnuthElement el = (KnuthElement)this.get(i);
+ if (el.isBox()) {
+ return true;
+ }
+ }
+ return false;
+ }
+ }
+
+ private class LineBreakingAlgorithm extends BreakingAlgorithm {
+ private final LineLayoutManager thisLLM;
+ private final int pageAlignment;
+ private int activePossibility;
+ private int addedPositions;
+ private final int textIndent;
+ private final int lineHeight;
+ private final int lead;
+ private final int follow;
+ private static final double MAX_DEMERITS = 10e6;
+
+ public LineBreakingAlgorithm(int pageAlign, int textAlign, int textAlignLast, int indent, int fillerWidth,
+ int lh, int ld, int fl, boolean first, int maxFlagCount, LineLayoutManager llm) {
+ super(textAlign, textAlignLast, first, false, maxFlagCount);
+ pageAlignment = pageAlign;
+ textIndent = indent;
+ lineHeight = lh;
+ lead = ld;
+ follow = fl;
+ thisLLM = llm;
+ activePossibility = -1;
+ }
+
+ @Override
+ public void updateData1(int lineCount, double demerits) {
+ lineLayouts.addPossibility(lineCount, demerits);
+ if (log.isTraceEnabled()) {
+ log.trace("Layout possibility in " + lineCount + " lines; break at position:");
+ }
+ }
+
+ @Override
+ public void updateData2(KnuthNode bestActiveNode,
+ KnuthSequence par,
+ int total) {
+ // compute indent and adjustment ratio, according to
+ // the value of text-align and text-align-last
+ int startIndent;
+ int endIndent;
+ int difference = bestActiveNode.difference;
+ int textAlign = (bestActiveNode.line < total) ? alignment : alignmentLast;
+
+ switch (textAlign) {
+ case Constants.EN_START:
+ startIndent = 0;
+ endIndent = difference > 0 ? difference : 0;
+ break;
+ case Constants.EN_END:
+ startIndent = difference;
+ endIndent = 0;
+ break;
+ case Constants.EN_CENTER:
+ startIndent = difference / 2;
+ endIndent = startIndent;
+ break;
+ default:
+ case Constants.EN_JUSTIFY:
+ startIndent = 0;
+ endIndent = 0;
+ break;
+ }
+
+ /*
+ startIndent += (textAlign == Constants.EN_CENTER)
+ ? difference / 2 : (textAlign == Constants.EN_END) ? difference : 0;
+ */
+ startIndent += (bestActiveNode.line == 1 && indentFirstPart && isFirstInBlock)
+ ? textIndent : 0;
+
+ double ratio = (textAlign == Constants.EN_JUSTIFY
+ || difference < 0 && -difference <= bestActiveNode.availableShrink)
+ ? bestActiveNode.adjustRatio : 0;
+
+ // add nodes at the beginning of the list, as they are found
+ // backwards, from the last one to the first one
+
+ // the first time this method is called, initialize activePossibility
+ if (activePossibility == -1) {
+ activePossibility = 0;
+ addedPositions = 0;
+ }
+
+ if (addedPositions == lineLayouts.getLineCount(activePossibility)) {
+ activePossibility++;
+ addedPositions = 0;
+ }
+
++ int lack = difference + bestActiveNode.availableShrink;
++ // if this LLM is nested inside a BlockContainerLayoutManager that is constraining
++ // the available width and thus responsible for the overflow then we do not issue
++ // warning event here and instead let the BCLM handle that at a later stage
++ if (lack < 0 && !handleOverflow(-lack)) {
++ InlineLevelEventProducer eventProducer
++ = InlineLevelEventProducer.Provider.get(
++ getFObj().getUserAgent().getEventBroadcaster());
++ if (curChildLM.getFObj() == null) {
++ eventProducer.lineOverflows(this, getFObj().getName(), bestActiveNode.line,
++ -lack, getFObj().getLocator());
++ } else {
++ eventProducer.lineOverflows(this, curChildLM.getFObj().getName(), bestActiveNode.line,
++ -lack, curChildLM.getFObj().getLocator());
+ }
+ }
+
+ //log.debug("LLM> (" + (lineLayouts.getLineNumber(activePossibility) - addedPositions)
+ // + ") difference = " + difference + " ratio = " + ratio);
+ lineLayouts.addBreakPosition(makeLineBreakPosition(par,
+ (bestActiveNode.line > 1 ? bestActiveNode.previous.position + 1 : 0),
+ bestActiveNode.position,
+ bestActiveNode.availableShrink - (addedPositions > 0
+ ? 0 : ((Paragraph) par).lineFiller.getShrink()),
+ bestActiveNode.availableStretch,
+ difference, ratio, startIndent, endIndent), activePossibility);
+ addedPositions++;
+ }
+
+ /* reset activePossibility, as if breakpoints have not yet been computed
+ */
+ public void resetAlgorithm() {
+ activePossibility = -1;
+ }
+
+ private LineBreakPosition makeLineBreakPosition(KnuthSequence par, int firstElementIndex, int lastElementIndex,
+ int availableShrink, int availableStretch, int difference, double ratio,
+ int startIndent, int endIndent) {
+ // line height calculation - spaceBefore may differ from spaceAfter
+ // by 1mpt due to rounding
+ int spaceBefore = (lineHeight - lead - follow) / 2;
+ int spaceAfter = lineHeight - lead - follow - spaceBefore;
+ // height before the main baseline
+ int lineLead = lead;
+ // maximum follow
+ int lineFollow = follow;
+ // true if this line contains only zero-height, auxiliary boxes
+ // and the actual line width is 0; in this case, the line "collapses"
+ // i.e. the line area will have bpd = 0
+ boolean isZeroHeightLine = (difference == ipd);
+
+ // if line-stacking-strategy is "font-height", the line height
+ // is not affected by its content
+ if (fobj.getLineStackingStrategy() != EN_FONT_HEIGHT) {
+ ListIterator inlineIterator
+ = par.listIterator(firstElementIndex);
+ AlignmentContext lastAC = null;
+ int maxIgnoredHeight = 0; // See spec 7.13
+ for (int j = firstElementIndex;
+ j <= lastElementIndex;
+ j++) {
+ KnuthElement element = (KnuthElement) inlineIterator.next();
+ if (element instanceof KnuthInlineBox) {
+ AlignmentContext ac = ((KnuthInlineBox) element).getAlignmentContext();
+ if (ac != null && lastAC != ac) {
+ if (!ac.usesInitialBaselineTable()
+ || ac.getAlignmentBaselineIdentifier() != EN_BEFORE_EDGE
+ && ac.getAlignmentBaselineIdentifier() != EN_AFTER_EDGE) {
+ if (fobj.getLineHeightShiftAdjustment() == EN_CONSIDER_SHIFTS
+ || ac.getBaselineShiftValue() == 0) {
+ int alignmentOffset = ac.getTotalAlignmentBaselineOffset();
+ if (alignmentOffset + ac.getAltitude() > lineLead) {
+ lineLead = alignmentOffset + ac.getAltitude();
+ }
+ if (ac.getDepth() - alignmentOffset > lineFollow) {
+ lineFollow = ac.getDepth() - alignmentOffset;
+ }
+ }
+ } else {
+ if (ac.getHeight() > maxIgnoredHeight) {
+ maxIgnoredHeight = ac.getHeight();
+ }
+ }
+ lastAC = ac;
+ }
+ if (isZeroHeightLine
+ && (!element.isAuxiliary() || ac != null && ac.getHeight() > 0)) {
+ isZeroHeightLine = false;
+ }
+ }
+ }
+
+ if (lineFollow < maxIgnoredHeight - lineLead) {
+ lineFollow = maxIgnoredHeight - lineLead;
+ }
+ }
+
+ constantLineHeight = lineLead + lineFollow;
+
+ if (isZeroHeightLine) {
+ return new LineBreakPosition(thisLLM,
+ knuthParagraphs.indexOf(par),
+ firstElementIndex, lastElementIndex,
+ availableShrink, availableStretch,
+ difference, ratio, 0, startIndent, endIndent,
+ 0, ipd, 0, 0, 0);
+ } else {
+ return new LineBreakPosition(thisLLM,
+ knuthParagraphs.indexOf(par),
+ firstElementIndex, lastElementIndex,
+ availableShrink, availableStretch,
+ difference, ratio, 0, startIndent, endIndent,
+ lineLead + lineFollow,
+ ipd, spaceBefore, spaceAfter,
+ lineLead);
+ }
+ }
+
+ @Override
+ protected int filterActiveNodes() {
+ KnuthNode bestActiveNode = null;
+
+ if (pageAlignment == EN_JUSTIFY) {
+ // leave all active nodes and find the optimum line number
+ //log.debug("LBA.filterActiveNodes> " + activeNodeCount + " layouts");
+ for (int i = startLine; i < endLine; i++) {
+ for (KnuthNode node = getNode(i); node != null; node = node.next) {
+ //log.debug(" + lines = "
+ //+ node.line + " demerits = " + node.totalDemerits);
+ bestActiveNode = compareNodes(bestActiveNode, node);
+ }
+ }
+
+ // scan the node set once again and remove some nodes
+ //log.debug("LBA.filterActiveList> layout selection");
+ for (int i = startLine; i < endLine; i++) {
+ for (KnuthNode node = getNode(i); node != null; node = node.next) {
+ //if (Math.abs(node.line - bestActiveNode.line) > maxDiff) {
+ //if (false) {
+ if (node.line != bestActiveNode.line
+ && node.totalDemerits > MAX_DEMERITS) {
+ //log.debug(" XXX lines = "
+ //+ node.line + " demerits = " + node.totalDemerits);
+ removeNode(i, node);
+ } else {
+ //log.debug(" ok lines = "
+ //+ node.line + " demerits = " + node.totalDemerits);
+ }
+ }
+ }
+ } else {
+ // leave only the active node with fewest total demerits
+ for (int i = startLine; i < endLine; i++) {
+ for (KnuthNode node = getNode(i); node != null; node = node.next) {
+ bestActiveNode = compareNodes(bestActiveNode, node);
+ if (node != bestActiveNode) {
+ removeNode(i, node);
+ }
+ }
+ }
+ }
+ return bestActiveNode.line;
+ }
+ }
+
+
+ private int constantLineHeight = 12000;
+
+ /**
+ * Create a new Line Layout Manager.
+ * This is used by the block layout manager to create
+ * line managers for handling inline areas flowing into line areas.
+ * @param block the block formatting object
+ * @param lh the default line height
+ * @param l the default lead, from top to baseline
+ * @param f the default follow, from baseline to bottom
+ */
+ public LineLayoutManager(Block block, Length lh, int l, int f) {
+ super(block);
+ fobj = block;
+ // the child FObj are owned by the parent BlockLM
+ // this LM has all its childLMs preloaded
+ fobjIter = null;
+ lineHeight = lh;
+ lead = l;
+ follow = f;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public void initialize() {
+ bidiLevel = fobj.getBidiLevel();
+ textAlignment = fobj.getTextAlign();
+ textAlignmentLast = fobj.getTextAlignLast();
+ textIndent = fobj.getTextIndent();
+ lastLineEndIndent = fobj.getLastLineEndIndent();
+ hyphenationProperties = fobj.getCommonHyphenation();
+ hyphenationLadderCount = fobj.getHyphenationLadderCount();
+ wrapOption = fobj.getWrapOption();
+ whiteSpaceTreament = fobj.getWhitespaceTreatment();
+ //
+ effectiveAlignment = getEffectiveAlignment(textAlignment, textAlignmentLast);
+ isFirstInBlock = (this == getParent().getChildLMs().get(0));
+ }
+
+ private int getEffectiveAlignment(int alignment, int alignmentLast) {
+ if (textAlignment != EN_JUSTIFY && textAlignmentLast == EN_JUSTIFY) {
+ return 0;
+ } else {
+ return textAlignment;
+ }
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public List getNextKnuthElements(LayoutContext context, int alignment) {
+ if (alignmentContext == null) {
+ FontInfo fi = fobj.getFOEventHandler().getFontInfo();
+ FontTriplet[] fontkeys = fobj.getCommonFont().getFontState(fi);
+ Font fs = fi.getFontInstance(fontkeys[0], fobj.getCommonFont().fontSize.getValue(this));
+ alignmentContext = new AlignmentContext(fs, lineHeight.getValue(this),
+ context.getWritingMode());
+ }
+ context.setAlignmentContext(alignmentContext);
+ ipd = context.getRefIPD();
+
+ //PHASE 1: Create Knuth elements
+ if (knuthParagraphs == null) {
+ // it's the first time this method is called
+ knuthParagraphs = new ArrayList<KnuthSequence>();
+
+ // here starts Knuth's algorithm
+ collectInlineKnuthElements(context);
+ } else {
+ // this method has been called before
+ // all line breaks are already calculated
+ }
+
+ // return finished when there's no content
+ if (knuthParagraphs.size() == 0) {
+ setFinished(true);
+ return null;
+ }
+
+ //PHASE 2: Create line breaks
+ return createLineBreaks(context.getBPAlignment(), context);
+ }
+
+ /**
+ * Get a sequence of KnuthElements representing the content
+ * of the node assigned to the LM.
+ * @param context the LayoutContext used to store layout information
+ * @param alignment the desired text alignment
+ * @param restartPosition position at restart
+ * @return the list of KnuthElements
+ * @see LayoutManager#getNextKnuthElements(LayoutContext,int)
+ */
+ public List getNextKnuthElements(LayoutContext context, int alignment,
+ LeafPosition restartPosition) {
+ log.trace("Restarting line breaking from index " + restartPosition.getIndex());
+ int parIndex = restartPosition.getLeafPos();
+ KnuthSequence paragraph = knuthParagraphs.get(parIndex);
+ if (paragraph instanceof Paragraph) {
+ ((Paragraph) paragraph).ignoreAtStart = 0;
+ isFirstInBlock = false;
+ }
+ paragraph.subList(0, restartPosition.getIndex() + 1).clear();
+ Iterator<KnuthElement> iter = paragraph.iterator();
+ while (iter.hasNext() && !iter.next().isBox()) {
+ iter.remove();
+ }
+ if (!iter.hasNext()) {
+ knuthParagraphs.remove(parIndex);
+ }
+
+ // return finished when there's no content
+ if (knuthParagraphs.size() == 0) {
+ setFinished(true);
+ return null;
+ }
+
+ ipd = context.getRefIPD();
+ //PHASE 2: Create line breaks
+ return createLineBreaks(context.getBPAlignment(), context);
+ }
+
+ /**
+ * Phase 1 of Knuth algorithm: Collect all inline Knuth elements before determining line breaks.
+ * @param context the LayoutContext
+ */
+ private void collectInlineKnuthElements(LayoutContext context) {
+ LayoutContext inlineLC = LayoutContext.copyOf(context);
+
+ // convert all the text in a sequence of paragraphs made
+ // of KnuthBox, KnuthGlue and KnuthPenalty objects
+ boolean previousIsBox = false;
+
+ StringBuffer trace = new StringBuffer("LineLM:");
+
+ Paragraph lastPar = null;
+
+ InlineLevelLayoutManager curLM;
+ while ((curLM = (InlineLevelLayoutManager) getChildLM()) != null) {
+ List inlineElements = curLM.getNextKnuthElements(inlineLC, effectiveAlignment);
+ if (inlineElements == null || inlineElements.size() == 0) {
+ /* curLM.getNextKnuthElements() returned null or an empty list;
+ * this can happen if there is nothing more to layout,
+ * so just iterate once more to see if there are other children */
+ continue;
+ }
+
+ if (lastPar != null) {
+ KnuthSequence firstSeq = (KnuthSequence) inlineElements.get(0);
+
+ // finish last paragraph before a new block sequence
+ if (!firstSeq.isInlineSequence()) {
+ lastPar.endParagraph();
+ ElementListObserver.observe(lastPar, "line", null);
+ lastPar = null;
+ if (log.isTraceEnabled()) {
+ trace.append(" ]");
+ }
+ previousIsBox = false;
+ }
+
+ // does the first element of the first paragraph add to an existing word?
+ if (lastPar != null) {
+ KnuthElement thisElement;
+ thisElement = (KnuthElement) firstSeq.get(0);
+ if (thisElement.isBox() && !thisElement.isAuxiliary()
+ && previousIsBox) {
+ lastPar.addALetterSpace();
+ }
+ }
+ }
+
+ // loop over the KnuthSequences (and single KnuthElements) in returnedList
+ ListIterator iter = inlineElements.listIterator();
+ while (iter.hasNext()) {
+ KnuthSequence sequence = (KnuthSequence) iter.next();
+ // the sequence contains inline Knuth elements
+ if (sequence.isInlineSequence()) {
+ // look at the last element
+ ListElement lastElement = sequence.getLast();
+ assert lastElement != null;
+ previousIsBox = lastElement.isBox()
+ && !((KnuthElement) lastElement).isAuxiliary()
+ && ((KnuthElement) lastElement).getWidth() != 0;
+
+ // if last paragraph is open, add the new elements to the paragraph
+ // else this is the last paragraph
+ if (lastPar == null) {
+ lastPar = new Paragraph(this,
+ textAlignment, textAlignmentLast,
+ textIndent.getValue(this),
+ lastLineEndIndent.getValue(this));
+ lastPar.startSequence();
+ if (log.isTraceEnabled()) {
+ trace.append(" [");
+ }
+ } else {
+ if (log.isTraceEnabled()) {
+ trace.append(" +");
+ }
+ }
+ lastPar.addAll(sequence);
+ if (log.isTraceEnabled()) {
+ trace.append(" I");
+ }
+
+ // finish last paragraph if it was closed with a linefeed
+ if (lastElement.isPenalty()
+ && ((KnuthPenalty) lastElement).getPenalty()
+ == -KnuthPenalty.INFINITE) {
+ // a penalty item whose value is -inf
+ // represents a preserved linefeed,
+ // which forces a line break
+ lastPar.removeLast();
+ if (!lastPar.containsBox()) {
+ //only a forced linefeed on this line
+ //-> compensate with an auxiliary glue
+ lastPar.add(new KnuthGlue(ipd, 0, ipd, null, true));
+ }
+ lastPar.endParagraph();
+ ElementListObserver.observe(lastPar, "line", null);
+ lastPar = null;
+ if (log.isTraceEnabled()) {
+ trace.append(" ]");
+ }
+ previousIsBox = false;
+ }
+ } else { // the sequence is a block sequence
+ // the positions will be wrapped with this LM in postProcessLineBreaks
+ knuthParagraphs.add(sequence);
+ if (log.isTraceEnabled()) {
+ trace.append(" B");
+ }
+ }
+ } // end of loop over returnedList
+ }
+
+ if (lastPar != null) {
+ lastPar.endParagraph();
+ ElementListObserver.observe(lastPar, "line", fobj.getId());
+ if (log.isTraceEnabled()) {
+ trace.append(" ]");
+ }
+ }
+ log.trace(trace);
+ }
+
+ /**
+ * Phase 2 of Knuth algorithm: find optimal break points.
+ * @param alignment alignment in BP direction of the paragraph
+ * @param context the layout context
+ * @return a list of Knuth elements representing broken lines
+ */
+ private List<ListElement> createLineBreaks(int alignment, LayoutContext context) {
+ // find the optimal line breaking points for each paragraph
+ Iterator<KnuthSequence> paragraphsIterator = knuthParagraphs.iterator();
+ lineLayoutsList = new LineLayoutPossibilities[knuthParagraphs.size()];
+ LineLayoutPossibilities llPoss;
+ for (int i = 0; paragraphsIterator.hasNext(); i++) {
+ KnuthSequence seq = paragraphsIterator.next();
+ if (!seq.isInlineSequence()) {
+ // This set of line layout possibilities does not matter;
+ // we only need an entry in lineLayoutsList.
+ llPoss = new LineLayoutPossibilities();
+ } else {
+ llPoss = findOptimalBreakingPoints(alignment, (Paragraph) seq,
+ !paragraphsIterator.hasNext());
+ }
+ lineLayoutsList[i] = llPoss;
+ }
+
+ setFinished(true);
+
+ //Post-process the line breaks found
+ return postProcessLineBreaks(alignment, context);
+ }
+
+ /**
+ * Find the optimal line breaks for a paragraph.
+ * @param alignment alignment of the paragraph
+ * @param currPar the Paragraph for which the line breaks are found
+ * @param isLastPar flag indicating whether currPar is the last paragraph
+ * @return the line layout possibilities for the paragraph
+ */
+ private LineLayoutPossibilities findOptimalBreakingPoints(int alignment, Paragraph currPar,
+ boolean isLastPar) {
+ // use the member lineLayouts, which is read by LineBreakingAlgorithm.updateData1 and 2
+ lineLayouts = new LineLayoutPossibilities();
+ double maxAdjustment = 1;
+ LineBreakingAlgorithm alg = new LineBreakingAlgorithm(alignment,
+ textAlignment, textAlignmentLast,
+ textIndent.getValue(this), currPar.lineFiller.getOpt(),
+ lineHeight.getValue(this), lead, follow,
+ (knuthParagraphs.indexOf(currPar) == 0),
+ hyphenationLadderCount.getEnum() == EN_NO_LIMIT
+ ? 0 : hyphenationLadderCount.getValue(),
+ this);
+ alg.setConstantLineWidth(ipd);
+ boolean canWrap = (wrapOption != EN_NO_WRAP);
+ boolean canHyphenate = (canWrap && hyphenationProperties.hyphenate.getEnum() == EN_TRUE);
+
+ // find hyphenation points, if allowed and not yet done
+ if (canHyphenate && !hyphenationPerformed) {
+ // make sure findHyphenationPoints() is bypassed if
+ // the method is called twice (e.g. due to changing page-ipd)
+ hyphenationPerformed = isLastPar;
+ findHyphenationPoints(currPar);
+ }
+
+ // first try: do not consider hyphenation points as legal breaks
+ int allowedBreaks = (canWrap ? BreakingAlgorithm.NO_FLAGGED_PENALTIES
+ : BreakingAlgorithm.ONLY_FORCED_BREAKS);
+ int breakingPoints = alg.findBreakingPoints(currPar, maxAdjustment, false, allowedBreaks);
+
+ if (breakingPoints == 0 || alignment == EN_JUSTIFY) {
+ // if the first try found a set of breaking points, save them
+ if (breakingPoints > 0) {
+ alg.resetAlgorithm();
+ lineLayouts.savePossibilities(false);
+ } else {
+ // the first try failed
+ log.debug("No set of breaking points found with maxAdjustment = " + maxAdjustment);
+ }
+
+ // now try something different
+ log.debug("Hyphenation possible? " + canHyphenate);
+ // Note: if allowedBreaks is guaranteed to be unchanged by alg.findBreakingPoints(),
+ // the below check can be simplified to 'if (canHyphenate) ...'
+ if (canHyphenate && allowedBreaks != BreakingAlgorithm.ONLY_FORCED_BREAKS) {
+ // consider every hyphenation point as a legal break
+ allowedBreaks = BreakingAlgorithm.ALL_BREAKS;
+ } else {
+ // try with a higher threshold
+ maxAdjustment = 5;
+ }
+
+ breakingPoints = alg.findBreakingPoints(currPar, maxAdjustment, false, allowedBreaks);
+ if (breakingPoints == 0) {
+ // the second try failed too, try with a huge threshold
+ // and force the algorithm to find a set of breaking points
+ if (log.isDebugEnabled()) {
+ log.debug("No set of breaking points found with maxAdjustment = "
+ + maxAdjustment + (canHyphenate ? " and hyphenation" : ""));
+ }
+ maxAdjustment = 20;
+ alg.findBreakingPoints(currPar, maxAdjustment, true, allowedBreaks);
+ }
+
+ // use non-hyphenated breaks, when possible
+ lineLayouts.restorePossibilities();
+ }
+
+ return lineLayouts;
+ }
+
+ /**
+ * Creates the element list in BP direction for the broken lines.
+ * @param alignment the currently applicable vertical alignment
+ * @param context the layout context
+ * @return the newly built element list
+ */
+ private List<ListElement> postProcessLineBreaks(int alignment, LayoutContext context) {
+
+ List<ListElement> returnList = new LinkedList<ListElement>();
+
+ int endIndex = -1;
+ for (int p = 0; p < knuthParagraphs.size(); p++) {
+ // penalty between paragraphs
+ if (p > 0) {
+ Keep keep = getKeepTogether();
+ returnList.add(new BreakElement(
+ new Position(this),
+ keep.getPenalty(),
+ keep.getContext(),
+ context));
+ }
+
+ LineLayoutPossibilities llPoss = lineLayoutsList[p];
+ KnuthSequence seq = knuthParagraphs.get(p);
+
+ if (!seq.isInlineSequence()) {
+ List<ListElement> targetList = new LinkedList<ListElement>();
+ ListIterator listIter = seq.listIterator();
+ while (listIter.hasNext()) {
+ ListElement tempElement;
+ tempElement = (ListElement) listIter.next();
+ LayoutManager lm = tempElement.getLayoutManager();
+ if (baselineOffset < 0 && lm != null && lm.hasLineAreaDescendant()) {
+ baselineOffset = lm.getBaselineOffset();
+ }
+ if (lm != this) {
+ tempElement.setPosition(notifyPos(new NonLeafPosition(this,
+ tempElement.getPosition())));
+ }
+ targetList.add(tempElement);
+ }
+ returnList.addAll(targetList);
+ } else if (seq.isInlineSequence() && alignment == EN_JUSTIFY) {
+ /* justified vertical alignment (not in the XSL FO recommendation):
+ create a multi-layout sequence whose elements will contain
+ a conventional Position */
+ Position returnPosition = new LeafPosition(this, p);
+ createElements(returnList, llPoss, returnPosition);
+ } else {
+ /* "normal" vertical alignment: create a sequence whose boxes
+ represent effective lines, and contain LineBreakPositions */
+ int startIndex = 0;
+ int previousEndIndex = 0;
+ for (int i = 0;
+ i < llPoss.getChosenLineCount();
+ i++) {
+ int orphans = fobj.getOrphans();
+ int widows = fobj.getWidows();
+ if (handlingFloat()) {
+ orphans = 1;
+ widows = 1;
+ }
+ if (returnList.size() > 0
+ && i > 0 //if i==0 break generated above already
+ && i >= orphans && i <= llPoss.getChosenLineCount() - widows) {
+ // penalty allowing a page break between lines
+ Keep keep = getKeepTogether();
+ returnList.add(new BreakElement(
+ new LeafPosition(this, p, endIndex),
+ keep.getPenalty(),
+ keep.getContext(),
+ context));
+ }
+ endIndex = llPoss.getChosenPosition(i).getLeafPos();
+ // create a list of the FootnoteBodyLM handling footnotes
+ // whose citations are in this line
+ List<FootnoteBodyLayoutManager> footnoteList = FootenoteUtil.getFootnotes(
+ seq, startIndex, endIndex);
+ List<FloatContentLayoutManager> floats = FloatContentLayoutManager.checkForFloats(seq,
+ startIndex, endIndex);
+ startIndex = endIndex + 1;
+ LineBreakPosition lbp = llPoss.getChosenPosition(i);
+ if (baselineOffset < 0) {
+ baselineOffset = lbp.spaceBefore + lbp.baseline;
+ }
+ if (floats.isEmpty()) {
+ returnList.add(new KnuthBlockBox(lbp.lineHeight + lbp.spaceBefore + lbp.spaceAfter,
+ footnoteList, lbp, false));
+ } else {
+ // add a line with height zero and no content and attach float to it
+ returnList.add(new KnuthBlockBox(0, Collections.emptyList(), null, false, floats));
+ // add a break element to signal that we should restart LB at this break
+ Keep keep = getKeepTogether();
+ returnList.add(new BreakElement(new LeafPosition(this, p, previousEndIndex), keep
+ .getPenalty(), keep.getContext(), context));
+ // add the original line where the float was but without the float now
+ returnList.add(new KnuthBlockBox(lbp.lineHeight + lbp.spaceBefore + lbp.spaceAfter,
+ footnoteList, lbp, false));
+ }
+ previousEndIndex = endIndex;
+ }
+ }
+ }
+
+ return returnList;
+ }
+
+ private void createElements(List<ListElement> list, LineLayoutPossibilities llPoss,
+ Position elementPosition) {
+ /* number of normal, inner lines */
+ int innerLines = 0;
+ /* number of lines that can be used in order to fill more space */
+ int optionalLines = 0;
+ /* number of lines that can be used in order to fill more space
+ only if the paragraph is not parted */
+ int conditionalOptionalLines = 0;
+ /* number of lines that can be omitted in order to fill less space */
+ int eliminableLines = 0;
+ /* number of lines that can be omitted in order to fill less space
+ only if the paragraph is not parted */
+ int conditionalEliminableLines = 0;
+ /* number of the first unbreakable lines */
+ int firstLines = fobj.getOrphans();
+ /* number of the last unbreakable lines */
+ int lastLines = fobj.getWidows();
+ /* sub-sequence used to separate the elements representing different lines */
+ List<KnuthElement> breaker = new LinkedList<KnuthElement>();
+
+ /* comment out the next lines in order to test particular situations */
+ if (fobj.getOrphans() + fobj.getWidows() <= llPoss.getMinLineCount()) {
+ innerLines = llPoss.getMinLineCount() - (fobj.getOrphans() + fobj.getWidows());
+ optionalLines = llPoss.getMaxLineCount() - llPoss.getOptLineCount();
+ eliminableLines = llPoss.getOptLineCount() - llPoss.getMinLineCount();
+ } else if (fobj.getOrphans() + fobj.getWidows() <= llPoss.getOptLineCount()) {
+ optionalLines = llPoss.getMaxLineCount() - llPoss.getOptLineCount();
+ eliminableLines = llPoss.getOptLineCount() - (fobj.getOrphans() + fobj.getWidows());
+ conditionalEliminableLines
+ = (fobj.getOrphans() + fobj.getWidows()) - llPoss.getMinLineCount();
+ } else if (fobj.getOrphans() + fobj.getWidows() <= llPoss.getMaxLineCount()) {
+ optionalLines = llPoss.getMaxLineCount() - (fobj.getOrphans() + fobj.getWidows());
+ conditionalOptionalLines
+ = (fobj.getOrphans() + fobj.getWidows()) - llPoss.getOptLineCount();
+ conditionalEliminableLines = llPoss.getOptLineCount() - llPoss.getMinLineCount();
+ firstLines -= conditionalOptionalLines;
+ } else {
+ conditionalOptionalLines = llPoss.getMaxLineCount() - llPoss.getOptLineCount();
+ conditionalEliminableLines = llPoss.getOptLineCount() - llPoss.getMinLineCount();
+ firstLines = llPoss.getOptLineCount();
+ lastLines = 0;
+ }
+ /* comment out the previous lines in order to test particular situations */
+
+ /* use these lines to test particular situations
+ innerLines = 0;
+ optionalLines = 1;
+ conditionalOptionalLines = 2;
+ eliminableLines = 0;
+ conditionalEliminableLines = 0;
+ firstLines = 1;
+ lastLines = 3;
+ */
+
+ if (lastLines != 0
+ && (conditionalOptionalLines > 0 || conditionalEliminableLines > 0)) {
+ breaker.add(new KnuthPenalty(0, KnuthElement.INFINITE, false, elementPosition, false));
+ breaker.add(new KnuthGlue(0, -conditionalOptionalLines * constantLineHeight,
+ -conditionalEliminableLines * constantLineHeight,
+ Adjustment.LINE_NUMBER_ADJUSTMENT, elementPosition, false));
+ breaker.add(new KnuthPenalty(conditionalOptionalLines * constantLineHeight,
+ 0, false, elementPosition, false));
+ breaker.add(new KnuthGlue(0, conditionalOptionalLines * constantLineHeight,
+ conditionalEliminableLines * constantLineHeight,
+ Adjustment.LINE_NUMBER_ADJUSTMENT, elementPosition, false));
+ } else if (lastLines != 0) {
+ breaker.add(new KnuthPenalty(0, 0, false, elementPosition, false));
+ }
+
+ // creation of the elements:
+ // first group of lines
+ list.add(new KnuthBox(firstLines * constantLineHeight, elementPosition,
+ (lastLines == 0
+ && conditionalOptionalLines == 0
+ && conditionalEliminableLines == 0)));
+ if (conditionalOptionalLines > 0
+ || conditionalEliminableLines > 0) {
+ list.add(new KnuthPenalty(0, KnuthElement.INFINITE, false, elementPosition, false));
+ list.add(new KnuthGlue(0, conditionalOptionalLines * constantLineHeight,
+ conditionalEliminableLines * constantLineHeight,
+ Adjustment.LINE_NUMBER_ADJUSTMENT, elementPosition, false));
+ list.add(new KnuthBox(0, elementPosition, (lastLines == 0)));
+ }
+
+ // optional lines
+ for (int i = 0; i < optionalLines; i++) {
+ list.addAll(breaker);
+ list.add(new KnuthBox(0, elementPosition, false));
+ list.add(new KnuthPenalty(0, KnuthElement.INFINITE, false, elementPosition, false));
+ list.add(new KnuthGlue(0, constantLineHeight, 0,
+ Adjustment.LINE_NUMBER_ADJUSTMENT, elementPosition, false));
+ list.add(new KnuthBox(0, elementPosition, false));
+ }
+
+ // eliminable lines
+ for (int i = 0; i < eliminableLines; i++) {
+ list.addAll(breaker);
+ list.add(new KnuthBox(constantLineHeight, elementPosition, false));
+ list.add(new KnuthPenalty(0, KnuthElement.INFINITE, false, elementPosition, false));
+ list.add(new KnuthGlue(0, 0, constantLineHeight,
+ Adjustment.LINE_NUMBER_ADJUSTMENT, elementPosition, false));
+ list.add(new KnuthBox(0, elementPosition, false));
+ }
+
+ // inner lines
+ for (int i = 0; i < innerLines; i++) {
+ list.addAll(breaker);
+ list.add(new KnuthBox(constantLineHeight, elementPosition, false));
+ }
+
+ // last group of lines
+ if (lastLines > 0) {
+ list.addAll(breaker);
+ list.add(new KnuthBox(lastLines * constantLineHeight,
+ elementPosition, true));
+ }
+ }
+
+ /** {@inheritDoc} */
+ public boolean mustKeepTogether() {
+ return ((BlockLevelLayoutManager) getParent()).mustKeepTogether();
+ }
+
+ /** {@inheritDoc} */
+ public KeepProperty getKeepTogetherProperty() {
+ return ((BlockLevelLayoutManager) getParent()).getKeepTogetherProperty();
+ }
+
+ /** {@inheritDoc} */
+ public KeepProperty getKeepWithPreviousProperty() {
+ return ((BlockLevelLayoutManager) getParent()).getKeepWithPreviousProperty();
+ }
+
+ /** {@inheritDoc} */
+ public KeepProperty getKeepWithNextProperty() {
+ return ((BlockLevelLayoutManager) getParent()).getKeepWithNextProperty();
+ }
+
+ /** {@inheritDoc} */
+ public Keep getKeepTogether() {
+ return ((BlockLevelLayoutManager) getParent()).getKeepTogether();
+ }
+
+ /** {@inheritDoc} */
+ public boolean mustKeepWithPrevious() {
+ return !getKeepWithPrevious().isAuto();
+ }
+
+ /** {@inheritDoc} */
+ public boolean mustKeepWithNext() {
+ return !getKeepWithNext().isAuto();
+ }
+
+ /** {@inheritDoc} */
+ public Keep getKeepWithNext() {
+ return Keep.KEEP_AUTO;
+ }
+
+ /** {@inheritDoc} */
+ public Keep getKeepWithPrevious() {
+ return Keep.KEEP_AUTO;
+ }
+
+ /** {@inheritDoc} */
+ public int negotiateBPDAdjustment(int adj, KnuthElement lastElement) {
+ Position lastPos = lastElement.getPosition();
+ assert (lastPos instanceof LeafPosition);
+ LeafPosition pos = (LeafPosition) lastPos;
+ //if (lastElement.isPenalty()) {
+ // totalAdj += lastElement.getWidth();
+ //}
+ //int lineNumberDifference = (int)((double) totalAdj / constantLineHeight);
+ int lineNumberDifference = (int) Math.round((double) adj / constantLineHeight
+ + (adj > 0 ? -0.4 : 0.4));
+ //log.debug(" LLM> variazione calcolata = " + ((double) totalAdj / constantLineHeight)
+ //+ " variazione applicata = " + lineNumberDifference);
+ LineLayoutPossibilities llPoss;
+ llPoss = lineLayoutsList[pos.getLeafPos()];
+ lineNumberDifference = llPoss.applyLineCountAdjustment(lineNumberDifference);
+ return lineNumberDifference * constantLineHeight;
+ }
+
+ /** {@inheritDoc} */
+ public void discardSpace(KnuthGlue spaceGlue) {
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public List getChangedKnuthElements(List oldList, int alignment, int depth) {
+ return getChangedKnuthElements(oldList, alignment);
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public List getChangedKnuthElements(List oldList, int alignment) {
+ List<KnuthElement> returnList = new LinkedList<KnuthElement>();
+ for (int p = 0; p < knuthParagraphs.size(); p++) {
+ LineLayoutPossibilities llPoss = lineLayoutsList[p];
+ //log.debug("demerits of the chosen layout: " + llPoss.getChosenDemerits());
+ int orphans = fobj.getOrphans();
+ int widows = fobj.getWidows();
+ if (handlingFloat()) {
+ orphans = 1;
+ widows = 1;
+ }
+ for (int i = 0; i < llPoss.getChosenLineCount(); i++) {
+ if (!((BlockLevelLayoutManager) parentLayoutManager).mustKeepTogether() && i >= orphans
+ && i <= llPoss.getChosenLineCount() - widows) {
+ // null penalty allowing a page break between lines
+ returnList.add(new KnuthPenalty(0, 0, false, new Position(this), false));
+ }
+ LineBreakPosition lbp = llPoss.getChosenPosition(i);
+ //log.debug("LLM.getChangedKnuthElements> lineWidth= "
+ // + lbp.lineWidth + " difference= " + lbp.difference);
+ //log.debug(" shrink= "
+ // + lbp.availableShrink + " stretch= " + lbp.availableStretch);
+ //log.debug("linewidth= " + lbp.lineWidth + " difference= "
+ //+ lbp.difference + " indent= " + lbp.startIndent);
+ MinOptMax contentIPD;
+ if (alignment == EN_JUSTIFY) {
+ contentIPD = MinOptMax.getInstance(
+ lbp.lineWidth - lbp.difference - lbp.availableShrink,
+ lbp.lineWidth - lbp.difference,
+ lbp.lineWidth - lbp.difference + lbp.availableStretch);
+ } else if (alignment == EN_CENTER) {
+ contentIPD = MinOptMax.getInstance(lbp.lineWidth - 2 * lbp.startIndent);
+ } else if (alignment == EN_END) {
+ contentIPD = MinOptMax.getInstance(lbp.lineWidth - lbp.startIndent);
+ } else {
+ contentIPD
+ = MinOptMax.getInstance(lbp.lineWidth - lbp.difference + lbp.startIndent);
+ }
+ returnList.add(new KnuthBlockBox(lbp.lineHeight, contentIPD, (lbp.ipdAdjust != 0
+ ? lbp.lineWidth - lbp.difference : 0),
+ lbp, false));
+ }
+ }
+ return returnList;
+ }
+
+ /**
+ * Find hyphenation points for every word in the current paragraph.
+ *
+ * @param currPar the paragraph whose words will be hyphenated
+ */
+ private void findHyphenationPoints(Paragraph currPar) {
+ // hyphenate every word
+ ListIterator currParIterator = currPar.listIterator(currPar.ignoreAtStart);
+ // list of TLM involved in hyphenation
+ List updateList = new LinkedList();
+ KnuthElement firstElement;
+ KnuthElement nextElement;
+ // current InlineLevelLayoutManager
+ InlineLevelLayoutManager currLM = null;
+ // number of KnuthBox elements containing word fragments
+ int boxCount;
+ // number of auxiliary KnuthElements between KnuthBoxes
+ int auxCount;
+ StringBuffer sbChars;
+
+ // find all hyphenation points
+ while (currParIterator.hasNext()) {
+ firstElement = (KnuthElement) currParIterator.next();
+ //
+ if (firstElement.getLayoutManager() != currLM) {
+ currLM = (InlineLevelLayoutManager) firstElement.getLayoutManager();
+ if (currLM != null) {
+ updateList.add(new Update(currLM, currParIterator.previousIndex()));
+ } else {
+ break;
+ }
+ } else if (currLM == null) {
+ break;
+ }
+
+ // collect word fragments, ignoring auxiliary elements;
+ // each word fragment was created by a different TextLM
+ if (firstElement.isBox() && !firstElement.isAuxiliary()) {
+ boxCount = 1;
+ auxCount = 0;
+ sbChars = new StringBuffer();
+ sbChars.append(currLM.getWordChars(firstElement.getPosition()));
+ // look if next elements are boxes too
+ while (currParIterator.hasNext()) {
+ nextElement = (KnuthElement) currParIterator.next();
+ if (nextElement.isBox() && !nextElement.isAuxiliary()) {
+ // a non-auxiliary KnuthBox: append word chars
+ if (currLM != nextElement.getLayoutManager()) {
+ currLM = (InlineLevelLayoutManager) nextElement.getLayoutManager();
+ updateList.add(new Update(currLM, currParIterator.previousIndex()));
+ }
+ // append text to recreate the whole word
+ boxCount++;
+ sbChars.append(currLM.getWordChars(nextElement.getPosition()));
+ } else if (!nextElement.isAuxiliary()) {
+ // a non-auxiliary non-box KnuthElement: stop
+ // go back to the last box or auxiliary element
+ currParIterator.previous();
+ break;
+ } else {
+ if (currLM != nextElement.getLayoutManager()) {
+ currLM = (InlineLevelLayoutManager) nextElement.getLayoutManager();
+ updateList.add(new Update(currLM, currParIterator.previousIndex()));
+ }
+ // an auxiliary KnuthElement: simply ignore it
+ auxCount++;
+ }
+ }
+ if (log.isTraceEnabled()) {
+ log.trace(" Word to hyphenate: " + sbChars.toString());
+ }
+ // find hyphenation points
+ HyphContext hc = getHyphenContext(sbChars);
+ // ask each LM to hyphenate its word fragment
+ if (hc != null) {
+ KnuthElement element = null;
+ for (int i = 0; i < (boxCount + auxCount); i++) {
+ currParIterator.previous();
+ }
+ for (int i = 0; i < (boxCount + auxCount); i++) {
+ element = (KnuthElement) currParIterator.next();
+ if (element.isBox() && !element.isAuxiliary()) {
+ ((InlineLevelLayoutManager)
+ element.getLayoutManager()).hyphenate(element.getPosition(), hc);
+ } else {
+ // nothing to do, element is an auxiliary KnuthElement
+ }
+ }
+ }
+ }
+ }
+ processUpdates(currPar, updateList);
+ }
+
+ private void processUpdates(Paragraph par, List updateList) {
+ // create iterator for the updateList
+ ListIterator updateListIterator = updateList.listIterator();
+ Update currUpdate;
+ int elementsAdded = 0;
+
+ while (updateListIterator.hasNext()) {
+ // ask the LMs to apply the changes and return
+ // the new KnuthElements to replace the old ones
+ currUpdate = (Update) updateListIterator.next();
+ int fromIndex = currUpdate.firstIndex;
+ int toIndex;
+ if (updateListIterator.hasNext()) {
+ Update nextUpdate = (Update) updateListIterator.next();
+ toIndex = nextUpdate.firstIndex;
+ updateListIterator.previous();
+ } else {
+ // maybe this is not always correct!
+ toIndex = par.size() - par.ignoreAtEnd
+ - elementsAdded;
+ }
+
+ // applyChanges() returns true if the LM modifies its data,
+ // so it must return new KnuthElements to replace the old ones
+ if (currUpdate.inlineLM
+ .applyChanges(par.subList(fromIndex + elementsAdded,
+ toIndex + elementsAdded))) {
+ // insert the new KnuthElements
+ List newElements = currUpdate.inlineLM.getChangedKnuthElements(
+ par.subList(fromIndex + elementsAdded,
+ toIndex + elementsAdded),
+ /*flaggedPenalty,*/ effectiveAlignment);
+ // remove the old elements
+ par.subList(fromIndex + elementsAdded,
+ toIndex + elementsAdded).clear();
+ // insert the new elements
+ par.addAll(fromIndex + elementsAdded, newElements);
+ elementsAdded += newElements.size() - (toIndex - fromIndex);
+ }
+ }
+ updateList.clear();
+ }
+
+ /**
+ * Line area is always considered to act as a fence.
+ * @param isNotFirst ignored
+ * @return always true
+ */
+ @Override
+ protected boolean hasLeadingFence(boolean isNotFirst) {
+ return true;
+ }
+
+ /**
+ * Line area is always considered to act as a fence.
+ * @param isNotLast ignored
+ * @return always true
+ */
+ @Override
+ protected boolean hasTrailingFence(boolean isNotLast) {
+ return true;
+ }
+
+ private HyphContext getHyphenContext(StringBuffer sbChars) {
+ // Find all hyphenation points in this word
+ // (get in an array of offsets)
+ // hyphenationProperties are from the block level?.
+ // Note that according to the spec,
+ // they also "apply to" fo:character.
+ // I don't know what that means, since
+ // if we change language in the middle of a "word",
+ // the effect would seem quite strange!
+ // Or perhaps in that case, we say that it's several words.
+ // We probably should bring the hyphenation props up from the actual
+ // TextLM which generates the hyphenation buffer,
+ // since these properties inherit and could be specified
+ // on an inline or wrapper below the block level.
+ Hyphenation hyph = Hyphenator.hyphenate(hyphenationProperties.language.getString(),
+ hyphenationProperties.country.getString(),
+ getFObj().getUserAgent().getResourceResolver(),
+ getFObj().getUserAgent().getHyphenationPatternNames(),
+ sbChars.toString(),
+ hyphenationProperties.hyphenationRemainCharacterCount.getValue(),
+ hyphenationProperties.hyphenationPushCharacterCount.getValue());
+ // The hyph structure contains the information we need
+ // Now start from prev: reset to that position, ask that LM to get
+ // a Position for the first hyphenation offset. If the offset isn't in
+ // its characters, it returns null,
+ // but must tell how many chars it had.
+ // Keep looking at currentBP using next hyphenation point until the
+ // returned size is greater than the available size
+ // or no more hyphenation points remain. Choose the best break.
+ if (hyph != null) {
+ return new HyphContext(hyph.getHyphenationPoints());
+ } else {
+ return null;
+ }
+ }
+
+ @Override
+ public boolean hasLineAreaDescendant() {
+ return true;
+ }
+
+ @Override
+ public int getBaselineOffset() {
+ return baselineOffset;
+ }
+
+ /**
+ * Add the areas with the break points.
+ *
+ * @param parentIter the iterator of break positions
+ * @param context the context for adding areas
+ */
+ @Override
+ public void addAreas(PositionIterator parentIter,
+ LayoutContext context) {
+ while (parentIter.hasNext()) {
+ Position pos = parentIter.next();
+ boolean isLastPosition = !parentIter.hasNext();
+ if (pos instanceof LineBreakPosition) {
+ addInlineArea(context, (LineBreakPosition) pos, isLastPosition);
+ } else if ((pos instanceof NonLeafPosition) && pos.generatesAreas()) {
+ addBlockArea(context, pos, isLastPosition);
+ } else {
+ /*
+ * pos was the Position inside a penalty item, nothing to do;
+ * or Pos does not generate an area,
+ * i.e. it stands for spaces, borders and padding.
+ */
+ }
+ }
+ setCurrentArea(null); // ?? necessary
+ }
+
+ /**
+ * Add a line with inline content
+ * @param context the context for adding areas
+ * @param lbp the position for which the line is generated
+ * @param isLastPosition true if this is the last position of this LM
+ */
+ private void addInlineArea(LayoutContext context, LineBreakPosition lbp,
+ boolean isLastPosition) {
+
+ KnuthSequence seq = knuthParagraphs.get(lbp.parIndex);
+ int startElementIndex = lbp.startIndex;
+ int endElementIndex = lbp.getLeafPos();
+
+ LineArea lineArea = new LineArea(
+ (lbp.getLeafPos() < seq.size() - 1 ? textAlignment : textAlignmentLast),
+ lbp.difference, lbp.availableStretch, lbp.availableShrink);
+ if (lbp.startIndent != 0) {
+ lineArea.addTrait(Trait.START_INDENT, lbp.startIndent);
+ }
+ if (lbp.endIndent != 0) {
+ lineArea.addTrait(Trait.END_INDENT, new Integer(lbp.endIndent));
+ }
+ lineArea.setBPD(lbp.lineHeight);
+ lineArea.setIPD(lbp.lineWidth);
+ lineArea.setBidiLevel(bidiLevel);
+ lineArea.addTrait(Trait.SPACE_BEFORE, lbp.spaceBefore);
+ lineArea.addTrait(Trait.SPACE_AFTER, lbp.spaceAfter);
+ alignmentContext.resizeLine(lbp.lineHeight, lbp.baseline);
+
+ if (seq instanceof Paragraph) {
+ Paragraph currPar = (Paragraph) seq;
+ // ignore the first elements added by the LineLayoutManager
+ startElementIndex += (startElementIndex == 0) ? currPar.ignoreAtStart : 0;
+
+ // if this is the last line area for this paragraph,
+ // ignore the last elements added by the LineLayoutManager and
+ // subtract the last-line-end-indent from the area ipd
+ if (endElementIndex == (currPar.size() - 1)) {
+ endElementIndex -= currPar.ignoreAtEnd;
+ lineArea.setIPD(lineArea.getIPD() - lastLineEndIndent.getValue(this));
+ }
+ }
+
+ // ignore the last element in the line if it is a KnuthGlue object
+ ListIterator seqIterator = seq.listIterator(endElementIndex);
+ KnuthElement lastElement = (KnuthElement) seqIterator.next();
+ // the TLM which created the last KnuthElement in this line
+ LayoutManager lastLM = lastElement.getLayoutManager();
+ if (lastElement.isGlue()) {
+ // Remove trailing spaces if allowed so
+ if (whiteSpaceTreament == EN_IGNORE_IF_SURROUNDING_LINEFEED
+ || whiteSpaceTreament == EN_IGNORE
+ || whiteSpaceTreament == EN_IGNORE_IF_BEFORE_LINEFEED) {
+ endElementIndex--;
+ // this returns the same KnuthElement
+ seqIterator.previous();
+ if (seqIterator.hasPrevious()) {
+ lastLM = ((KnuthElement) seqIterator.previous()).getLayoutManager();
+ }
+ }
+ }
+
+ // Remove leading spaces if allowed so
+ if (whiteSpaceTreament == EN_IGNORE_IF_SURROUNDING_LINEFEED
+ || whiteSpaceTreament == EN_IGNORE
+ || whiteSpaceTreament == EN_IGNORE_IF_AFTER_LINEFEED) {
+ // ignore KnuthGlue and KnuthPenalty objects
+ // at the beginning of the line
+ seqIterator = seq.listIterator(startElementIndex);
+ while (seqIterator.hasNext() && !((KnuthElement) seqIterator.next()).isBox()) {
+ startElementIndex++;
+ }
+ }
+ // Add the inline areas to lineArea
+ PositionIterator inlinePosIter = new KnuthPossPosIter(seq, startElementIndex,
+ endElementIndex + 1);
+
+ LayoutContext lc = LayoutContext.offspringOf(context);
+ lc.setAlignmentContext(alignmentContext);
+ lc.setSpaceAdjust(lbp.dAdjust);
+ lc.setIPDAdjust(lbp.ipdAdjust);
+ lc.setLeadingSpace(new SpaceSpecifier(true));
+ lc.setTrailingSpace(new SpaceSpecifier(false));
+ lc.setFlags(LayoutContext.RESOLVE_LEADING_SPACE, true);
+
+ setCurrentArea(lineArea);
+ setChildContext(lc);
+ LayoutManager childLM;
+ while ((childLM = inlinePosIter.getNextChildLM()) != null) {
+ lc.setFlags(LayoutContext.LAST_AREA, (childLM == lastLM));
+ childLM.addAreas(inlinePosIter, lc);
+ lc.setLeadingSpace(lc.getTrailingSpace());
+ lc.setTrailingSpace(new SpaceSpecifier(false));
+ }
+
+ // if display-align is distribute, add space after
+ if (context.getSpaceAfter() > 0
+ && (!context.isLastArea() || !isLastPosition)) {
+ lineArea.setBPD(lineArea.getBPD() + context.getSpaceAfter());
+ }
+ lineArea.finish();
+ if (lineArea.getBidiLevel() >= 0) {
+ BidiResolver.reorder(lineArea);
+ }
+ parentLayoutManager.addChildArea(lineArea);
+ }
+
+ /**
+ * Add a line with block content
+ * @param context the context for adding areas
+ * @param pos the position for which the line is generated
+ * @param isLastPosition true if this is the last position of this LM
+ */
+ private void addBlockArea(LayoutContext context, Position pos, boolean isLastPosition) {
+ /* Nested block-level content;
+ * go down the LM stack again;
+ * "unwrap" the positions and put the child positions in a new list.
+ * The positionList must contain one area-generating position,
+ * which creates one line area.
+ */
+ List positionList = new ArrayList(1);
+ Position innerPosition = pos.getPosition();
+ positionList.add(innerPosition);
+
+ // do we have the last LM?
+ LayoutManager lastLM = null;
+ if (isLastPosition) {
+ lastLM = innerPosition.getLM();
+ }
+
+ LineArea lineArea = new LineArea();
+ setCurrentArea(lineArea);
+ LayoutContext lc = LayoutContext.newInstance();
+ lc.setAlignmentContext(alignmentContext);
+ setChildContext(lc);
+
+ PositionIterator childPosIter = new PositionIterator(positionList.listIterator());
+ LayoutContext blocklc = LayoutContext.offspringOf(context);
+ blocklc.setLeadingSpace(new SpaceSpecifier(true));
+ blocklc.setTrailingSpace(new SpaceSpecifier(false));
+ blocklc.setFlags(LayoutContext.RESOLVE_LEADING_SPACE, true);
+ LayoutManager childLM;
+ while ((childLM = childPosIter.getNextChildLM()) != null) {
+ // set last area flag
+ blocklc.setFlags(LayoutContext.LAST_AREA,
+ (context.isLastArea() && childLM == lastLM));
+ blocklc.setStackLimitBP(context.getStackLimitBP());
+ // Add the line areas to Area
+ childLM.addAreas(childPosIter, blocklc);
+ blocklc.setLeadingSpace(blocklc.getTrailingSpace());
+ blocklc.setTrailingSpace(new SpaceSpecifier(false));
+ }
+ lineArea.updateExtentsFromChildren();
+ if (lineArea.getBidiLevel() >= 0) {
+ BidiResolver.reorder(lineArea);
+ }
+ parentLayoutManager.addChildArea(lineArea);
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public void addChildArea(Area childArea) {
+ // Make sure childArea is inline area
+ if (childArea instanceof InlineArea) {
+ Area parent = getCurrentArea();
+ if (getContext().resolveLeadingSpace()) {
+ addSpace(parent, getContext().getLeadingSpace().resolve(false),
+ getContext().getSpaceAdjust());
+ }
+ parent.addChildArea(childArea);
+ }
+ }
+
+ // --------- Property Resolution related functions --------- //
+
+ /** {@inheritDoc} */
+ @Override
+ public boolean getGeneratesBlockArea() {
+ return true;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public boolean getGeneratesLineArea() {
+ return true;
+ }
+
+ /** {@inheritDoc} */
+ @Override
+ public boolean isRestartable() {
+ return true;
+ }
+
+ /**
+ * Whether this LM can handle horizontal overflow error messages; the request is
+ * delegated to the parent when it is a BlockLayoutManager.
+ * @param milliPoints horizontal overflow
+ * @return true if handled by the parent layout manager
+ */
+ public boolean handleOverflow(int milliPoints) {
+ if (getParent() instanceof BlockLayoutManager) {
+ return ((BlockLayoutManager) getParent()).handleOverflow(milliPoints);
+ }
+ return false;
+ }
+ }
--- /dev/null
- TTFTableNameTestCase.class })
+ /*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ /* $Id$ */
+
+ package org.apache.fop.fonts;
+
+ import org.junit.runner.RunWith;
+ import org.junit.runners.Suite;
+ import org.junit.runners.Suite.SuiteClasses;
+
++import org.apache.fop.fonts.cff.CFFDataReaderTestCase;
+ import org.apache.fop.fonts.truetype.FontFileReaderTestCase;
++import org.apache.fop.fonts.truetype.OTFSubSetFileTestCase;
+ import org.apache.fop.fonts.truetype.TTFFileTestCase;
+ import org.apache.fop.fonts.truetype.TTFSubSetFileTestCase;
+ import org.apache.fop.fonts.truetype.TTFTableNameTestCase;
+
+ /**
+ * A test suite designed for org.apache.fop.fonts.*
+ */
+ @RunWith(Suite.class)
+ @SuiteClasses({
+ EncodingModeTestCase.class,
+ FontFileReaderTestCase.class,
+ TTFFileTestCase.class,
+ TTFSubSetFileTestCase.class,
++ TTFTableNameTestCase.class,
++ CFFDataReaderTestCase.class,
++ OTFSubSetFileTestCase.class })
+ public final class FOPFontsTestSuite {
+ }
--- /dev/null
- 392, new int[] { 0 }, -1));
+ /*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ /* $Id$ */
+
+ package org.apache.fop.fonts.cff;
+
+ import java.io.IOException;
+ import java.util.Map;
+ import java.util.Random;
+
+ import org.junit.Before;
+ import org.junit.Test;
+
+ import static org.junit.Assert.assertEquals;
+
+ import org.apache.fontbox.cff.CFFDataInput;
+
+ import org.apache.fop.fonts.cff.CFFDataReader.CFFIndexData;
+ import org.apache.fop.fonts.cff.CFFDataReader.DICTEntry;
+ import org.apache.fop.fonts.truetype.OTFSubSetFile;
+
+ public class CFFDataReaderTestCase {
+ private CFFDataReader cffReader;
+
+ /**
+ * Initializes the CFFDataReader for testing purposes
+ */
+ @Before
+ public void setUp() {
+ cffReader = new CFFDataReader();
+ }
+
+ /**
+ * Parses a test dictionary to verify whether the stored data is read correctly.
+ * @throws IOException
+ */
+ @Test
+ public void parseDictData() throws IOException {
+ byte[] testDictData = prepareDictData();
+ Map<String, DICTEntry> testTopDict = cffReader.parseDictData(testDictData);
+ validateDictData(testTopDict);
+ }
+
+ private byte[] prepareDictData() {
+ byte[] testDictData = new byte[0];
+ //Version
+ testDictData = OTFSubSetFile.concatArray(testDictData, OTFSubSetFile.createNewRef(
- 393, new int[] { 1 }, -1));
++ 392, new int[] { 0 }, -1, true));
+ //Notice
+ testDictData = OTFSubSetFile.concatArray(testDictData, OTFSubSetFile.createNewRef(
- 394, new int[] { 12, 0 }, -1));
++ 393, new int[] { 1 }, -1, true));
+ //Copyright
+ testDictData = OTFSubSetFile.concatArray(testDictData, OTFSubSetFile.createNewRef(
- 395, new int[] { 2 }, -1));
++ 394, new int[] { 12, 0 }, -1, true));
+ //FullName
+ testDictData = OTFSubSetFile.concatArray(testDictData, OTFSubSetFile.createNewRef(
- 396, new int[] { 3 }, -1));
++ 395, new int[] { 2 }, -1, true));
+ //FamilyName
+ testDictData = OTFSubSetFile.concatArray(testDictData, OTFSubSetFile.createNewRef(
- 397, new int[] { 4 }, -1));
++ 396, new int[] { 3 }, -1, true));
+ //Weight
+ testDictData = OTFSubSetFile.concatArray(testDictData, OTFSubSetFile.createNewRef(
- 0, new int[] { 12, 1 }, -1));
++ 397, new int[] { 4 }, -1, true));
+ //isFixedPitch (boolean = false)
+ testDictData = OTFSubSetFile.concatArray(testDictData, OTFSubSetFile.createNewRef(
- -50, new int[0], -1));
++ 0, new int[] { 12, 1 }, -1, true));
+ //FontBBox
+ testDictData = OTFSubSetFile.concatArray(testDictData, OTFSubSetFile.createNewRef(
- -40, new int[0], -1));
++ -50, new int[0], -1, true));
+ testDictData = OTFSubSetFile.concatArray(testDictData, OTFSubSetFile.createNewRef(
- 100, new int[0], -1));
++ -40, new int[0], -1, true));
+ testDictData = OTFSubSetFile.concatArray(testDictData, OTFSubSetFile.createNewRef(
- 120, new int[] { 5 }, -1));
++ 100, new int[0], -1, true));
+ testDictData = OTFSubSetFile.concatArray(testDictData, OTFSubSetFile.createNewRef(
- 1234, new int[] { 15 }, -1));
++ 120, new int[] { 5 }, -1, true));
+ //charset
+ testDictData = OTFSubSetFile.concatArray(testDictData, OTFSubSetFile.createNewRef(
- 3654, new int[] { 17 }, -1));
++ 1234, new int[] { 15 }, -1, true));
+ //CharStrings
+ testDictData = OTFSubSetFile.concatArray(testDictData, OTFSubSetFile.createNewRef(
- 11454, new int[] { 18 }, -1));
++ 3654, new int[] { 17 }, -1, true));
+ //Private
+ testDictData = OTFSubSetFile.concatArray(testDictData, OTFSubSetFile.createNewRef(
++ 11454, new int[] { 18 }, -1, true));
+ return testDictData;
+ }
+
+ private void validateDictData(Map<String, DICTEntry> dictMap) {
+ //SID Values (numbers)
+ assertEquals(dictMap.get("version").getOperands().get(0).intValue(), 392);
+ assertEquals(dictMap.get("Notice").getOperands().get(0).intValue(), 393);
+ assertEquals(dictMap.get("Copyright").getOperands().get(0).intValue(), 394);
+ assertEquals(dictMap.get("FullName").getOperands().get(0).intValue(), 395);
+ assertEquals(dictMap.get("FamilyName").getOperands().get(0).intValue(), 396);
+ assertEquals(dictMap.get("Weight").getOperands().get(0).intValue(), 397);
+ //Boolean comparison
+ assertEquals(dictMap.get("isFixedPitch").getOperands().get(0).intValue(), 0);
+ //Array comparison
+ int[] fontBBox = { -50, -40, 100, 120 };
+ DICTEntry fontBBoxEntry = dictMap.get("FontBBox");
+ for (int i = 0; i < fontBBoxEntry.getOperands().size(); i++) {
+ assertEquals(fontBBoxEntry.getOperands().get(i).intValue(), fontBBox[i]);
+ }
+ //Multi-byte offset (number)
+ assertEquals(dictMap.get("charset").getOperands().get(0).intValue(), 1234);
+ assertEquals(dictMap.get("CharStrings").getOperands().get(0).intValue(), 3654);
+ //Larger offset
+ assertEquals(dictMap.get("Private").getOperands().get(0).intValue(), 11454);
+ }
+
+ /**
+ * Tests the parsing of an example byte data index structure
+ * @throws IOException
+ */
+ @Test
+ public void testIndexParsing() throws IOException {
+ byte[] testIndex = {
+ 0, 5, //Number of objects
+ 1, //Offset size
+ 1, //Offsets...
+ 5,
+ 12,
+ 24,
+ 27,
+ 32
+ };
+ Random randGen = new Random();
+ byte[] data = new byte[31];
+ for (int i = 0; i < data.length; i++) {
+ data[i] = (byte)randGen.nextInt(256);
+ }
+ testIndex = OTFSubSetFile.concatArray(testIndex, data);
+ CFFIndexData indexData = cffReader.readIndex(new CFFDataInput(testIndex));
+ assertEquals(indexData.getNumObjects(), 5);
+ assertEquals(indexData.getOffSize(), 1);
+ assertEquals(indexData.getOffsets().length, 6);
+ assertEquals(indexData.getOffsets()[5], 32);
+ }
+ }
--- /dev/null
-import java.util.Arrays;
+ /*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ /* $Id$ */
+
+ package org.apache.fop.fonts.truetype;
+
+ import java.io.IOException;
+ import java.util.ArrayList;
- CFFDataReader cffReaderHeitiStd;
+ import java.util.HashMap;
+ import java.util.List;
+ import java.util.Map;
+
+ import org.junit.Before;
+ import org.junit.Test;
+
+ import static org.junit.Assert.assertEquals;
+ import static org.junit.Assert.assertTrue;
+
+ import org.apache.fontbox.cff.CFFFont;
+
+ import org.apache.fop.fonts.cff.CFFDataReader;
+ import org.apache.fop.fonts.cff.CFFDataReader.CFFIndexData;
+ import org.apache.fop.fonts.cff.CFFDataReader.DICTEntry;
+ import org.apache.fop.fonts.truetype.OTFSubSetFile.BytesNumber;
+
+ public class OTFSubSetFileTestCase extends OTFFileTestCase {
+
+ CFFDataReader cffReaderSourceSans;
+ private OTFSubSetFile sourceSansSubset;
+ private byte[] sourceSansData;
- List<BytesNumber> origOperands = getFullCharString(origCharData, origCFF);
- List<BytesNumber> subsetOperands = getFullCharString(charData, subsetCFF);
+
+ /**
+ * Initialises the test by creating the font subset. A CFFDataReader is
+ * also created based on the subset data for use in the tests.
+ * @throws IOException
+ */
+ @Before
+ public void setUp() throws Exception {
+ super.setUp();
+
+ Map<Integer, Integer> glyphs = new HashMap<Integer, Integer>();
+ for (int i = 0; i < 256; i++) {
+ glyphs.put(i, i);
+ }
+
+ sourceSansSubset = new OTFSubSetFile();
+ String sourceSansHeader = OFFontLoader.readHeader(sourceSansReader);
+ sourceSansSubset.readFont(sourceSansReader, "SourceSansProBold", sourceSansHeader, glyphs);
+ sourceSansData = sourceSansSubset.getFontSubset();
+ cffReaderSourceSans = new CFFDataReader(sourceSansData);
+ }
+
+ /**
+ * Validates the CharString data against the original font
+ * @throws IOException
+ */
+ @Test
+ public void testCharStringIndex() throws IOException {
+ assertEquals(256, cffReaderSourceSans.getCharStringIndex().getNumObjects());
+ assertTrue(checkCorrectOffsets(cffReaderSourceSans.getCharStringIndex()));
+ validateCharStrings(cffReaderSourceSans, sourceSansSubset.getCFFReader());
+ }
+
+ /**
+ * Checks the index data to ensure that the offsets are valid
+ * @param indexData The index data to check
+ * @return Returns true if it is found to be valid
+ */
+ private boolean checkCorrectOffsets(CFFIndexData indexData) {
+ int last = 0;
+ for (int i = 0; i < indexData.getOffsets().length; i++) {
+ if (indexData.getOffsets()[i] < last) {
+ return false;
+ }
+ last = indexData.getOffsets()[i];
+ }
+ return true;
+ }
+
+ /**
+ * Validates the subset font CharString data by comparing it with the original.
+ * @param subsetCFF The subset CFFDataReader containing the CharString data
+ * @param origCFF The original CFFDataReader containing the CharString data
+ * @throws IOException
+ */
+ private void validateCharStrings(CFFDataReader subsetCFF, CFFDataReader origCFF)
+ throws IOException {
+ CFFFont sourceSansOriginal = sourceSansProBold.fileFont;
+ CFFIndexData charStrings = subsetCFF.getCharStringIndex();
+ Map<String, byte[]> origCharStringData = sourceSansOriginal.getCharStringsDict();
+ for (int i = 0; i < charStrings.getNumObjects(); i++) {
+ byte[] origCharData = origCharStringData.get(origCharStringData.keySet().toArray(
+ new String[0])[i]);
+ byte[] charData = charStrings.getValue(i);
- assertTrue(origOperands.get(j).equals(subsetOperands.get(j)));
++ List<BytesNumber> origOperands = getFullCharString(new Context(), origCharData, origCFF);
++ List<BytesNumber> subsetOperands = getFullCharString(new Context(), charData, subsetCFF);
+ for (int j = 0; j < origOperands.size(); j++) {
- private List<BytesNumber> getFullCharString(byte[] data, CFFDataReader cffData) throws IOException {
++ assertTrue(origOperands.get(j).equals(subsetOperands.get(j)));
+ }
+ }
+ }
+
++ static class Context {
++ private ArrayList<BytesNumber> operands = new ArrayList<BytesNumber>();
++ private ArrayList<BytesNumber> stack = new ArrayList<BytesNumber>();
++ private int hstemCount;
++ private int vstemCount;
++ private int lastOp = -1;
++ private int maskLength = -1;
++
++ public void pushOperand(BytesNumber v) {
++ operands.add(v);
++ if (v instanceof Operator) {
++ if (v.getNumber() != 11 && v.getNumber() != 12) {
++ lastOp = v.getNumber();
++ }
++ } else {
++ stack.add(v);
++ }
++ }
++
++ public BytesNumber popOperand() {
++ operands.remove(operands.size() - 1);
++ return stack.remove(stack.size() - 1);
++ }
++
++ public BytesNumber lastOperand() {
++ return operands.get(operands.size() - 1);
++ }
++
++ public void clearStack() {
++ stack.clear();
++ }
++
++ public int getMaskLength() {
++ // The number of data bytes for mask is exactly the number needed, one
++ // bit per hint, to reference the number of stem hints declared
++ // at the beginning of the charstring program.
++ if (maskLength > 0) {
++ return maskLength;
++ }
++ return 1 + (hstemCount + vstemCount - 1) / 8;
++ }
++
++ public List<BytesNumber> getFullOperandsList() {
++ return operands;
++ }
++
++ public void countHstem() {
++ // hstem(hm) operator
++ hstemCount += stack.size() / 2;
++ clearStack();
++ }
++
++ public void countVstem() {
++ // vstem(hm) operator
++ vstemCount += stack.size() / 2;
++ clearStack();
++ }
++
++ public int calcMaskLength() {
++ if (lastOp == 1 || lastOp == 18) {
++ //If hstem and vstem hints are both declared at the beginning of
++ //a charstring, and this sequence is followed directly by the
++ //hintmask or cntrmask operators, the vstem hint operator need
++ //not be included.
++ vstemCount += stack.size() / 2;
++ }
++ clearStack();
++ return getMaskLength();
++ }
++ }
+ /**
+ * Recursively reads and constructs the full CharString for comparison
+ * @param data The original byte data of the CharString
+ * @param cffData The CFFDataReader containing the subroutine indexes
+ * @return Returns a list of parsed operands and operators
+ * @throws IOException
+ */
- ArrayList<BytesNumber> operands = new ArrayList<BytesNumber>();
++ private List<BytesNumber> getFullCharString(Context context, byte[] data, CFFDataReader cffData)
++ throws IOException {
+ CFFIndexData localIndexSubr = cffData.getLocalIndexSubr();
+ CFFIndexData globalIndexSubr = cffData.getGlobalIndexSubr();
+ boolean hasLocalSubroutines = localIndexSubr != null && localIndexSubr.getNumObjects() > 0;
+ boolean hasGlobalSubroutines = globalIndexSubr != null && globalIndexSubr.getNumObjects() > 0;
- operands.get(operands.size() - 1).getNumber());
+ for (int dataPos = 0; dataPos < data.length; dataPos++) {
+ int b0 = data[dataPos] & 0xff;
+ if (b0 == 10 && hasLocalSubroutines) {
+ int subrNumber = getSubrNumber(localIndexSubr.getNumObjects(),
- List<BytesNumber> subrOperands = getFullCharString(subr, cffData);
- operands = mergeOperands(operands, subrOperands);
++ context.popOperand().getNumber());
+ byte[] subr = localIndexSubr.getValue(subrNumber);
- operands.get(operands.size() - 1).getNumber());
++ getFullCharString(context, subr, cffData);
+ } else if (b0 == 29 && hasGlobalSubroutines) {
+ int subrNumber = getSubrNumber(globalIndexSubr.getNumObjects(),
- ArrayList<BytesNumber> subrOperands = (ArrayList<BytesNumber>)getFullCharString(subr, cffData);
- operands = mergeOperands(operands, subrOperands);
++ context.popOperand().getNumber());
+ byte[] subr = globalIndexSubr.getValue(subrNumber);
- if (b0 == 19 || b0 == 20) {
- dataPos += 1;
- size = 2;
- }
- operands.add(new Operator(b0, size, getOperatorName(b0, b1)));
++ getFullCharString(context, subr, cffData);
+ } else if ((b0 >= 0 && b0 <= 27) || (b0 >= 29 && b0 <= 31)) {
+ int size = 1;
+ int b1 = -1;
+ if (b0 == 12) {
+ b1 = data[++dataPos] & 0xff;
+ size = 2;
++ } else if (b0 == 1 || b0 == 18) {
++ context.countHstem();
++ } else if (b0 == 3 || b0 == 23) {
++ context.countVstem();
++ } else if (b0 == 19 || b0 == 20) {
++ int length = context.calcMaskLength();
++ dataPos += length;
++ size = length + 1;
+ }
- operands.add(readNumber(b0, data, dataPos));
- dataPos += operands.get(operands.size() - 1).getNumBytes() - 1;
++ context.pushOperand(new Operator(b0, size, getOperatorName(b0, b1)));
+ } else if (b0 == 28 || (b0 >= 32 && b0 <= 255)) {
- return operands;
- }
-
- /**
- * Merges two lists of operands. This is typically used to merge the CharString
- * data with that of a parsed and referenced subroutine.
- * @param charString The parsed CharString data so far
- * @param subroutine The parsed elements from a subroutine
- * @return Returns a merged list of both CharString and subroutine elements.
- */
- private ArrayList<BytesNumber> mergeOperands(List<BytesNumber> charString,
- List<BytesNumber> subroutine) {
- BytesNumber[] charStringOperands = charString.toArray(new BytesNumber[0]);
- BytesNumber[] subroutineOperands = subroutine.toArray(new BytesNumber[0]);
- BytesNumber[] mergeData = new BytesNumber[charStringOperands.length - 1
- + subroutineOperands.length - 1];
- System.arraycopy(charStringOperands, 0, mergeData, 0, charStringOperands.length - 1);
- System.arraycopy(subroutineOperands, 0, mergeData, charStringOperands.length - 1,
- subroutineOperands.length - 1);
- ArrayList<BytesNumber> hello = new ArrayList<BytesNumber>();
- hello.addAll(Arrays.asList(mergeData));
- return hello;
++ context.pushOperand(readNumber(b0, data, dataPos));
++ dataPos += context.lastOperand().getNumBytes() - 1;
+ }
+ }
- return new BytesNumber(Integer.valueOf((short)(b1 << 8 | b2)), 5);
++ return context.getFullOperandsList();
+ }
+
+ /**
+ * Parses a number from one or more bytes
+ * @param b0 The first byte to identify how to interpret the number
+ * @param input The original byte data containing the number
+ * @param curPos The current position of the number
+ * @return Returns the number
+ * @throws IOException
+ */
+ private BytesNumber readNumber(int b0, byte[] input, int curPos) throws IOException {
+ if (b0 == 28) {
+ int b1 = input[curPos + 1] & 0xff;
+ int b2 = input[curPos + 2] & 0xff;
+ return new BytesNumber(Integer.valueOf((short) (b1 << 8 | b2)), 3);
+ } else if (b0 >= 32 && b0 <= 246) {
+ return new BytesNumber(Integer.valueOf(b0 - 139), 1);
+ } else if (b0 >= 247 && b0 <= 250) {
+ int b1 = input[curPos + 1] & 0xff;
+ return new BytesNumber(Integer.valueOf((b0 - 247) * 256 + b1 + 108), 2);
+ } else if (b0 >= 251 && b0 <= 254) {
+ int b1 = input[curPos + 1] & 0xff;
+ return new BytesNumber(Integer.valueOf(-(b0 - 251) * 256 - b1 - 108), 2);
+ } else if (b0 == 255) {
+ int b1 = input[curPos + 1] & 0xff;
+ int b2 = input[curPos + 2] & 0xff;
- * @param codeb The second byte of the operator
++ int b3 = input[curPos + 3] & 0xff;
++ int b4 = input[curPos + 4] & 0xff;
++ return new BytesNumber(Integer.valueOf((b1 << 24 | b2 << 16 | b3 << 8 | b4)), 5);
+ } else {
+ throw new IllegalArgumentException();
+ }
+ }
+
+ /**
+ * Gets the subroutine number according to the number of subroutines
+ * and the provided operand.
+ * @param numSubroutines The number of subroutines used to calculate the
+ * subroutine reference.
+ * @param operand The operand for the subroutine
+ * @return Returns the calculated subroutine number
+ */
+ private int getSubrNumber(int numSubroutines, int operand) {
+ int bias = getBias(numSubroutines);
+ return bias + operand;
+ }
+
+ /**
+ * Gets the bias give the number of subroutines. This is used in the
+ * calculation to determine a subroutine's number
+ * @param subrCount The number of subroutines for a given index
+ * @return Returns the bias value
+ */
+ private int getBias(int subrCount) {
+ if (subrCount < 1240) {
+ return 107;
+ } else if (subrCount < 33900) {
+ return 1131;
+ } else {
+ return 32768;
+ }
+ }
+
+ /**
+ * A class representing an operator from the CharString data
+ */
+ private class Operator extends BytesNumber {
+ private String opName = "";
+
+ public Operator(int number, int numBytes, String opName) {
+ super(number, numBytes);
+ this.opName = opName;
+ }
+ public String toString() {
+ return String.format("[%s]", opName);
+ }
+ }
+
+ /**
+ * Gets the identifying name for the given operator. This is primarily
+ * used for debugging purposes. See the Type 2 CharString Format specification
+ * document (Technical Note #5177) Appendix A (Command Codes).
+ * @param operator The operator code
++ * @param operatorB The second byte of the operator
+ * @return Returns the operator name.
+ */
+ private String getOperatorName(int operator, int operatorB) {
+ switch (operator) {
+ case 0: return "Reserved";
+ case 1: return "hstem";
+ case 2: return "Reserved";
+ case 3: return "vstem";
+ case 4: return "vmoveto";
+ case 5: return "rlineto";
+ case 6: return "hlineto";
+ case 7: return "vlineto";
+ case 8: return "rrcurveto";
+ case 9: return "Reserved";
+ case 10: return "callsubr";
+ case 11: return "return";
+ case 12: return getDoubleOpName(operatorB);
+ case 13: return "Reserved";
+ case 14: return "endchar";
+ case 15:
+ case 16:
+ case 17: return "Reserved";
+ case 18: return "hstemhm";
+ case 19: return "hintmask";
+ case 20: return "cntrmask";
+ case 21: return "rmoveto";
+ case 22: return "hmoveto";
+ case 23: return "vstemhm";
+ case 24: return "rcurveline";
+ case 25: return "rlinecurve";
+ case 26: return "vvcurveto";
+ case 27: return "hhcurveto";
+ case 28: return "shortint";
+ case 29: return "callgsubr";
+ case 30: return "vhcurveto";
+ case 31: return "hvcurveto";
+ default: return "Unknown";
+ }
+ }
+
+ /**
+ * Gets the name of a double byte operator code
+ * @param operator The second byte of the operator
+ * @return Returns the name
+ */
+ private String getDoubleOpName(int operator) {
+ switch (operator) {
+ case 0:
+ case 1:
+ case 2: return "Reserved";
+ case 3: return "and";
+ case 4: return "or";
+ case 5: return "not";
+ case 6:
+ case 7:
+ case 8: return "Reserved";
+ case 9: return "abs";
+ case 10: return "add";
+ case 11: return "sub";
+ case 12: return "div";
+ case 13: return "Reserved";
+ case 14: return "neg";
+ case 15: return "eq";
+ case 16:
+ case 17: return "Reserved";
+ case 18: return "drop";
+ case 19: return "Reserved";
+ case 20: return "put";
+ case 21: return "get";
+ case 22: return "ifelse";
+ case 23: return "random";
+ case 24: return "mul";
+ case 25: return "Reserved";
+ case 26: return "sqrt";
+ case 27: return "dup";
+ case 28: return "exch";
+ case 29: return "index";
+ case 30: return "roll";
+ case 31:
+ case 32:
+ case 33: return "Reserved";
+ case 34: return "hflex";
+ case 35: return "flex";
+ case 36: return "hflex1";
+ case 37: return "flex1";
+ case 38: return "Reserved";
+ default: return "Unknown";
+ }
+ }
+
+ /**
+ * Validates the String index data and size
+ * @throws IOException
+ */
+ @Test
+ public void testStringIndex() throws IOException {
+ assertEquals(164, cffReaderSourceSans.getStringIndex().getNumObjects());
+ assertTrue(checkCorrectOffsets(cffReaderSourceSans.getStringIndex()));
+ assertEquals("Amacron", new String(cffReaderSourceSans.getStringIndex().getValue(5)));
+ assertEquals("Edotaccent", new String(cffReaderSourceSans.getStringIndex().getValue(32)));
+ assertEquals("uni0122", new String(cffReaderSourceSans.getStringIndex().getValue(45)));
+ }
+
+ /**
+ * Validates the Top Dict data
+ * @throws IOException
+ */
+ @Test
+ public void testTopDictData() throws IOException {
+ Map<String, DICTEntry> topDictEntries = cffReaderSourceSans.parseDictData(
+ cffReaderSourceSans.getTopDictIndex().getData());
+ assertEquals(10, topDictEntries.size());
+ }
+ }