/*
Copyright (c) 2005 Health Market Science, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.healthmarketscience.jackcess.impl;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.StringWriter;
import java.nio.BufferOverflowException;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import com.healthmarketscience.jackcess.BatchUpdateException;
import com.healthmarketscience.jackcess.Column;
import com.healthmarketscience.jackcess.ColumnBuilder;
import com.healthmarketscience.jackcess.ConstraintViolationException;
import com.healthmarketscience.jackcess.CursorBuilder;
import com.healthmarketscience.jackcess.IndexBuilder;
import com.healthmarketscience.jackcess.JackcessException;
import com.healthmarketscience.jackcess.PropertyMap;
import com.healthmarketscience.jackcess.Row;
import com.healthmarketscience.jackcess.RowId;
import com.healthmarketscience.jackcess.Table;
import com.healthmarketscience.jackcess.util.ErrorHandler;
import com.healthmarketscience.jackcess.util.ExportUtil;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* A single database table
*
* Is not thread-safe.
*
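* A minimal usage sketch (assumes {@code db} is an already open
* {@link com.healthmarketscience.jackcess.Database}; table and column names
* are illustrative):
* <pre>
*   Table table = db.getTable("MyTable");
*   for(Row row : table) {
*     System.out.println(row.get("SomeColumn"));
*   }
* </pre>
*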
* @author Tim McCune
* @usage _intermediate_class_
*/
public class TableImpl implements Table
{
private static final Log LOG = LogFactory.getLog(TableImpl.class);
private static final short OFFSET_MASK = (short)0x1FFF;
private static final short DELETED_ROW_MASK = (short)0x8000;
private static final short OVERFLOW_ROW_MASK = (short)0x4000;
static final int MAGIC_TABLE_NUMBER = 1625;
private static final int MAX_BYTE = 256;
/**
* Table type code for system tables
* @usage _intermediate_class_
*/
public static final byte TYPE_SYSTEM = 0x53;
/**
* Table type code for user tables
* @usage _intermediate_class_
*/
public static final byte TYPE_USER = 0x4e;
/** comparator which sorts variable length columns based on their index into
the variable length offset table */
private static final Comparator<ColumnImpl> VAR_LEN_COLUMN_COMPARATOR =
new Comparator<ColumnImpl>() {
public int compare(ColumnImpl c1, ColumnImpl c2) {
return ((c1.getVarLenTableIndex() < c2.getVarLenTableIndex()) ? -1 :
((c1.getVarLenTableIndex() > c2.getVarLenTableIndex()) ? 1 :
0));
}
};
/** comparator which sorts columns based on their display index */
private static final Comparator<ColumnImpl> DISPLAY_ORDER_COMPARATOR =
new Comparator<ColumnImpl>() {
public int compare(ColumnImpl c1, ColumnImpl c2) {
return ((c1.getDisplayIndex() < c2.getDisplayIndex()) ? -1 :
((c1.getDisplayIndex() > c2.getDisplayIndex()) ? 1 :
0));
}
};
/** owning database */
private final DatabaseImpl _database;
/** additional table flags from the catalog entry */
private final int _flags;
/** Type of the table (either TYPE_SYSTEM or TYPE_USER) */
private final byte _tableType;
/** Number of actual indexes on the table */
private int _indexCount;
/** Number of logical indexes for the table */
private int _logicalIndexCount;
/** page number of the definition of this table */
private final int _tableDefPageNumber;
/** max Number of columns in the table (includes previous deletions) */
private short _maxColumnCount;
/** max Number of variable columns in the table */
private short _maxVarColumnCount;
/** List of columns in this table, ordered by column number */
private final List<ColumnImpl> _columns = new ArrayList<ColumnImpl>();
/** List of variable length columns in this table, ordered by offset */
private final List<ColumnImpl> _varColumns = new ArrayList<ColumnImpl>();
/** List of autonumber columns in this table, ordered by column number */
private final List<ColumnImpl> _autoNumColumns = new ArrayList<ColumnImpl>(1);
/** List of indexes on this table (multiple logical indexes may be backed by
the same index data) */
private final List<IndexImpl> _indexes = new ArrayList<IndexImpl>();
/** List of index datas on this table (the actual backing data for an
index) */
private final List<IndexData> _indexDatas = new ArrayList<IndexData>();
/** List of columns in this table which are in one or more indexes */
private final Set<ColumnImpl> _indexColumns = new LinkedHashSet<ColumnImpl>();
/** Table name as stored in Database */
private final String _name;
/** Usage map of pages that this table owns */
private final UsageMap _ownedPages;
/** Usage map of pages that this table owns with free space on them */
private final UsageMap _freeSpacePages;
/** Number of rows in the table */
private int _rowCount;
/** last long auto number for the table */
private int _lastLongAutoNumber;
/** last complex type auto number for the table */
private int _lastComplexTypeAutoNumber;
/** modification count for the table, keeps row-states up-to-date */
private int _modCount;
/** page buffer used to update data pages when adding rows */
private final TempPageHolder _addRowBufferH =
TempPageHolder.newHolder(TempBufferHolder.Type.SOFT);
/** page buffer used to update the table def page */
private final TempPageHolder _tableDefBufferH =
TempPageHolder.newHolder(TempBufferHolder.Type.SOFT);
/** buffer used for writing rows of data */
private final TempBufferHolder _writeRowBufferH =
TempBufferHolder.newHolder(TempBufferHolder.Type.SOFT, true);
/** page buffer used to write out-of-row "long value" data */
private final TempPageHolder _longValueBufferH =
TempPageHolder.newHolder(TempBufferHolder.Type.SOFT);
/** optional error handler to use when row errors are encountered */
private ErrorHandler _tableErrorHandler;
/** properties for this table */
private PropertyMap _props;
/** properties group for this table (and columns) */
private PropertyMaps _propertyMaps;
/** optional flag indicating whether or not auto numbers can be directly
inserted by the user */
private Boolean _allowAutoNumInsert;
/** foreign-key enforcer for this table */
private final FKEnforcer _fkEnforcer;
/** default cursor for iterating through the table, kept here for basic
table traversal */
private CursorImpl _defaultCursor;
/**
* Only used by unit tests
* @usage _advanced_method_
*/
protected TableImpl(boolean testing, List<ColumnImpl> columns)
throws IOException
{
if(!testing) {
throw new IllegalArgumentException();
}
_database = null;
_tableDefPageNumber = PageChannel.INVALID_PAGE_NUMBER;
_name = null;
_columns.addAll(columns);
for(ColumnImpl col : _columns) {
if(col.getType().isVariableLength()) {
_varColumns.add(col);
}
}
_maxColumnCount = (short)_columns.size();
_maxVarColumnCount = (short)_varColumns.size();
initAutoNumberColumns();
_fkEnforcer = null;
_flags = 0;
_tableType = TYPE_USER;
_indexCount = 0;
_logicalIndexCount = 0;
_ownedPages = null;
_freeSpacePages = null;
}
/**
* @param database database which owns this table
* @param tableBuffer Buffer to read the table with
* @param pageNumber Page number of the table definition
* @param name Table name
* @param flags Additional table flags from the catalog entry
*/
protected TableImpl(DatabaseImpl database, ByteBuffer tableBuffer,
int pageNumber, String name, int flags)
throws IOException
{
_database = database;
_tableDefPageNumber = pageNumber;
_name = name;
_flags = flags;
System.out.println("FOO " + _name + " tdefLen " + tableBuffer.getInt(8) +
" free " +
tableBuffer.getShort(database.getFormat().OFFSET_FREE_SPACE));
// read table definition
tableBuffer = loadCompleteTableDefinitionBuffer(tableBuffer, null);
_rowCount = tableBuffer.getInt(getFormat().OFFSET_NUM_ROWS);
_lastLongAutoNumber = tableBuffer.getInt(getFormat().OFFSET_NEXT_AUTO_NUMBER);
if(getFormat().OFFSET_NEXT_COMPLEX_AUTO_NUMBER >= 0) {
_lastComplexTypeAutoNumber = tableBuffer.getInt(
getFormat().OFFSET_NEXT_COMPLEX_AUTO_NUMBER);
}
_tableType = tableBuffer.get(getFormat().OFFSET_TABLE_TYPE);
_maxColumnCount = tableBuffer.getShort(getFormat().OFFSET_MAX_COLS);
_maxVarColumnCount = tableBuffer.getShort(getFormat().OFFSET_NUM_VAR_COLS);
short columnCount = tableBuffer.getShort(getFormat().OFFSET_NUM_COLS);
_logicalIndexCount = tableBuffer.getInt(getFormat().OFFSET_NUM_INDEX_SLOTS);
_indexCount = tableBuffer.getInt(getFormat().OFFSET_NUM_INDEXES);
tableBuffer.position(getFormat().OFFSET_OWNED_PAGES);
_ownedPages = UsageMap.read(getDatabase(), tableBuffer, false);
tableBuffer.position(getFormat().OFFSET_FREE_SPACE_PAGES);
_freeSpacePages = UsageMap.read(getDatabase(), tableBuffer, false);
for (int i = 0; i < _indexCount; i++) {
_indexDatas.add(IndexData.create(this, tableBuffer, i, getFormat()));
}
readColumnDefinitions(tableBuffer, columnCount);
readIndexDefinitions(tableBuffer);
// read column usage map info
while((tableBuffer.remaining() >= 2) &&
readColumnUsageMaps(tableBuffer)) {
// keep reading ...
}
System.out.println("FOO done " + tableBuffer.position());
// re-sort columns if necessary
if(getDatabase().getColumnOrder() != ColumnOrder.DATA) {
Collections.sort(_columns, DISPLAY_ORDER_COMPARATOR);
}
for(ColumnImpl col : _columns) {
// some columns need to do extra work after the table is completely
// loaded
col.postTableLoadInit();
}
_fkEnforcer = new FKEnforcer(this);
if(!isSystem()) {
// after fully constructed, allow column validator to be configured (but
// only for user tables)
for(ColumnImpl col : _columns) {
col.setColumnValidator(null);
}
}
}
public String getName() {
return _name;
}
public boolean isHidden() {
return((_flags & DatabaseImpl.HIDDEN_OBJECT_FLAG) != 0);
}
public boolean isSystem() {
return(_tableType != TYPE_USER);
}
/**
* @usage _advanced_method_
*/
public int getMaxColumnCount() {
return _maxColumnCount;
}
public int getColumnCount() {
return _columns.size();
}
public DatabaseImpl getDatabase() {
return _database;
}
/**
* @usage _advanced_method_
*/
public JetFormat getFormat() {
return getDatabase().getFormat();
}
/**
* @usage _advanced_method_
*/
public PageChannel getPageChannel() {
return getDatabase().getPageChannel();
}
public ErrorHandler getErrorHandler() {
return((_tableErrorHandler != null) ? _tableErrorHandler :
getDatabase().getErrorHandler());
}
public void setErrorHandler(ErrorHandler newErrorHandler) {
_tableErrorHandler = newErrorHandler;
}
public int getTableDefPageNumber() {
return _tableDefPageNumber;
}
public boolean isAllowAutoNumberInsert() {
return ((_allowAutoNumInsert != null) ? (boolean)_allowAutoNumInsert :
getDatabase().isAllowAutoNumberInsert());
}
public void setAllowAutoNumberInsert(Boolean allowAutoNumInsert) {
_allowAutoNumInsert = allowAutoNumInsert;
}
/**
* @usage _advanced_method_
*/
public RowState createRowState() {
return new RowState(TempBufferHolder.Type.HARD);
}
/**
* @usage _advanced_method_
*/
public UsageMap.PageCursor getOwnedPagesCursor() {
return _ownedPages.cursor();
}
/**
* Returns the approximate number of database pages owned by this
* table and all related indexes (this number does not take into
* account pages used for large OLE/MEMO fields).
*
* To calculate the approximate number of bytes owned by a table:
* <pre>
*   int approxTableBytes = (table.getApproximateOwnedPageCount() *
*                           table.getFormat().PAGE_SIZE);
* </pre>
*
* @usage _intermediate_method_
*/
public int getApproximateOwnedPageCount() {
// add a page for the table def (although that might actually be more than
// one page)
int count = _ownedPages.getPageCount() + 1;
for(ColumnImpl col : _columns) {
count += col.getOwnedPageCount();
}
// note, we count owned pages from _physical_ indexes, not logical indexes
// (otherwise we could double count pages)
for(IndexData indexData : _indexDatas) {
count += indexData.getOwnedPageCount();
}
return count;
}
protected TempPageHolder getLongValueBuffer() {
return _longValueBufferH;
}
public List<ColumnImpl> getColumns() {
return Collections.unmodifiableList(_columns);
}
public ColumnImpl getColumn(String name) {
for(ColumnImpl column : _columns) {
if(column.getName().equalsIgnoreCase(name)) {
return column;
}
}
throw new IllegalArgumentException(withErrorContext(
"Column with name " + name + " does not exist in this table"));
}
public boolean hasColumn(String name) {
for(ColumnImpl column : _columns) {
if(column.getName().equalsIgnoreCase(name)) {
return true;
}
}
return false;
}
public PropertyMap getProperties() throws IOException {
if(_props == null) {
_props = getPropertyMaps().getDefault();
}
return _props;
}
/**
* @return all PropertyMaps for this table (and columns)
* @usage _advanced_method_
*/
public PropertyMaps getPropertyMaps() throws IOException {
if(_propertyMaps == null) {
_propertyMaps = getDatabase().getPropertiesForObject(
_tableDefPageNumber);
}
return _propertyMaps;
}
public List<IndexImpl> getIndexes() {
return Collections.unmodifiableList(_indexes);
}
public IndexImpl getIndex(String name) {
for(IndexImpl index : _indexes) {
if(index.getName().equalsIgnoreCase(name)) {
return index;
}
}
throw new IllegalArgumentException(withErrorContext(
"Index with name " + name + " does not exist on this table"));
}
public IndexImpl getPrimaryKeyIndex() {
for(IndexImpl index : _indexes) {
if(index.isPrimaryKey()) {
return index;
}
}
throw new IllegalArgumentException(withErrorContext(
"No primary key index found"));
}
public IndexImpl getForeignKeyIndex(Table otherTable) {
for(IndexImpl index : _indexes) {
if(index.isForeignKey() && (index.getReference() != null) &&
(index.getReference().getOtherTablePageNumber() ==
((TableImpl)otherTable).getTableDefPageNumber())) {
return index;
}
}
throw new IllegalArgumentException(withErrorContext(
"No foreign key reference to " +
otherTable.getName() + " found"));
}
/**
* @return All of the IndexData on this table (unmodifiable List)
* @usage _advanced_method_
*/
public List<IndexData> getIndexDatas() {
return Collections.unmodifiableList(_indexDatas);
}
/**
* Only called by unit tests
* @usage _advanced_method_
*/
public int getLogicalIndexCount() {
return _logicalIndexCount;
}
int getIndexCount() {
return _indexCount;
}
List<ColumnImpl> getAutoNumberColumns() {
return _autoNumColumns;
}
public CursorImpl getDefaultCursor() {
if(_defaultCursor == null) {
_defaultCursor = CursorImpl.createCursor(this);
}
return _defaultCursor;
}
public CursorBuilder newCursor() {
return new CursorBuilder(this);
}
public void reset() {
getDefaultCursor().reset();
}
public Row deleteRow(Row row) throws IOException {
deleteRow(row.getId());
return row;
}
/**
* Delete the row with the given id. Provided RowId must have previously
* been returned from this Table.
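* <p>
* A minimal sketch of typical usage (assumes the table has at least one
* row; fetch a row via the default cursor, then delete it):
* <pre>
*   Row row = table.getNextRow();
*   if(row != null) {
*     table.deleteRow(row.getId());
*   }
* </pre>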
* @return the given rowId
* @throws IllegalStateException if the given row is not valid
* @usage _intermediate_method_
*/
public RowId deleteRow(RowId rowId) throws IOException {
deleteRow(getDefaultCursor().getRowState(), (RowIdImpl)rowId);
return rowId;
}
/**
* Delete the row for the given rowId.
* @usage _advanced_method_
*/
public void deleteRow(RowState rowState, RowIdImpl rowId)
throws IOException
{
requireValidRowId(rowId);
getPageChannel().startWrite();
try {
// ensure that the relevant row state is up-to-date
ByteBuffer rowBuffer = positionAtRowHeader(rowState, rowId);
if(rowState.isDeleted()) {
// don't care about duplicate deletion
return;
}
requireNonDeletedRow(rowState, rowId);
// delete flag always gets set in the "header" row (even if data is on
// overflow row)
int pageNumber = rowState.getHeaderRowId().getPageNumber();
int rowNumber = rowState.getHeaderRowId().getRowNumber();
// attempt to fill in index column values
Object[] rowValues = null;
if(!_indexDatas.isEmpty()) {
// move to row data to get index values
rowBuffer = positionAtRowData(rowState, rowId);
for(ColumnImpl idxCol : _indexColumns) {
getRowColumn(getFormat(), rowBuffer, idxCol, rowState, null);
}
// use any read rowValues to help update the indexes
rowValues = rowState.getRowCacheValues();
// check foreign keys before proceeding w/ deletion
_fkEnforcer.deleteRow(rowValues);
// move back to the header
rowBuffer = positionAtRowHeader(rowState, rowId);
}
// finally, pull the trigger
int rowIndex = getRowStartOffset(rowNumber, getFormat());
rowBuffer.putShort(rowIndex, (short)(rowBuffer.getShort(rowIndex)
| DELETED_ROW_MASK | OVERFLOW_ROW_MASK));
writeDataPage(rowBuffer, pageNumber);
// update the indexes
for(IndexData indexData : _indexDatas) {
indexData.deleteRow(rowValues, rowId);
}
// make sure table def gets updated
updateTableDefinition(-1);
} finally {
getPageChannel().finishWrite();
}
}
public Row getNextRow() throws IOException {
return getDefaultCursor().getNextRow();
}
/**
* Reads a single column from the given row.
* @usage _advanced_method_
*/
public Object getRowValue(RowState rowState, RowIdImpl rowId,
ColumnImpl column)
throws IOException
{
if(this != column.getTable()) {
throw new IllegalArgumentException(withErrorContext(
"Given column " + column + " is not from this table"));
}
requireValidRowId(rowId);
// position at correct row
ByteBuffer rowBuffer = positionAtRowData(rowState, rowId);
requireNonDeletedRow(rowState, rowId);
return getRowColumn(getFormat(), rowBuffer, column, rowState, null);
}
/**
* Reads some columns from the given row.
* @param columnNames Only column names in this collection will be returned
* @usage _advanced_method_
*/
public RowImpl getRow(
RowState rowState, RowIdImpl rowId, Collection<String> columnNames)
throws IOException
{
requireValidRowId(rowId);
// position at correct row
ByteBuffer rowBuffer = positionAtRowData(rowState, rowId);
requireNonDeletedRow(rowState, rowId);
return getRow(getFormat(), rowState, rowBuffer, _columns, columnNames);
}
/**
* Reads the row data from the given row buffer. Leaves limit unchanged.
* Saves parsed row values to the given rowState.
*/
private static RowImpl getRow(
JetFormat format,
RowState rowState,
ByteBuffer rowBuffer,
Collection<ColumnImpl> columns,
Collection<String> columnNames)
throws IOException
{
RowImpl rtn = new RowImpl(rowState.getHeaderRowId(), columns.size());
for(ColumnImpl column : columns) {
if((columnNames == null) || (columnNames.contains(column.getName()))) {
// Add the value to the row data
column.setRowValue(
rtn, getRowColumn(format, rowBuffer, column, rowState, null));
}
}
return rtn;
}
/**
* Reads the column data from the given row buffer. Leaves limit unchanged.
* Caches the returned value in the rowState.
*/
private static Object getRowColumn(JetFormat format,
ByteBuffer rowBuffer,
ColumnImpl column,
RowState rowState,
Map<ColumnImpl,byte[]> rawVarValues)
throws IOException
{
byte[] columnData = null;
try {
NullMask nullMask = rowState.getNullMask(rowBuffer);
boolean isNull = nullMask.isNull(column);
if(column.storeInNullMask()) {
// Boolean values are stored in the null mask. see note about
// caching below
return rowState.setRowCacheValue(column.getColumnIndex(),
column.readFromNullMask(isNull));
} else if(isNull) {
// well, that's easy! (no need to update cache w/ null)
return null;
}
Object cachedValue = rowState.getRowCacheValue(column.getColumnIndex());
if(cachedValue != null) {
// we already have it, use it
return cachedValue;
}
// reset position to row start
rowBuffer.reset();
// locate the column data bytes
int rowStart = rowBuffer.position();
int colDataPos = 0;
int colDataLen = 0;
if(!column.isVariableLength()) {
// read fixed length value (non-boolean at this point)
int dataStart = rowStart + format.OFFSET_COLUMN_FIXED_DATA_ROW_OFFSET;
colDataPos = dataStart + column.getFixedDataOffset();
colDataLen = column.getType().getFixedSize(column.getLength());
} else {
int varDataStart;
int varDataEnd;
if(format.SIZE_ROW_VAR_COL_OFFSET == 2) {
// read simple var length value
int varColumnOffsetPos =
(rowBuffer.limit() - nullMask.byteSize() - 4) -
(column.getVarLenTableIndex() * 2);
varDataStart = rowBuffer.getShort(varColumnOffsetPos);
varDataEnd = rowBuffer.getShort(varColumnOffsetPos - 2);
} else {
// read jump-table based var length values
short[] varColumnOffsets = readJumpTableVarColOffsets(
rowState, rowBuffer, rowStart, nullMask);
varDataStart = varColumnOffsets[column.getVarLenTableIndex()];
varDataEnd = varColumnOffsets[column.getVarLenTableIndex() + 1];
}
colDataPos = rowStart + varDataStart;
colDataLen = varDataEnd - varDataStart;
}
// grab the column data
rowBuffer.position(colDataPos);
columnData = ByteUtil.getBytes(rowBuffer, colDataLen);
if((rawVarValues != null) && column.isVariableLength()) {
// caller wants raw value as well
rawVarValues.put(column, columnData);
}
// parse the column data. we cache the row values in order to be able
// to update the index on row deletion. note, most of the returned
// values are immutable, except for binary data (returned as byte[]),
// but binary data shouldn't be indexed anyway.
return rowState.setRowCacheValue(column.getColumnIndex(),
column.read(columnData));
} catch(Exception e) {
// cache "raw" row value. see note about caching above
rowState.setRowCacheValue(column.getColumnIndex(),
ColumnImpl.rawDataWrapper(columnData));
return rowState.handleRowError(column, columnData, e);
}
}
private static short[] readJumpTableVarColOffsets(
RowState rowState, ByteBuffer rowBuffer, int rowStart,
NullMask nullMask)
{
short[] varColOffsets = rowState.getVarColOffsets();
if(varColOffsets != null) {
return varColOffsets;
}
// calculate offsets using jump-table info
int nullMaskSize = nullMask.byteSize();
int rowEnd = rowStart + rowBuffer.remaining() - 1;
int numVarCols = ByteUtil.getUnsignedByte(rowBuffer,
rowEnd - nullMaskSize);
varColOffsets = new short[numVarCols + 1];
int rowLen = rowEnd - rowStart + 1;
int numJumps = (rowLen - 1) / MAX_BYTE;
int colOffset = rowEnd - nullMaskSize - numJumps - 1;
// If last jump is a dummy value, ignore it
if(((colOffset - rowStart - numVarCols) / MAX_BYTE) < numJumps) {
numJumps--;
}
int jumpsUsed = 0;
for(int i = 0; i < numVarCols + 1; i++) {
while((jumpsUsed < numJumps) &&
(i == ByteUtil.getUnsignedByte(
rowBuffer, rowEnd - nullMaskSize-jumpsUsed - 1))) {
jumpsUsed++;
}
varColOffsets[i] = (short)
(ByteUtil.getUnsignedByte(rowBuffer, colOffset - i)
+ (jumpsUsed * MAX_BYTE));
}
rowState.setVarColOffsets(varColOffsets);
return varColOffsets;
}
/**
* Reads the null mask from the given row buffer. Leaves limit unchanged.
*/
private NullMask getRowNullMask(ByteBuffer rowBuffer)
throws IOException
{
// reset position to row start
rowBuffer.reset();
// Number of columns in this row
int columnCount = ByteUtil.getUnsignedVarInt(
rowBuffer, getFormat().SIZE_ROW_COLUMN_COUNT);
// read null mask
NullMask nullMask = new NullMask(columnCount);
rowBuffer.position(rowBuffer.limit() - nullMask.byteSize()); //Null mask at end
nullMask.read(rowBuffer);
return nullMask;
}
/**
* Sets a new buffer to the correct row header page using the given rowState
* according to the given rowId. Deleted state is
* determined, but overflow row pointers are not followed.
*
* @return a ByteBuffer of the relevant page, or null if row was invalid
* @usage _advanced_method_
*/
public static ByteBuffer positionAtRowHeader(RowState rowState,
RowIdImpl rowId)
throws IOException
{
ByteBuffer rowBuffer = rowState.setHeaderRow(rowId);
if(rowState.isAtHeaderRow()) {
// this task has already been accomplished
return rowBuffer;
}
if(!rowState.isValid()) {
// this was an invalid page/row
rowState.setStatus(RowStateStatus.AT_HEADER);
return null;
}
// note, we don't use findRowStart here cause we need the unmasked value
short rowStart = rowBuffer.getShort(
getRowStartOffset(rowId.getRowNumber(),
rowState.getTable().getFormat()));
// check the deleted, overflow flags for the row (the "real" flags are
// always set on the header row)
RowStatus rowStatus = RowStatus.NORMAL;
if(isDeletedRow(rowStart)) {
rowStatus = RowStatus.DELETED;
} else if(isOverflowRow(rowStart)) {
rowStatus = RowStatus.OVERFLOW;
}
rowState.setRowStatus(rowStatus);
rowState.setStatus(RowStateStatus.AT_HEADER);
return rowBuffer;
}
/**
* Sets the position and limit in a new buffer using the given rowState
* according to the given row number and row end, following overflow row
* pointers as necessary.
*
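* A minimal sketch of advanced usage (assumes {@code row} was previously
* read from this table):
* <pre>
*   RowState rowState = table.createRowState();
*   ByteBuffer rowData = TableImpl.positionAtRowData(
*       rowState, (RowIdImpl)row.getId());
* </pre>
*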
* @return a ByteBuffer narrowed to the actual row data, or null if row was
* invalid or deleted
* @usage _advanced_method_
*/
public static ByteBuffer positionAtRowData(RowState rowState,
RowIdImpl rowId)
throws IOException
{
positionAtRowHeader(rowState, rowId);
if(!rowState.isValid() || rowState.isDeleted()) {
// row is invalid or deleted
rowState.setStatus(RowStateStatus.AT_FINAL);
return null;
}
ByteBuffer rowBuffer = rowState.getFinalPage();
int rowNum = rowState.getFinalRowId().getRowNumber();
JetFormat format = rowState.getTable().getFormat();
if(rowState.isAtFinalRow()) {
// we've already found the final row data
return PageChannel.narrowBuffer(
rowBuffer,
findRowStart(rowBuffer, rowNum, format),
findRowEnd(rowBuffer, rowNum, format));
}
while(true) {
// note, we don't use findRowStart here cause we need the unmasked value
short rowStart = rowBuffer.getShort(getRowStartOffset(rowNum, format));
short rowEnd = findRowEnd(rowBuffer, rowNum, format);
// note, at this point we know the row is not deleted, so ignore any
// subsequent deleted flags (as overflow rows are always marked deleted
// anyway)
boolean overflowRow = isOverflowRow(rowStart);
// now, strip flags from rowStart offset
rowStart = (short)(rowStart & OFFSET_MASK);
if (overflowRow) {
if((rowEnd - rowStart) < 4) {
throw new IOException(rowState.getTable().withErrorContext(
"invalid overflow row info"));
}
// Overflow page. the "row" data in the current page points to
// another page/row
int overflowRowNum = ByteUtil.getUnsignedByte(rowBuffer, rowStart);
int overflowPageNum = ByteUtil.get3ByteInt(rowBuffer, rowStart + 1);
rowBuffer = rowState.setOverflowRow(
new RowIdImpl(overflowPageNum, overflowRowNum));
rowNum = overflowRowNum;
} else {
rowState.setStatus(RowStateStatus.AT_FINAL);
return PageChannel.narrowBuffer(rowBuffer, rowStart, rowEnd);
}
}
}
public Iterator<Row> iterator() {
return getDefaultCursor().iterator();
}
/**
* Writes a new table defined by the given TableCreator to the database.
* @usage _advanced_method_
*/
protected static void writeTableDefinition(TableCreator creator)
throws IOException
{
// first, create the usage map page
createUsageMapDefinitionBuffer(creator);
// next, determine how big the table def will be (in case it will be more
// than one page)
JetFormat format = creator.getFormat();
int idxDataLen = (creator.getIndexCount() *
(format.SIZE_INDEX_DEFINITION +
format.SIZE_INDEX_COLUMN_BLOCK)) +
(creator.getLogicalIndexCount() * format.SIZE_INDEX_INFO_BLOCK);
int colUmapLen = creator.getLongValueColumns().size() * 10;
int totalTableDefSize = format.SIZE_TDEF_HEADER +
(format.SIZE_COLUMN_DEF_BLOCK * creator.getColumns().size()) +
idxDataLen + colUmapLen + format.SIZE_TDEF_TRAILER;
// total up the amount of space used by the column and index names (2
// bytes per char + 2 bytes for the length)
for(ColumnBuilder col : creator.getColumns()) {
totalTableDefSize += DBMutator.calculateNameLength(col.getName());
}
for(IndexBuilder idx : creator.getIndexes()) {
totalTableDefSize += DBMutator.calculateNameLength(idx.getName());
}
// now, create the table definition
ByteBuffer buffer = PageChannel.createBuffer(Math.max(totalTableDefSize,
format.PAGE_SIZE));
writeTableDefinitionHeader(creator, buffer, totalTableDefSize);
if(creator.hasIndexes()) {
// index row counts
IndexData.writeRowCountDefinitions(creator, buffer);
}
// column definitions
ColumnImpl.writeDefinitions(creator, buffer);
if(creator.hasIndexes()) {
// index and index data definitions
IndexData.writeDefinitions(creator, buffer);
IndexImpl.writeDefinitions(creator, buffer);
}
// column usage map references
ColumnImpl.writeColUsageMapDefinitions(creator, buffer);
//End of tabledef
buffer.put((byte) 0xff);
buffer.put((byte) 0xff);
buffer.flip();
// write table buffer to database
writeTableDefinitionBuffer(buffer, creator.getTdefPageNumber(), creator,
Collections.<Integer>emptyList());
}
private static void writeTableDefinitionBuffer(
ByteBuffer buffer, int tdefPageNumber,
TableMutator mutator, List<Integer> reservedPages)
throws IOException
{
buffer.rewind();
int totalTableDefSize = buffer.remaining();
System.out.println("FOO writing tdef to " + tdefPageNumber + " and " +
reservedPages + " tot size " + totalTableDefSize + " " +
buffer.remaining());
JetFormat format = mutator.getFormat();
PageChannel pageChannel = mutator.getPageChannel();
// write table buffer to database
if(totalTableDefSize <= format.PAGE_SIZE) {
// easy case, fits on one page
// overwrite page free space
buffer.putShort(format.OFFSET_FREE_SPACE,
(short)(Math.max(
format.PAGE_SIZE - totalTableDefSize - 8, 0)));
// Write the tdef page to disk.
buffer.clear();
pageChannel.writePage(buffer, tdefPageNumber);
} else {
System.out.println("FOO splitting tdef");
// need to split across multiple pages
ByteBuffer partialTdef = pageChannel.createPageBuffer();
buffer.rewind();
int nextTdefPageNumber = PageChannel.INVALID_PAGE_NUMBER;
while(buffer.hasRemaining()) {
// reset for next write
partialTdef.clear();
if(nextTdefPageNumber == PageChannel.INVALID_PAGE_NUMBER) {
// this is the first page. note, the first page already has the
// page header, so no need to write it here
nextTdefPageNumber = tdefPageNumber;
} else {
// write page header
writeTablePageHeader(partialTdef);
}
// copy the next page of tdef bytes
int curTdefPageNumber = nextTdefPageNumber;
int writeLen = Math.min(partialTdef.remaining(), buffer.remaining());
partialTdef.put(buffer.array(), buffer.position(), writeLen);
ByteUtil.forward(buffer, writeLen);
if(buffer.hasRemaining()) {
// need a next page
if(reservedPages.isEmpty()) {
nextTdefPageNumber = pageChannel.allocateNewPage();
} else {
nextTdefPageNumber = reservedPages.remove(0);
}
partialTdef.putInt(format.OFFSET_NEXT_TABLE_DEF_PAGE,
nextTdefPageNumber);
}
// update page free space
partialTdef.putShort(format.OFFSET_FREE_SPACE,
(short)(Math.max(
partialTdef.remaining() - 8, 0)));
// write partial page to disk
pageChannel.writePage(partialTdef, curTdefPageNumber);
}
}
}
/**
* Writes a column defined by the given TableUpdater to this table.
* @usage _advanced_method_
*/
protected ColumnImpl mutateAddColumn(TableUpdater mutator) throws IOException
{
ColumnBuilder column = mutator.getColumn();
JetFormat format = mutator.getFormat();
boolean isVarCol = column.isVariableLength();
boolean isLongVal = column.getType().isLongValue();
////
// calculate how much more space we need in the table def
if(isLongVal) {
mutator.addTdefLen(10);
}
mutator.addTdefLen(format.SIZE_COLUMN_DEF_BLOCK);
int nameByteLen = DBMutator.calculateNameLength(column.getName());
mutator.addTdefLen(nameByteLen);
////
// load current table definition and add space for new info
ByteBuffer tableBuffer = loadCompleteTableDefinitionBufferForUpdate(
mutator);
ColumnImpl newCol = null;
int umapPos = -1;
boolean success = false;
try {
////
// update various bits of the table def
ByteUtil.forward(tableBuffer, 29);
tableBuffer.putShort((short)(_maxColumnCount + 1));
short varColCount = (short)(_varColumns.size() + (isVarCol ? 1 : 0));
tableBuffer.putShort(varColCount);
tableBuffer.putShort((short)(_columns.size() + 1));
// move to end of column def blocks
tableBuffer.position(format.SIZE_TDEF_HEADER +
(_indexCount * format.SIZE_INDEX_DEFINITION) +
(_columns.size() * format.SIZE_COLUMN_DEF_BLOCK));
// figure out the data offsets for the new column
int fixedOffset = 0;
int varOffset = 0;
if(column.isVariableLength()) {
// find the variable offset
for(ColumnImpl col : _varColumns) {
if(col.getVarLenTableIndex() >= varOffset) {
varOffset = col.getVarLenTableIndex() + 1;
}
}
} else {
// find the fixed offset
for(ColumnImpl col : _columns) {
if(!col.isVariableLength() &&
(col.getFixedDataOffset() >= fixedOffset)) {
fixedOffset = col.getFixedDataOffset() +
col.getType().getFixedSize(col.getLength());
}
}
}
mutator.setColumnOffsets(fixedOffset, varOffset, varOffset);
// insert space for the column definition and write it
int colDefPos = tableBuffer.position();
ByteUtil.insertEmptyData(tableBuffer, format.SIZE_COLUMN_DEF_BLOCK);
ColumnImpl.writeDefinition(mutator, column, tableBuffer);
// skip existing column names and write new name
skipNames(tableBuffer, _columns.size());
ByteUtil.insertEmptyData(tableBuffer, nameByteLen);
System.out.println("FOO pre name " + tableBuffer.position());
writeName(tableBuffer, column.getName(), mutator.getCharset());
System.out.println("FOO post name " + tableBuffer.position());
if(isLongVal) {
// allocate usage maps for the long value col
Map.Entry<Integer,Integer> umapInfo = addUsageMaps(2, null);
LOG.debug("Created umap " + umapInfo);
TableMutator.ColumnState colState = mutator.getColumnState(column);
colState.setUmapPageNumber(umapInfo.getKey());
byte rowNum = umapInfo.getValue().byteValue();
colState.setUmapOwnedRowNumber(rowNum);
colState.setUmapFreeRowNumber((byte)(rowNum + 1));
// skip past index defs
System.out.println("FOO pre move " + tableBuffer.position());
ByteUtil.forward(tableBuffer, (_indexCount *
format.SIZE_INDEX_COLUMN_BLOCK));
System.out.println("FOO moved to " + tableBuffer.position());
ByteUtil.forward(tableBuffer,
(_logicalIndexCount * format.SIZE_INDEX_INFO_BLOCK));
System.out.println("FOO moved to " + tableBuffer.position());
skipNames(tableBuffer, _logicalIndexCount);
// skip existing usage maps
while(tableBuffer.remaining() >= 2) {
if(tableBuffer.getShort() == IndexData.COLUMN_UNUSED) {
// found end of tdef, we want to insert before this
ByteUtil.forward(tableBuffer, -2);
break;
}
ByteUtil.forward(tableBuffer, 8);
// keep reading ...
}
// write new column usage map info
System.out.println("FOO about to write " + tableBuffer.position());
umapPos = tableBuffer.position();
ByteUtil.insertEmptyData(tableBuffer, 10);
ColumnImpl.writeColUsageMapDefinition(
mutator, column, tableBuffer);
}
// sanity check the updates
validateTableDefUpdate(mutator, tableBuffer);
// before writing the new table def, create the column
newCol = ColumnImpl.create(this, tableBuffer, colDefPos,
column.getName(), _columns.size());
newCol.setColumnIndex(_columns.size());
////
// write updated table def back to the database
writeTableDefinitionBuffer(tableBuffer, _tableDefPageNumber, mutator,
mutator.getNextPages());
success = true;
} finally {
if(!success) {
// need to discard modified table buffer
_tableDefBufferH.invalidate();
}
}
////
// now, update current TableImpl
_columns.add(newCol);
++_maxColumnCount;
if(newCol.isVariableLength()) {
_varColumns.add(newCol);
++_maxVarColumnCount;
}
if(newCol.isAutoNumber()) {
_autoNumColumns.add(newCol);
}
if(umapPos >= 0) {
// read column usage map
tableBuffer.position(umapPos);
readColumnUsageMaps(tableBuffer);
}
newCol.postTableLoadInit();
if(!isSystem()) {
// after fully constructed, allow column validator to be configured (but
// only for user tables)
newCol.setColumnValidator(null);
}
// save any column properties
Map<String,PropertyMap.Property> colProps = column.getProperties();
if(colProps != null) {
newCol.getProperties().putAll(colProps.values());
getProperties().save();
}
completeTableMutation(tableBuffer);
return newCol;
}
/**
* Writes the index data defined by the given TableUpdater to this table.
* @usage _advanced_method_
*/
protected IndexData mutateAddIndexData(TableUpdater mutator) throws IOException
{
IndexBuilder index = mutator.getIndex();
JetFormat format = mutator.getFormat();
////
// calculate how much more space we need in the table def
mutator.addTdefLen(format.SIZE_INDEX_DEFINITION +
format.SIZE_INDEX_COLUMN_BLOCK);
////
// load current table definition and add space for new info
ByteBuffer tableBuffer = loadCompleteTableDefinitionBufferForUpdate(
mutator);
IndexData newIdxData = null;
boolean success = false;
try {
////
// update various bits of the table def
ByteUtil.forward(tableBuffer, 39);
tableBuffer.putInt(_indexCount + 1);
// move to end of index data def blocks
tableBuffer.position(format.SIZE_TDEF_HEADER +
(_indexCount * format.SIZE_INDEX_DEFINITION));
// write index row count definition (empty initially)
ByteUtil.insertEmptyData(tableBuffer, format.SIZE_INDEX_DEFINITION);
IndexData.writeRowCountDefinitions(mutator, tableBuffer, 1);
// skip columns and column names
ByteUtil.forward(tableBuffer,
(_columns.size() * format.SIZE_COLUMN_DEF_BLOCK));
skipNames(tableBuffer, _columns.size());
// move to end of current index datas
ByteUtil.forward(tableBuffer, (_indexCount *
format.SIZE_INDEX_COLUMN_BLOCK));
// allocate usage maps and root page
TableMutator.IndexDataState idxDataState = mutator.getIndexDataState(index);
int rootPageNumber = getPageChannel().allocateNewPage();
Map.Entry<Integer,Integer> umapInfo = addUsageMaps(1, rootPageNumber);
LOG.debug("Created umap " + umapInfo);
idxDataState.setRootPageNumber(rootPageNumber);
idxDataState.setUmapPageNumber(umapInfo.getKey());
idxDataState.setUmapRowNumber(umapInfo.getValue().byteValue());
// write index data def
int idxDataDefPos = tableBuffer.position();
ByteUtil.insertEmptyData(tableBuffer, format.SIZE_INDEX_COLUMN_BLOCK);
IndexData.writeDefinition(mutator, tableBuffer, idxDataState, null);
// sanity check the updates
validateTableDefUpdate(mutator, tableBuffer);
// before writing the new table def, create the index data
tableBuffer.position(0);
newIdxData = IndexData.create(
this, tableBuffer, idxDataState.getIndexDataNumber(), format);
tableBuffer.position(idxDataDefPos);
newIdxData.read(tableBuffer, _columns);
////
// write updated table def back to the database
writeTableDefinitionBuffer(tableBuffer, _tableDefPageNumber, mutator,
mutator.getNextPages());
success = true;
} finally {
if(!success) {
// need to discard modified table buffer
_tableDefBufferH.invalidate();
}
}
////
// now, update current TableImpl
for(IndexData.ColumnDescriptor iCol : newIdxData.getColumns()) {
_indexColumns.add(iCol.getColumn());
}
++_indexCount;
_indexDatas.add(newIdxData);
completeTableMutation(tableBuffer);
// don't forget to populate the new index
populateIndexData(newIdxData);
return newIdxData;
}
private void populateIndexData(IndexData idxData)
throws IOException
{
// grab the columns involved in this index
List<ColumnImpl> idxCols = new ArrayList<ColumnImpl>();
for(IndexData.ColumnDescriptor col : idxData.getColumns()) {
idxCols.add(col.getColumn());
}
// iterate through all the rows and add them to the index
Object[] rowVals = new Object[_columns.size()];
for(Row row : getDefaultCursor().newIterable().addColumns(idxCols)) {
for(Column col : idxCols) {
col.setRowValue(rowVals, col.getRowValue(row));
}
IndexData.commitAll(
idxData.prepareAddRow(rowVals, (RowIdImpl)row.getId(), null));
}
updateTableDefinition(0);
}
/**
* Writes an index defined by the given TableUpdater to this table.
* @usage _advanced_method_
*/
protected IndexImpl mutateAddIndex(TableUpdater mutator) throws IOException
{
IndexBuilder index = mutator.getIndex();
JetFormat format = mutator.getFormat();
////
// calculate how much more space we need in the table def
mutator.addTdefLen(format.SIZE_INDEX_INFO_BLOCK);
int nameByteLen = DBMutator.calculateNameLength(index.getName());
mutator.addTdefLen(nameByteLen);
////
// load current table definition and add space for new info
ByteBuffer tableBuffer = loadCompleteTableDefinitionBufferForUpdate(
mutator);
IndexImpl newIdx = null;
boolean success = false;
try {
////
// update various bits of the table def
ByteUtil.forward(tableBuffer, 35);
tableBuffer.putInt(_logicalIndexCount + 1);
// move to end of index data def blocks
tableBuffer.position(format.SIZE_TDEF_HEADER +
(_indexCount * format.SIZE_INDEX_DEFINITION));
// skip columns and column names
ByteUtil.forward(tableBuffer,
(_columns.size() * format.SIZE_COLUMN_DEF_BLOCK));
skipNames(tableBuffer, _columns.size());
// move to end of current index datas
ByteUtil.forward(tableBuffer, (_indexCount *
format.SIZE_INDEX_COLUMN_BLOCK));
// move to end of current indexes
ByteUtil.forward(tableBuffer, (_logicalIndexCount *
format.SIZE_INDEX_INFO_BLOCK));
int idxDefPos = tableBuffer.position();
ByteUtil.insertEmptyData(tableBuffer, format.SIZE_INDEX_INFO_BLOCK);
IndexImpl.writeDefinition(mutator, index, tableBuffer);
// skip existing index names and write new name
skipNames(tableBuffer, _logicalIndexCount);
ByteUtil.insertEmptyData(tableBuffer, nameByteLen);
writeName(tableBuffer, index.getName(), mutator.getCharset());
// sanity check the updates
validateTableDefUpdate(mutator, tableBuffer);
// before writing the new table def, create the index
tableBuffer.position(idxDefPos);
newIdx = new IndexImpl(tableBuffer, _indexDatas, format);
newIdx.setName(index.getName());
////
// write updated table def back to the database
writeTableDefinitionBuffer(tableBuffer, _tableDefPageNumber, mutator,
mutator.getNextPages());
success = true;
} finally {
if(!success) {
// need to discard modified table buffer
_tableDefBufferH.invalidate();
}
}
////
// now, update current TableImpl
++_logicalIndexCount;
_indexes.add(newIdx);
completeTableMutation(tableBuffer);
return newIdx;
}
private void validateTableDefUpdate(TableUpdater mutator, ByteBuffer tableBuffer)
throws IOException
{
if(!mutator.validateUpdatedTdef(tableBuffer)) {
throw new IllegalStateException(
withErrorContext("Failed updating table definition (unexpected length)"));
}
}
private void completeTableMutation(ByteBuffer tableBuffer) throws IOException
{
// lastly, may need to clear table def buffer
_tableDefBufferH.possiblyInvalidate(_tableDefPageNumber, tableBuffer);
// update any foreign key enforcing
_fkEnforcer.reset();
// update modification count so any active RowStates can keep themselves
// up-to-date
++_modCount;
}
/**
* Skips the given number of names in the table buffer.
*/
private static void skipNames(ByteBuffer tableBuffer, int count) {
for(int i = 0; i < count; ++i) {
ByteUtil.forward(tableBuffer, tableBuffer.getShort());
}
}
private ByteBuffer loadCompleteTableDefinitionBufferForUpdate(
TableUpdater mutator)
throws IOException
{
// load complete table definition
ByteBuffer tableBuffer = _tableDefBufferH.setPage(getPageChannel(),
_tableDefPageNumber);
tableBuffer = loadCompleteTableDefinitionBuffer(
tableBuffer, mutator.getNextPages());
// make sure the table buffer has enough room for the new info
int addedLen = mutator.getAddedTdefLen();
int origTdefLen = tableBuffer.getInt(8);
mutator.setOrigTdefLen(origTdefLen);
int newTdefLen = origTdefLen + addedLen;
System.out.println("FOO new " + newTdefLen + " add " + addedLen);
while(newTdefLen > tableBuffer.capacity()) {
tableBuffer = expandTableBuffer(tableBuffer);
tableBuffer.flip();
}
tableBuffer.limit(origTdefLen);
// set new tdef length
tableBuffer.position(8);
tableBuffer.putInt(newTdefLen);
return tableBuffer;
}
private Map.Entry<Integer,Integer> addUsageMaps(
int numMaps, Integer firstUsedPage)
throws IOException
{
JetFormat format = getFormat();
PageChannel pageChannel = getPageChannel();
int umapRowLength = format.OFFSET_USAGE_MAP_START +
format.USAGE_MAP_TABLE_BYTE_LENGTH;
int totalUmapSpaceUsage = getRowSpaceUsage(umapRowLength, format) * numMaps;
int umapPageNumber = PageChannel.INVALID_PAGE_NUMBER;
int firstRowNum = -1;
int freeSpace = 0;
// search currently known usage map buffers to find one with enough free
// space (the numMaps should always be small enough to put them all on one
// page). pages with free space will probably be newer pages (higher
// numbers), so we sort in reverse order.
Set<Integer> knownPages = new TreeSet<Integer>(Collections.reverseOrder());
collectUsageMapPages(knownPages);
System.out.println("FOO found umap pages " + knownPages);
ByteBuffer umapBuf = pageChannel.createPageBuffer();
for(Integer pageNum : knownPages) {
pageChannel.readPage(umapBuf, pageNum);
freeSpace = umapBuf.getShort(format.OFFSET_FREE_SPACE);
if(freeSpace >= totalUmapSpaceUsage) {
// found a page!
umapPageNumber = pageNum;
firstRowNum = getRowsOnDataPage(umapBuf, format);
break;
}
}
if(umapPageNumber == PageChannel.INVALID_PAGE_NUMBER) {
// didn't find any existing pages, need to create a new one
freeSpace = format.DATA_PAGE_INITIAL_FREE_SPACE;
firstRowNum = 0;
umapBuf = createUsageMapDefPage(pageChannel, freeSpace);
}
// write the actual usage map defs
int rowStart = findRowEnd(umapBuf, firstRowNum, format) - umapRowLength;
int umapRowNum = firstRowNum;
for(int i = 0; i < numMaps; ++i) {
umapBuf.putShort(getRowStartOffset(umapRowNum, format), (short)rowStart);
umapBuf.put(rowStart, UsageMap.MAP_TYPE_INLINE);
if(firstUsedPage != null) {
// fill in the first used page of the usage map
umapBuf.putInt(rowStart + 1, firstUsedPage);
umapBuf.put(rowStart + 5, (byte)1);
}
rowStart -= umapRowLength;
++umapRowNum;
}
// finish the page
freeSpace -= totalUmapSpaceUsage;
umapBuf.putShort(format.OFFSET_FREE_SPACE, (short)freeSpace);
umapBuf.putShort(format.OFFSET_NUM_ROWS_ON_DATA_PAGE,
(short)umapRowNum);
pageChannel.writePage(umapBuf, umapPageNumber);
return new AbstractMap.SimpleImmutableEntry<Integer,Integer>(
umapPageNumber, firstRowNum);
}
void collectUsageMapPages(Collection<Integer> pages) {
pages.add(_ownedPages.getTablePageNumber());
pages.add(_freeSpacePages.getTablePageNumber());
for(IndexData idx : _indexDatas) {
idx.collectUsageMapPages(pages);
}
for(ColumnImpl col : _columns) {
col.collectUsageMapPages(pages);
}
}
/**
* @param buffer Buffer to write to
*/
private static void writeTableDefinitionHeader(
TableCreator creator, ByteBuffer buffer, int totalTableDefSize)
throws IOException
{
List<ColumnBuilder> columns = creator.getColumns();
//Start writing the tdef
writeTablePageHeader(buffer);
buffer.putInt(totalTableDefSize); //Length of table def
buffer.putInt(MAGIC_TABLE_NUMBER); // seemingly constant magic value
buffer.putInt(0); //Number of rows
buffer.putInt(0); //Last Autonumber
buffer.put((byte) 1); // this makes autonumbering work in access
for (int i = 0; i < 15; i++) { //Unknown
buffer.put((byte) 0);
}
buffer.put(TYPE_USER); //Table type
buffer.putShort((short) columns.size()); //Max columns a row will have
buffer.putShort(ColumnImpl.countVariableLength(columns)); //Number of variable columns in table
buffer.putShort((short) columns.size()); //Number of columns in table
buffer.putInt(creator.getLogicalIndexCount()); //Number of logical indexes in table
buffer.putInt(creator.getIndexCount()); //Number of indexes in table
buffer.put((byte) 0); //Usage map row number
ByteUtil.put3ByteInt(buffer, creator.getUmapPageNumber()); //Usage map page number
buffer.put((byte) 1); //Free map row number
ByteUtil.put3ByteInt(buffer, creator.getUmapPageNumber()); //Free map page number
}
/**
* Writes the page header for a table definition page
* @param buffer Buffer to write to
*/
private static void writeTablePageHeader(ByteBuffer buffer)
{
buffer.put(PageTypes.TABLE_DEF); //Page type
buffer.put((byte) 0x01); //Unknown
buffer.put((byte) 0); //Unknown
buffer.put((byte) 0); //Unknown
buffer.putInt(0); //Next TDEF page pointer
}
/**
* Writes the given name into the given buffer in the format as expected by
* {@link #readName}.
*/
static void writeName(ByteBuffer buffer, String name, Charset charset)
{
ByteBuffer encName = ColumnImpl.encodeUncompressedText(name, charset);
buffer.putShort((short) encName.remaining());
buffer.put(encName);
}
/**
* Create the usage map definition page buffer. The "used pages" map is in
* row 0, the "pages with free space" map is in row 1. Index usage maps are
* in subsequent rows.
*/
private static void createUsageMapDefinitionBuffer(TableCreator creator)
throws IOException
{
List<ColumnBuilder> lvalCols = creator.getLongValueColumns();
// 2 table usage maps plus 1 for each index and 2 for each lval col
int indexUmapEnd = 2 + creator.getIndexCount();
int umapNum = indexUmapEnd + (lvalCols.size() * 2);
JetFormat format = creator.getFormat();
int umapRowLength = format.OFFSET_USAGE_MAP_START +
format.USAGE_MAP_TABLE_BYTE_LENGTH;
int umapSpaceUsage = getRowSpaceUsage(umapRowLength, format);
PageChannel pageChannel = creator.getPageChannel();
int umapPageNumber = PageChannel.INVALID_PAGE_NUMBER;
ByteBuffer umapBuf = null;
int freeSpace = 0;
int rowStart = 0;
int umapRowNum = 0;
for(int i = 0; i < umapNum; ++i) {
if(umapBuf == null) {
// need new page for usage maps
if(umapPageNumber == PageChannel.INVALID_PAGE_NUMBER) {
// first umap page has already been reserved
umapPageNumber = creator.getUmapPageNumber();
} else {
// need another umap page
umapPageNumber = creator.reservePageNumber();
}
freeSpace = format.DATA_PAGE_INITIAL_FREE_SPACE;
umapBuf = createUsageMapDefPage(pageChannel, freeSpace);
rowStart = findRowEnd(umapBuf, 0, format) - umapRowLength;
umapRowNum = 0;
}
umapBuf.putShort(getRowStartOffset(umapRowNum, format), (short)rowStart);
if(i == 0) {
// table "owned pages" map definition
umapBuf.put(rowStart, UsageMap.MAP_TYPE_REFERENCE);
} else if(i == 1) {
// table "free space pages" map definition
umapBuf.put(rowStart, UsageMap.MAP_TYPE_INLINE);
} else if(i < indexUmapEnd) {
// index umap
int indexIdx = i - 2;
TableMutator.IndexDataState idxDataState =
creator.getIndexDataStates().get(indexIdx);
// allocate root page for the index
int rootPageNumber = pageChannel.allocateNewPage();
// stash info for later use
idxDataState.setRootPageNumber(rootPageNumber);
idxDataState.setUmapRowNumber((byte)umapRowNum);
idxDataState.setUmapPageNumber(umapPageNumber);
// index map definition, including initial root page
umapBuf.put(rowStart, UsageMap.MAP_TYPE_INLINE);
umapBuf.putInt(rowStart + 1, rootPageNumber);
umapBuf.put(rowStart + 5, (byte)1);
} else {
// long value column umaps
int lvalColIdx = i - indexUmapEnd;
int umapType = lvalColIdx % 2;
lvalColIdx /= 2;
ColumnBuilder lvalCol = lvalCols.get(lvalColIdx);
TableMutator.ColumnState colState =
creator.getColumnState(lvalCol);
umapBuf.put(rowStart, UsageMap.MAP_TYPE_INLINE);
if((umapType == 1) &&
(umapPageNumber != colState.getUmapPageNumber())) {
// we want to force both usage maps for a column to be on the same
// data page, so just discard the previous one we wrote
--i;
umapType = 0;
}
if(umapType == 0) {
// lval column "owned pages" usage map
colState.setUmapOwnedRowNumber((byte)umapRowNum);
colState.setUmapPageNumber(umapPageNumber);
} else {
// lval column "free space pages" usage map (always on same page)
colState.setUmapFreeRowNumber((byte)umapRowNum);
}
}
rowStart -= umapRowLength;
freeSpace -= umapSpaceUsage;
++umapRowNum;
if((freeSpace <= umapSpaceUsage) || (i == (umapNum - 1))) {
// finish current page
umapBuf.putShort(format.OFFSET_FREE_SPACE, (short)freeSpace);
umapBuf.putShort(format.OFFSET_NUM_ROWS_ON_DATA_PAGE,
(short)umapRowNum);
pageChannel.writePage(umapBuf, umapPageNumber);
umapBuf = null;
}
}
}
private static ByteBuffer createUsageMapDefPage(
PageChannel pageChannel, int freeSpace)
{
ByteBuffer umapBuf = pageChannel.createPageBuffer();
umapBuf.put(PageTypes.DATA);
umapBuf.put((byte) 0x1); //Unknown
umapBuf.putShort((short)freeSpace); //Free space in page
umapBuf.putInt(0); //Table definition
umapBuf.putInt(0); //Unknown
umapBuf.putShort((short)0); //Number of records on this page
return umapBuf;
}
/**
* Returns a single ByteBuffer which contains the entire table definition
* (which may span multiple database pages).
*/
private ByteBuffer loadCompleteTableDefinitionBuffer(
ByteBuffer tableBuffer, List<Integer> pages)
throws IOException
{
int nextPage = tableBuffer.getInt(getFormat().OFFSET_NEXT_TABLE_DEF_PAGE);
ByteBuffer nextPageBuffer = null;
while (nextPage != 0) {
if(pages != null) {
pages.add(nextPage);
}
if (nextPageBuffer == null) {
nextPageBuffer = getPageChannel().createPageBuffer();
}
getPageChannel().readPage(nextPageBuffer, nextPage);
nextPage = nextPageBuffer.getInt(getFormat().OFFSET_NEXT_TABLE_DEF_PAGE);
System.out.println("FOO next page free " + nextPageBuffer.getShort(getFormat().OFFSET_FREE_SPACE));
tableBuffer = expandTableBuffer(tableBuffer);
tableBuffer.put(nextPageBuffer.array(), 8, getFormat().PAGE_SIZE - 8);
tableBuffer.flip();
}
return tableBuffer;
}
private ByteBuffer expandTableBuffer(ByteBuffer tableBuffer) {
ByteBuffer newBuffer = PageChannel.createBuffer(
tableBuffer.capacity() + getFormat().PAGE_SIZE - 8);
newBuffer.put(tableBuffer);
return newBuffer;
}
private void readColumnDefinitions(ByteBuffer tableBuffer, short columnCount)
throws IOException
{
int colOffset = getFormat().OFFSET_INDEX_DEF_BLOCK +
_indexCount * getFormat().SIZE_INDEX_DEFINITION;
tableBuffer.position(colOffset +
(columnCount * getFormat().SIZE_COLUMN_HEADER));
List<String> colNames = new ArrayList<String>(columnCount);
for (int i = 0; i < columnCount; i++) {
colNames.add(readName(tableBuffer));
}
int dispIndex = 0;
for (int i = 0; i < columnCount; i++) {
ColumnImpl column = ColumnImpl.create(this, tableBuffer,
colOffset + (i * getFormat().SIZE_COLUMN_HEADER), colNames.get(i),
dispIndex++);
_columns.add(column);
if(column.isVariableLength()) {
// also shove it in the variable columns list, which is ordered
// differently from the _columns list
_varColumns.add(column);
}
}
Collections.sort(_columns);
initAutoNumberColumns();
// setup the data index for the columns
int colIdx = 0;
for(ColumnImpl col : _columns) {
col.setColumnIndex(colIdx++);
}
// sort variable length columns based on their index into the variable
// length offset table, because we will write the columns in this order
Collections.sort(_varColumns, VAR_LEN_COLUMN_COMPARATOR);
}
private void readIndexDefinitions(ByteBuffer tableBuffer) throws IOException
{
// read index column information
for (int i = 0; i < _indexCount; i++) {
IndexData idxData = _indexDatas.get(i);
idxData.read(tableBuffer, _columns);
// keep track of all columns involved in indexes
for(IndexData.ColumnDescriptor iCol : idxData.getColumns()) {
_indexColumns.add(iCol.getColumn());
}
}
// read logical index info (may be more logical indexes than index datas)
for (int i = 0; i < _logicalIndexCount; i++) {
_indexes.add(new IndexImpl(tableBuffer, _indexDatas, getFormat()));
}
// read logical index names
for (int i = 0; i < _logicalIndexCount; i++) {
_indexes.get(i).setName(readName(tableBuffer));
}
Collections.sort(_indexes);
}
private boolean readColumnUsageMaps(ByteBuffer tableBuffer)
throws IOException
{
short umapColNum = tableBuffer.getShort();
if(umapColNum == IndexData.COLUMN_UNUSED) {
return false;
}
int pos = tableBuffer.position();
UsageMap colOwnedPages = null;
UsageMap colFreeSpacePages = null;
try {
colOwnedPages = UsageMap.read(getDatabase(), tableBuffer, false);
colFreeSpacePages = UsageMap.read(getDatabase(), tableBuffer, false);
} catch(IllegalStateException e) {
// ignore invalid usage map info
colOwnedPages = null;
colFreeSpacePages = null;
tableBuffer.position(pos + 8);
LOG.warn(withErrorContext("Invalid column " + umapColNum +
" usage map definition: " + e));
}
for(ColumnImpl col : _columns) {
if(col.getColumnNumber() == umapColNum) {
col.setUsageMaps(colOwnedPages, colFreeSpacePages);
break;
}
}
return true;
}
/**
* Writes the given page data to the given page number, clears any other
* relevant buffers.
*/
private void writeDataPage(ByteBuffer pageBuffer, int pageNumber)
throws IOException
{
// write the page data
getPageChannel().writePage(pageBuffer, pageNumber);
// possibly invalidate the add row buffer if a different data buffer is
// being written (e.g. this happens during deleteRow)
_addRowBufferH.possiblyInvalidate(pageNumber, pageBuffer);
// update modification count so any active RowStates can keep themselves
// up-to-date
++_modCount;
}
/**
* Returns a name read from the buffer at the current position. The
* expected name format is the name length followed by the name
* encoded using the {@link JetFormat#CHARSET}
*/
private String readName(ByteBuffer buffer) {
int nameLength = readNameLength(buffer);
byte[] nameBytes = ByteUtil.getBytes(buffer, nameLength);
return ColumnImpl.decodeUncompressedText(nameBytes,
getDatabase().getCharset());
}
/**
* Returns a name length read from the buffer at the current position.
*/
private int readNameLength(ByteBuffer buffer) {
return ByteUtil.getUnsignedVarInt(buffer, getFormat().SIZE_NAME_LENGTH);
}
public Object[] asRow(Map<String,?> rowMap) {
return asRow(rowMap, null, false);
}
/**
* Converts a map of columnName -> columnValue to an array of row values
* appropriate for a call to {@link #addRow(Object...)}, where the generated
* RowId will be an extra value at the end of the array.
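* <p>
* A minimal sketch of typical usage (assumes {@code rowMap} maps column
* names to values for this table):
* <pre>
*   Object[] rowValues = table.asRowWithRowId(rowMap);
*   table.addRow(rowValues);
*   RowId rowId = table.getRowId(rowValues);
* </pre>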
* @see ColumnImpl#RETURN_ROW_ID
* @usage _intermediate_method_
*/
public Object[] asRowWithRowId(Map<String,?> rowMap) {
return asRow(rowMap, null, true);
}
public Object[] asUpdateRow(Map<String,?> rowMap) {
return asRow(rowMap, Column.KEEP_VALUE, false);
}
/**
* @return the generated RowId added to a row of values created via {@link
* #asRowWithRowId}
* @usage _intermediate_method_
*/
public RowId getRowId(Object[] row) {
return (RowId)row[_columns.size()];
}
/**
* Converts a map of columnName -> columnValue to an array of row values.
*/
private Object[] asRow(Map<String,?> rowMap, Object defaultValue,
boolean returnRowId)
{
int len = _columns.size();
if(returnRowId) {
++len;
}
Object[] row = new Object[len];
if(defaultValue != null) {
Arrays.fill(row, defaultValue);
}
if(returnRowId) {
row[len - 1] = ColumnImpl.RETURN_ROW_ID;
}
if(rowMap == null) {
return row;
}
for(ColumnImpl col : _columns) {
if(rowMap.containsKey(col.getName())) {
col.setRowValue(row, col.getRowValue(rowMap));
}
}
return row;
}
public Object[] addRow(Object... row) throws IOException {
return addRows(Collections.singletonList(row), false).get(0);
}
public <M extends Map<String,Object>> M addRowFromMap(M row)
throws IOException
{
Object[] rowValues = asRow(row);
addRow(rowValues);
returnRowValues(row, rowValues, _autoNumColumns);
return row;
}
public List<? extends Object[]> addRows(List<? extends Object[]> rows)
throws IOException
{
return addRows(rows, true);
}
public <M extends Map<String,Object>> List<M> addRowsFromMaps(List<M> rows)
throws IOException
{
List