author     James Ahlborn <jtahlborn@yahoo.com>    2013-06-14 18:06:10 +0000
committer  James Ahlborn <jtahlborn@yahoo.com>    2013-06-14 18:06:10 +0000
commit     1f914eacafab90bd13fa53eb6306ced2de72105f (patch)
tree       31b5394aed35e2d6c76910864bf78da4e43fc706
parent     20e18a5a4f8607cb27aa08dc350225f3ca3d2135 (diff)
download   jackcess-1f914eacafab90bd13fa53eb6306ced2de72105f.tar.gz
           jackcess-1f914eacafab90bd13fa53eb6306ced2de72105f.zip
use logical write operations for auto-sync to make writing more efficient
git-svn-id: https://svn.code.sf.net/p/jackcess/code/jackcess/branches/jackcess-2@736 f203690c-595d-4dc9-a70b-905162fa7fd2
7 files changed, 285 insertions, 217 deletions
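The recurring shape of this change, visible in every file below: each logical operation brackets its page writes with startWrite/finishWrite on the PageChannel, so that under auto-sync the channel is flushed once per operation rather than once per page write. In outline (a sketch distilled from the diff, not additional committed code):

    // bracket a multi-page mutation as one logical write; under auto-sync,
    // flush() now runs once, in finishWrite(), when the outermost bracket
    // closes -- not inside every writePage() call
    getPageChannel().startWrite();
    try {
      // ... allocate, write, and deallocate any number of pages ...
    } finally {
      getPageChannel().finishWrite();
    }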
@@ -50,6 +50,8 @@ Refactor goals:
 - remove unnecessary iterator class from impl classes? (what does this mean?)
 - change CodecHandler usage to handle not-inline decoding
 - implement page buffering in PageChannel
+  * need to implement logical flushing in update code (startUpdate/finishUpdate)
+- limit size of IndexPageCache?
 * public api final cleanup:
   * Database
diff --git a/src/java/com/healthmarketscience/jackcess/DatabaseBuilder.java b/src/java/com/healthmarketscience/jackcess/DatabaseBuilder.java
index d338b83..e9ea26e 100644
--- a/src/java/com/healthmarketscience/jackcess/DatabaseBuilder.java
+++ b/src/java/com/healthmarketscience/jackcess/DatabaseBuilder.java
@@ -85,12 +85,13 @@ public class DatabaseBuilder
   /**
    * Sets whether or not to enable auto-syncing on write.  if {@code true},
-   * writes will be immediately flushed to disk.  This leaves the database in
-   * a (fairly) consistent state on each write, but can be very inefficient
-   * for many updates.  if {@code false}, flushing to disk happens at the
-   * jvm's leisure, which can be much faster, but may leave the database in an
-   * inconsistent state if failures are encountered during writing.  Writes
-   * may be flushed at any time using {@link Database#flush}.
+   * write operations will be immediately flushed to disk upon completion.
+   * This leaves the database in a (fairly) consistent state on each write,
+   * but can be very inefficient for many updates.  if {@code false}, flushing
+   * to disk happens at the jvm's leisure, which can be much faster, but may
+   * leave the database in an inconsistent state if failures are encountered
+   * during writing.  Writes may be flushed at any time using {@link
+   * Database#flush}.
    * @usage _intermediate_method_
    */
   public DatabaseBuilder setAutoSync(boolean autoSync) {
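For callers, auto-sync remains a builder option; the commit only changes how often the flush happens. A minimal usage sketch (the file name and surrounding code are illustrative, not part of this commit; imports omitted):

    // open a database with auto-sync disabled and flush manually;
    // "testdb.mdb" is a placeholder name
    Database db = new DatabaseBuilder(new File("testdb.mdb"))
      .setAutoSync(false)   // defer flushing to the jvm/os
      .open();
    try {
      // ... many updates ...
      db.flush();           // force a (fairly) consistent on-disk state
    } finally {
      db.close();
    }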
diff --git a/src/java/com/healthmarketscience/jackcess/impl/DatabaseImpl.java b/src/java/com/healthmarketscience/jackcess/impl/DatabaseImpl.java
index ee2b5ab..dafa0c5 100644
--- a/src/java/com/healthmarketscience/jackcess/impl/DatabaseImpl.java
+++ b/src/java/com/healthmarketscience/jackcess/impl/DatabaseImpl.java
@@ -956,10 +956,17 @@ public class DatabaseImpl implements Database
     validateIdentifierName(linkedTableName, getFormat().MAX_TABLE_NAME_LENGTH,
                            "linked table");

-    int linkedTableId = _tableFinder.getNextFreeSyntheticId();
+    getPageChannel().startWrite();
+    try {
+
+      int linkedTableId = _tableFinder.getNextFreeSyntheticId();
+
+      addNewTable(name, linkedTableId, TYPE_LINKED_TABLE, linkedDbName,
+                  linkedTableName);

-    addNewTable(name, linkedTableId, TYPE_LINKED_TABLE, linkedDbName,
-                linkedTableName);
+    } finally {
+      getPageChannel().finishWrite();
+    }
   }

   /**
diff --git a/src/java/com/healthmarketscience/jackcess/impl/IndexPageCache.java b/src/java/com/healthmarketscience/jackcess/impl/IndexPageCache.java
index 4559988..ef585db 100644
--- a/src/java/com/healthmarketscience/jackcess/impl/IndexPageCache.java
+++ b/src/java/com/healthmarketscience/jackcess/impl/IndexPageCache.java
@@ -575,7 +575,7 @@ public class IndexPageCache
    * @throws IllegalStateException if the entry type does not match the page
    *         type
    */
-  private void validateEntryForPage(DataPageMain dpMain, Entry entry) {
+  private static void validateEntryForPage(DataPageMain dpMain, Entry entry) {
     if(dpMain._leaf != entry.isLeafEntry()) {
       throw new IllegalStateException(
           "Trying to update page with wrong entry type; pageLeaf " +
@@ -978,7 +978,7 @@ public class IndexPageCache
    *
    * @param dpExtra the entries to validate
    */
-  private void validateEntries(DataPageExtra dpExtra) throws IOException {
+  private static void validateEntries(DataPageExtra dpExtra) throws IOException {
     int entrySize = 0;
     Entry prevEntry = IndexData.FIRST_ENTRY;
     for(Entry e : dpExtra._entries) {
@@ -1019,7 +1019,7 @@ public class IndexPageCache
       DataPageMain childMain = _dataPages.get(subPageNumber);
       if(childMain != null) {
         if(childMain._parentPageNumber != null) {
-          if((int)childMain._parentPageNumber != dpMain._pageNumber) {
+          if(childMain._parentPageNumber != dpMain._pageNumber) {
             throw new IllegalStateException("Child's parent is incorrect " +
                                             childMain);
           }
@@ -1069,7 +1069,7 @@ public class IndexPageCache
    * @param dpMain the index page
    * @param peerMain the peer index page
    */
-  private void validatePeerStatus(DataPageMain dpMain, DataPageMain peerMain)
+  private static void validatePeerStatus(DataPageMain dpMain, DataPageMain peerMain)
     throws IOException
   {
     if(dpMain._leaf != peerMain._leaf) {
@@ -1289,7 +1289,7 @@ public class IndexPageCache
   /**
    * IndexPageCache implementation of an Index {@link DataPage}.
    */
-  public static final class CacheDataPage
+  private static final class CacheDataPage
     extends IndexData.DataPage
   {
     public final DataPageMain _main;
@@ -1460,10 +1460,6 @@ public class IndexPageCache
       _childTailEntry = newEntry;
       return old;
     }
-
-    public Entry getChildTailEntry() {
-      return _childTailEntry;
-    }

     private boolean hasChildTail() {
       return(_childTailEntry != null);
diff --git a/src/java/com/healthmarketscience/jackcess/impl/PageChannel.java b/src/java/com/healthmarketscience/jackcess/impl/PageChannel.java
index 79dff31..03611da 100644
--- a/src/java/com/healthmarketscience/jackcess/impl/PageChannel.java
+++ b/src/java/com/healthmarketscience/jackcess/impl/PageChannel.java
@@ -81,6 +81,7 @@ public class PageChannel implements Channel, Flushable {
   /** temp page buffer used when pages cannot be partially encoded */
   private final TempPageHolder _fullPageEncodeBufferH =
     TempPageHolder.newHolder(TempBufferHolder.Type.SOFT);
+  private int _writeCount;

   /**
    * Only used by unit tests
@@ -133,6 +134,28 @@ public class PageChannel implements Channel, Flushable {
   }

   /**
+   * Begins a "logical" write operation.  See {@link #finishWrite} for more
+   * details.
+   */
+  public void startWrite() {
+    ++_writeCount;
+  }
+
+  /**
+   * Completes a "logical" write operation.  This method should be called in
+   * a finally block which wraps a logical write operation (which is preceded
+   * by a {@link #startWrite} call).  Logical write operations may be nested.
+   * If the database is configured for "auto-sync", the channel will be
+   * flushed when the outermost operation is complete.
+   */
+  public void finishWrite() throws IOException {
+    assertWriting();
+    if((--_writeCount == 0) && _autoSync) {
+      flush();
+    }
+  }
+
+  /**
    * Returns the next page number based on the given file size.
    */
   private int getNextPageNumber(long size) {
@@ -203,6 +226,7 @@ public class PageChannel implements Channel, Flushable {
   public void writePage(ByteBuffer page, int pageNumber, int pageOffset)
     throws IOException
   {
+    assertWriting();
     validatePageNumber(pageNumber);

     page.rewind().position(pageOffset);
@@ -253,9 +277,6 @@ public class PageChannel implements Channel, Flushable {
     try {
       _channel.write(encodedPage, (getPageOffset(pageNumber) + pageOffset));
-      if(_autoSync) {
-        flush();
-      }
     } finally {
       if(pageNumber == 0) {
         // de-mask header
@@ -269,6 +290,8 @@ public class PageChannel implements Channel, Flushable {
    * until it is written in a call to {@link #writePage(ByteBuffer,int)}.
    */
   public int allocateNewPage() throws IOException {
+    assertWriting();
+
     // this will force the file to be extended with mostly undefined bytes
     long size = _channel.size();
     if(size >= getFormat().MAX_DATABASE_SIZE) {
@@ -303,6 +326,8 @@ public class PageChannel implements Channel, Flushable {
    * Deallocate a previously used page in the database.
    */
   public void deallocatePage(int pageNumber) throws IOException {
+    assertWriting();
+
     validatePageNumber(pageNumber);

     // don't write the whole page, just wipe out the header (which should be
@@ -364,6 +389,15 @@ public class PageChannel implements Channel, Flushable {
   }

   /**
+   * Asserts that a write operation is in progress.
+   */
+  private void assertWriting() {
+    if(_writeCount <= 0) {
+      throw new IllegalStateException("No write operation in progress");
+    }
+  }
+
+  /**
    * @return a duplicate of the current buffer narrowed to the given position
    *         and limit.  mark will be set at the current position.
    */
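Per the finishWrite Javadoc above, logical writes may be nested, so _writeCount acts as a depth counter: only the transition back to zero triggers the auto-sync flush, and any page-level call outside a bracket now fails fast. A hypothetical trace, assuming auto-sync is enabled ("channel" and the page number are placeholders):

    channel.startWrite();          // _writeCount: 0 -> 1
    channel.startWrite();          //              1 -> 2 (nested operation)
    channel.finishWrite();         //              2 -> 1, no flush yet
    channel.finishWrite();         //              1 -> 0, flush() runs here
    channel.writePage(page, 42);   // throws IllegalStateException via
                                   // assertWriting(): no bracket is open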
diff --git a/src/java/com/healthmarketscience/jackcess/impl/TableCreator.java b/src/java/com/healthmarketscience/jackcess/impl/TableCreator.java
index 194074f..2311d36 100644
--- a/src/java/com/healthmarketscience/jackcess/impl/TableCreator.java
+++ b/src/java/com/healthmarketscience/jackcess/impl/TableCreator.java
@@ -129,15 +129,22 @@ class TableCreator
       }
     }

-    // reserve some pages
-    _tdefPageNumber = reservePageNumber();
-    _umapPageNumber = reservePageNumber();
+    getPageChannel().startWrite();
+    try {
+
+      // reserve some pages
+      _tdefPageNumber = reservePageNumber();
+      _umapPageNumber = reservePageNumber();

-    //Write the tdef page to disk.
-    TableImpl.writeTableDefinition(this);
+      //Write the tdef page to disk.
+      TableImpl.writeTableDefinition(this);

-    // update the database with the new table info
-    _database.addNewTable(_name, _tdefPageNumber, DatabaseImpl.TYPE_TABLE, null, null);
+      // update the database with the new table info
+      _database.addNewTable(_name, _tdefPageNumber, DatabaseImpl.TYPE_TABLE, null, null);
+
+    } finally {
+      getPageChannel().finishWrite();
+    }
   }

   /**
diff --git a/src/java/com/healthmarketscience/jackcess/impl/TableImpl.java b/src/java/com/healthmarketscience/jackcess/impl/TableImpl.java
index 6d26014..f69e8dd 100644
--- a/src/java/com/healthmarketscience/jackcess/impl/TableImpl.java
+++ b/src/java/com/healthmarketscience/jackcess/impl/TableImpl.java
@@ -505,54 +505,61 @@ public class TableImpl implements Table
   {
     requireValidRowId(rowId);

-    // ensure that the relevant row state is up-to-date
-    ByteBuffer rowBuffer = positionAtRowHeader(rowState, rowId);
+    getPageChannel().startWrite();
+    try {
+
+      // ensure that the relevant row state is up-to-date
+      ByteBuffer rowBuffer = positionAtRowHeader(rowState, rowId);

-    if(rowState.isDeleted()) {
-      // don't care about duplicate deletion
-      return;
-    }
-    requireNonDeletedRow(rowState, rowId);
+      if(rowState.isDeleted()) {
+        // don't care about duplicate deletion
+        return;
+      }
+      requireNonDeletedRow(rowState, rowId);

-    // delete flag always gets set in the "header" row (even if data is on
-    // overflow row)
-    int pageNumber = rowState.getHeaderRowId().getPageNumber();
-    int rowNumber = rowState.getHeaderRowId().getRowNumber();
+      // delete flag always gets set in the "header" row (even if data is on
+      // overflow row)
+      int pageNumber = rowState.getHeaderRowId().getPageNumber();
+      int rowNumber = rowState.getHeaderRowId().getRowNumber();

-    // attempt to fill in index column values
-    Object[] rowValues = null;
-    if(!_indexDatas.isEmpty()) {
+      // attempt to fill in index column values
+      Object[] rowValues = null;
+      if(!_indexDatas.isEmpty()) {

-      // move to row data to get index values
-      rowBuffer = positionAtRowData(rowState, rowId);
+        // move to row data to get index values
+        rowBuffer = positionAtRowData(rowState, rowId);

-      for(ColumnImpl idxCol : _indexColumns) {
-        getRowColumn(getFormat(), rowBuffer, idxCol, rowState, null);
-      }
+        for(ColumnImpl idxCol : _indexColumns) {
+          getRowColumn(getFormat(), rowBuffer, idxCol, rowState, null);
+        }

-      // use any read rowValues to help update the indexes
-      rowValues = rowState.getRowValues();
+        // use any read rowValues to help update the indexes
+        rowValues = rowState.getRowValues();

-      // check foreign keys before proceeding w/ deletion
-      _fkEnforcer.deleteRow(rowValues);
+        // check foreign keys before proceeding w/ deletion
+        _fkEnforcer.deleteRow(rowValues);

-      // move back to the header
-      rowBuffer = positionAtRowHeader(rowState, rowId);
-    }
+        // move back to the header
+        rowBuffer = positionAtRowHeader(rowState, rowId);
+      }

-    // finally, pull the trigger
-    int rowIndex = getRowStartOffset(rowNumber, getFormat());
-    rowBuffer.putShort(rowIndex, (short)(rowBuffer.getShort(rowIndex)
-                                         | DELETED_ROW_MASK | OVERFLOW_ROW_MASK));
-    writeDataPage(rowBuffer, pageNumber);
+      // finally, pull the trigger
+      int rowIndex = getRowStartOffset(rowNumber, getFormat());
+      rowBuffer.putShort(rowIndex, (short)(rowBuffer.getShort(rowIndex)
+                                           | DELETED_ROW_MASK | OVERFLOW_ROW_MASK));
+      writeDataPage(rowBuffer, pageNumber);

-    // update the indexes
-    for(IndexData indexData : _indexDatas) {
-      indexData.deleteRow(rowValues, rowId);
-    }
+      // update the indexes
+      for(IndexData indexData : _indexDatas) {
+        indexData.deleteRow(rowValues, rowId);
+      }

-    // make sure table def gets updated
-    updateTableDefinition(-1);
+      // make sure table def gets updated
+      updateTableDefinition(-1);
+
+    } finally {
+      getPageChannel().finishWrite();
+    }
   }

   public Row getNextRow() throws IOException {
@@ -1387,77 +1394,84 @@ public class TableImpl implements Table
       return rows;
     }

-    List<Object[]> dupeRows = null;
-    ByteBuffer[] rowData = new ByteBuffer[rows.size()];
-    int numCols = _columns.size();
-    for (int i = 0; i < rows.size(); i++) {
-
-      // we need to make sure the row is the right length and is an Object[]
-      // (fill with null if too short).  note, if the row is copied the caller
-      // will not be able to access any generated auto-number value, but if
-      // they need that info they should use a row array of the right
-      // size/type!
-      Object[] row = rows.get(i);
-      if((row.length < numCols) || (row.getClass() != Object[].class)) {
-        row = dupeRow(row, numCols);
-        // copy the input rows to a modifiable list so we can update the
-        // elements
-        if(dupeRows == null) {
-          dupeRows = new ArrayList<Object[]>(rows);
-          rows = dupeRows;
+    getPageChannel().startWrite();
+    try {
+
+      List<Object[]> dupeRows = null;
+      ByteBuffer[] rowData = new ByteBuffer[rows.size()];
+      int numCols = _columns.size();
+      for (int i = 0; i < rows.size(); i++) {
+
+        // we need to make sure the row is the right length and is an Object[]
+        // (fill with null if too short).  note, if the row is copied the caller
+        // will not be able to access any generated auto-number value, but if
+        // they need that info they should use a row array of the right
+        // size/type!
+        Object[] row = rows.get(i);
+        if((row.length < numCols) || (row.getClass() != Object[].class)) {
+          row = dupeRow(row, numCols);
+          // copy the input rows to a modifiable list so we can update the
+          // elements
+          if(dupeRows == null) {
+            dupeRows = new ArrayList<Object[]>(rows);
+            rows = dupeRows;
+          }
+          // we copied the row, so put the copy back into the rows list
+          dupeRows.set(i, row);
         }
-        // we copied the row, so put the copy back into the rows list
-        dupeRows.set(i, row);
-      }

-      // fill in autonumbers
-      handleAutoNumbersForAdd(row);
+        // fill in autonumbers
+        handleAutoNumbersForAdd(row);

-      // write the row of data to a temporary buffer
-      rowData[i] = createRow(row,
-                             writeRowBufferH.getPageBuffer(getPageChannel()));
+        // write the row of data to a temporary buffer
+        rowData[i] = createRow(row,
+                               writeRowBufferH.getPageBuffer(getPageChannel()));

-      if (rowData[i].limit() > getFormat().MAX_ROW_SIZE) {
-        throw new IOException("Row size " + rowData[i].limit() +
-                              " is too large");
+        if (rowData[i].limit() > getFormat().MAX_ROW_SIZE) {
+          throw new IOException("Row size " + rowData[i].limit() +
+                                " is too large");
+        }
       }
-    }

-    ByteBuffer dataPage = null;
-    int pageNumber = PageChannel.INVALID_PAGE_NUMBER;
+      ByteBuffer dataPage = null;
+      int pageNumber = PageChannel.INVALID_PAGE_NUMBER;

-    for (int i = 0; i < rowData.length; i++) {
-      int rowSize = rowData[i].remaining();
-      Object[] row = rows.get(i);
+      for (int i = 0; i < rowData.length; i++) {
+        int rowSize = rowData[i].remaining();
+        Object[] row = rows.get(i);

-      // handle foreign keys before adding to table
-      _fkEnforcer.addRow(row);
+        // handle foreign keys before adding to table
+        _fkEnforcer.addRow(row);

-      // get page with space
-      dataPage = findFreeRowSpace(rowSize, dataPage, pageNumber);
-      pageNumber = _addRowBufferH.getPageNumber();
+        // get page with space
+        dataPage = findFreeRowSpace(rowSize, dataPage, pageNumber);
+        pageNumber = _addRowBufferH.getPageNumber();

-      // write out the row data
-      int rowNum = addDataPageRow(dataPage, rowSize, getFormat(), 0);
-      dataPage.put(rowData[i]);
+        // write out the row data
+        int rowNum = addDataPageRow(dataPage, rowSize, getFormat(), 0);
+        dataPage.put(rowData[i]);

-      // update the indexes
-      RowIdImpl rowId = new RowIdImpl(pageNumber, rowNum);
-      for(IndexData indexData : _indexDatas) {
-        indexData.addRow(row, rowId);
-      }
+        // update the indexes
+        RowIdImpl rowId = new RowIdImpl(pageNumber, rowNum);
+        for(IndexData indexData : _indexDatas) {
+          indexData.addRow(row, rowId);
+        }

-      // return rowId if desired
-      if((row.length > numCols) && (row[numCols] == ColumnImpl.RETURN_ROW_ID)) {
-        row[numCols] = rowId;
+        // return rowId if desired
+        if((row.length > numCols) && (row[numCols] == ColumnImpl.RETURN_ROW_ID)) {
+          row[numCols] = rowId;
+        }
       }
-    }

-    writeDataPage(dataPage, pageNumber);
+      writeDataPage(dataPage, pageNumber);

-    // Update tdef page
-    updateTableDefinition(rows.size());
+      // Update tdef page
+      updateTableDefinition(rows.size());

+    } finally {
+      getPageChannel().finishWrite();
+    }
+
     return rows;
   }
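One practical consequence of the hunk above: with auto-sync enabled, a batch insert through addRows now costs a single flush when the outermost finishWrite() completes, instead of one per page write. An illustrative caller (the table name, shape, and values are made up for this sketch):

    // hypothetical batch insert against a two-column table whose first
    // column is an autonumber; under auto-sync this flushes once at the
    // end of addRows() rather than after every page touched
    Table table = db.getTable("Orders");
    List<Object[]> batch = new ArrayList<Object[]>();
    for(int i = 0; i < 1000; ++i) {
      batch.add(new Object[]{null, "item" + i});  // null -> autonumber
    }
    table.addRows(batch);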
@@ -1496,128 +1510,135 @@ public class TableImpl implements Table
   {
     requireValidRowId(rowId);

-    // ensure that the relevant row state is up-to-date
-    ByteBuffer rowBuffer = positionAtRowData(rowState, rowId);
-    int oldRowSize = rowBuffer.remaining();
-
-    requireNonDeletedRow(rowState, rowId);
+    getPageChannel().startWrite();
+    try {
+
+      // ensure that the relevant row state is up-to-date
+      ByteBuffer rowBuffer = positionAtRowData(rowState, rowId);
+      int oldRowSize = rowBuffer.remaining();

-    // we need to make sure the row is the right length & type (fill with
-    // null if too short).
-    if((row.length < _columns.size()) || (row.getClass() != Object[].class)) {
-      row = dupeRow(row, _columns.size());
-    }
+      requireNonDeletedRow(rowState, rowId);

-    // hang on to the raw values of var length columns we are "keeping".  this
-    // will allow us to re-use pre-written var length data, which can save
-    // space for things like long value columns.
-    Map<ColumnImpl,byte[]> keepRawVarValues =
-      (!_varColumns.isEmpty() ? new HashMap<ColumnImpl,byte[]>() : null);
+      // we need to make sure the row is the right length & type (fill with
+      // null if too short).
+      if((row.length < _columns.size()) || (row.getClass() != Object[].class)) {
+        row = dupeRow(row, _columns.size());
+      }

-    for(ColumnImpl column : _columns) {
-      if(_autoNumColumns.contains(column)) {
-        // fill in any auto-numbers (we don't allow autonumber values to be
-        // modified)
-        column.setRowValue(row, getRowColumn(getFormat(), rowBuffer, column,
-                                             rowState, null));
-      } else if(column.getRowValue(row) == Column.KEEP_VALUE) {
-        // fill in any "keep value" fields
-        column.setRowValue(row, getRowColumn(getFormat(), rowBuffer, column,
-                                             rowState, keepRawVarValues));
-      } else if(_indexColumns.contains(column)) {
-        // read row value to help update indexes
-        getRowColumn(getFormat(), rowBuffer, column, rowState, null);
+      // hang on to the raw values of var length columns we are "keeping".  this
+      // will allow us to re-use pre-written var length data, which can save
+      // space for things like long value columns.
+      Map<ColumnImpl,byte[]> keepRawVarValues =
+        (!_varColumns.isEmpty() ? new HashMap<ColumnImpl,byte[]>() : null);
+
+      for(ColumnImpl column : _columns) {
+        if(_autoNumColumns.contains(column)) {
+          // fill in any auto-numbers (we don't allow autonumber values to be
+          // modified)
+          column.setRowValue(row, getRowColumn(getFormat(), rowBuffer, column,
+                                               rowState, null));
+        } else if(column.getRowValue(row) == Column.KEEP_VALUE) {
+          // fill in any "keep value" fields
+          column.setRowValue(row, getRowColumn(getFormat(), rowBuffer, column,
+                                               rowState, keepRawVarValues));
+        } else if(_indexColumns.contains(column)) {
+          // read row value to help update indexes
+          getRowColumn(getFormat(), rowBuffer, column, rowState, null);
+        }
       }
-    }

-    // generate new row bytes
-    ByteBuffer newRowData = createRow(
-        row, _singleRowBufferH.getPageBuffer(getPageChannel()), oldRowSize,
-        keepRawVarValues);
+      // generate new row bytes
+      ByteBuffer newRowData = createRow(
+          row, _singleRowBufferH.getPageBuffer(getPageChannel()), oldRowSize,
+          keepRawVarValues);

-    if (newRowData.limit() > getFormat().MAX_ROW_SIZE) {
-      throw new IOException("Row size " + newRowData.limit() +
-                            " is too large");
-    }
+      if (newRowData.limit() > getFormat().MAX_ROW_SIZE) {
+        throw new IOException("Row size " + newRowData.limit() +
+                              " is too large");
+      }

-    if(!_indexDatas.isEmpty()) {
+      if(!_indexDatas.isEmpty()) {

-      Object[] oldRowValues = rowState.getRowValues();
+        Object[] oldRowValues = rowState.getRowValues();

-      // check foreign keys before actually updating
-      _fkEnforcer.updateRow(oldRowValues, row);
+        // check foreign keys before actually updating
+        _fkEnforcer.updateRow(oldRowValues, row);

-      // delete old values from indexes
-      for(IndexData indexData : _indexDatas) {
-        indexData.deleteRow(oldRowValues, rowId);
+        // delete old values from indexes
+        for(IndexData indexData : _indexDatas) {
+          indexData.deleteRow(oldRowValues, rowId);
+        }
       }
-    }
-    // see if we can squeeze the new row data into the existing row
-    rowBuffer.reset();
-    int rowSize = newRowData.remaining();
+      // see if we can squeeze the new row data into the existing row
+      rowBuffer.reset();
+      int rowSize = newRowData.remaining();

-    ByteBuffer dataPage = null;
-    int pageNumber = PageChannel.INVALID_PAGE_NUMBER;
+      ByteBuffer dataPage = null;
+      int pageNumber = PageChannel.INVALID_PAGE_NUMBER;

-    if(oldRowSize >= rowSize) {
+      if(oldRowSize >= rowSize) {

-      // awesome, slap it in!
-      rowBuffer.put(newRowData);
+        // awesome, slap it in!
+        rowBuffer.put(newRowData);

-      // grab the page we just updated
-      dataPage = rowState.getFinalPage();
-      pageNumber = rowState.getFinalRowId().getPageNumber();
+        // grab the page we just updated
+        dataPage = rowState.getFinalPage();
+        pageNumber = rowState.getFinalRowId().getPageNumber();

-    } else {
+      } else {
+
+        // bummer, need to find a new page for the data
+        dataPage = findFreeRowSpace(rowSize, null,
+                                    PageChannel.INVALID_PAGE_NUMBER);
+        pageNumber = _addRowBufferH.getPageNumber();

-      // bummer, need to find a new page for the data
-      dataPage = findFreeRowSpace(rowSize, null,
-                                  PageChannel.INVALID_PAGE_NUMBER);
-      pageNumber = _addRowBufferH.getPageNumber();
+        RowIdImpl headerRowId = rowState.getHeaderRowId();
+        ByteBuffer headerPage = rowState.getHeaderPage();
+        if(pageNumber == headerRowId.getPageNumber()) {
+          // new row is on the same page as header row, share page
+          dataPage = headerPage;
+        }

-      RowIdImpl headerRowId = rowState.getHeaderRowId();
-      ByteBuffer headerPage = rowState.getHeaderPage();
-      if(pageNumber == headerRowId.getPageNumber()) {
-        // new row is on the same page as header row, share page
-        dataPage = headerPage;
+        // write out the new row data (set the deleted flag on the new data row
+        // so that it is ignored during normal table traversal)
+        int rowNum = addDataPageRow(dataPage, rowSize, getFormat(),
+                                    DELETED_ROW_MASK);
+        dataPage.put(newRowData);
+
+        // write the overflow info into the header row and clear out the
+        // remaining header data
+        rowBuffer = PageChannel.narrowBuffer(
+            headerPage,
+            findRowStart(headerPage, headerRowId.getRowNumber(), getFormat()),
+            findRowEnd(headerPage, headerRowId.getRowNumber(), getFormat()));
+        rowBuffer.put((byte)rowNum);
+        ByteUtil.put3ByteInt(rowBuffer, pageNumber);
+        ByteUtil.clearRemaining(rowBuffer);
+
+        // set the overflow flag on the header row
+        int headerRowIndex = getRowStartOffset(headerRowId.getRowNumber(),
+                                               getFormat());
+        headerPage.putShort(headerRowIndex,
+                            (short)(headerPage.getShort(headerRowIndex)
+                                    | OVERFLOW_ROW_MASK));
+        if(pageNumber != headerRowId.getPageNumber()) {
+          writeDataPage(headerPage, headerRowId.getPageNumber());
+        }
       }

-      // write out the new row data (set the deleted flag on the new data row
-      // so that it is ignored during normal table traversal)
-      int rowNum = addDataPageRow(dataPage, rowSize, getFormat(),
-                                  DELETED_ROW_MASK);
-      dataPage.put(newRowData);
-
-      // write the overflow info into the header row and clear out the
-      // remaining header data
-      rowBuffer = PageChannel.narrowBuffer(
-          headerPage,
-          findRowStart(headerPage, headerRowId.getRowNumber(), getFormat()),
-          findRowEnd(headerPage, headerRowId.getRowNumber(), getFormat()));
-      rowBuffer.put((byte)rowNum);
-      ByteUtil.put3ByteInt(rowBuffer, pageNumber);
-      ByteUtil.clearRemaining(rowBuffer);
-
-      // set the overflow flag on the header row
-      int headerRowIndex = getRowStartOffset(headerRowId.getRowNumber(),
-                                             getFormat());
-      headerPage.putShort(headerRowIndex,
-                          (short)(headerPage.getShort(headerRowIndex)
-                                  | OVERFLOW_ROW_MASK));
-      if(pageNumber != headerRowId.getPageNumber()) {
-        writeDataPage(headerPage, headerRowId.getPageNumber());
-      }
-    }

-    // update the indexes
-    for(IndexData indexData : _indexDatas) {
-      indexData.addRow(row, rowId);
-    }
+      // update the indexes
+      for(IndexData indexData : _indexDatas) {
+        indexData.addRow(row, rowId);
+      }

-    writeDataPage(dataPage, pageNumber);
+      writeDataPage(dataPage, pageNumber);

-    updateTableDefinition(0);
+      updateTableDefinition(0);
+
+    } finally {
+      getPageChannel().finishWrite();
+    }

     return row;
   }