</properties>
<body>
<release version="2.0.2" date="TBD">
+ <action dev="jahlborn" type="fix" system="SourceForge2" issue="99">
+ Rework row add/update so that constraint violations do not leave
+ behind partially written rows.
+ </action>
<action dev="jahlborn" type="update">
Add ConstraintViolationException to distinguish exceptions due to
violating database constraints from other random errors.
--- /dev/null
+/*
+Copyright (c) 2013 James Ahlborn
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 2.1 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Lesser General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with this library; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+USA
+*/
+
+package com.healthmarketscience.jackcess;
+
+/**
+ * JackcessException which is thrown from multi-add-row {@link Table} methods
+ * and indicates how many rows were successfully written before the
+ * underlying failure was encountered.
+ *
+ * @author James Ahlborn
+ */
+public class BatchUpdateException extends JackcessException
+{
+ private static final long serialVersionUID = 20131123L;
+
+ /** number of rows successfully written before the underlying failure */
+ private final int _updateCount;
+
+ /**
+ * @param updateCount number of rows successfully written before the failure
+ * @param cause the underlying failure
+ */
+ public BatchUpdateException(int updateCount, Throwable cause) {
+ super(cause);
+ _updateCount = updateCount;
+ }
+
+ /**
+ * Returns the number of rows which were successfully written before the
+ * underlying failure was encountered.
+ */
+ public int getUpdateCount() {
+ return _updateCount;
+ }
+}
package com.healthmarketscience.jackcess;
-import java.io.IOException;
-
/**
- * IOException which indicates that the failure was caused by a database
+ * JackcessException which indicates that the failure was caused by a database
* constraint violation.
*
* @author James Ahlborn
*/
-public class ConstraintViolationException extends IOException
+public class ConstraintViolationException extends JackcessException
{
- private static final long serialVersionUID = 20131113L;
+ private static final long serialVersionUID = 20131123L;
public ConstraintViolationException(String msg) {
super(msg);
--- /dev/null
+/*
+Copyright (c) 2013 James Ahlborn
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 2.1 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Lesser General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with this library; if not, write to the Free Software
+Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+USA
+*/
+
+package com.healthmarketscience.jackcess;
+
+import java.io.IOException;
+
+/**
+ * Base class for specific exceptions thrown by Jackcess.
+ *
+ * @author James Ahlborn
+ */
+public class JackcessException extends IOException
+{
+ private static final long serialVersionUID = 20131123L;
+
+ /** @param message description of the failure */
+ public JackcessException(String message) {
+ super(message);
+ }
+
+ /** @param cause the underlying failure */
+ public JackcessException(Throwable cause) {
+ super(cause);
+ }
+
+ /**
+ * @param message description of the failure
+ * @param cause the underlying failure
+ */
+ public JackcessException(String message, Throwable cause) {
+ super(message, cause);
+ }
+}
* Note, if this table has an auto-number column, the values written will be
* put back into the given row arrays (assuming the given row array is at
* least as long as the number of Columns in this Table).
+ * <p>
+ * Most exceptions thrown from this method will be wrapped with a {@link
+ * BatchUpdateException} which gives useful information in the case of a
+ * partially successful write.
*
* @see #addRow(Object...) for more details on row arrays
*
* <p/>
* Note, if this table has an auto-number column, the values generated will
* be put back into the appropriate row maps.
+ * <p>
+ * Most exceptions thrown from this method will be wrapped with a {@link
+ * BatchUpdateException} which gives useful information in the case of a
+ * partially successful write.
+ *
* @return the given row map list, where the row maps will contain any
* autonumbers generated
* @usage _general_method_
}
/**
- * Adds a row to this index
+ * Prepares to add a row to this index. All constraints are checked before
+ * this method returns.
* <p>
* Forces index initialization.
*
* @param row Row to add
* @param rowId rowId of the row to be added
+ *
+ * @return a PendingChange which can complete the addition or roll it back
*/
- public void addRow(Object[] row, RowIdImpl rowId)
+ public PendingChange prepareAddRow(Object[] row, RowIdImpl rowId,
+ PendingChange nextChange)
+ throws IOException
+ {
+ return prepareAddRow(row, rowId, new AddRowPendingChange(nextChange));
+ }
+
+ private PendingChange prepareAddRow(Object[] row, RowIdImpl rowId,
+ AddRowPendingChange change)
throws IOException
{
int nullCount = countNullValues(row);
boolean isNullEntry = (nullCount == _columns.size());
if(shouldIgnoreNulls() && isNullEntry) {
// nothing to do
- return;
+ return change;
}
if(isBackingPrimaryKey() && (nullCount > 0)) {
throw new ConstraintViolationException(
// make sure we've parsed the entries
initialize();
- Entry newEntry = new Entry(createEntryBytes(row), rowId);
- if(addEntry(newEntry, isNullEntry, row)) {
- ++_modCount;
- } else {
- LOG.warn("Added duplicate index entry " + newEntry + " for row: " +
- Arrays.asList(row));
- }
+ return prepareAddEntry(new Entry(createEntryBytes(row), rowId), isNullEntry,
+ row, change);
}
/**
* Adds an entry to the correct index dataPage, maintaining the order.
*/
- private boolean addEntry(Entry newEntry, boolean isNullEntry, Object[] row)
+ private PendingChange prepareAddEntry(Entry newEntry, boolean isNullEntry,
+ Object[] row, AddRowPendingChange change)
throws IOException
{
DataPage dataPage = findDataPage(newEntry);
int idx = dataPage.findEntry(newEntry);
if(idx < 0) {
+
// this is a new entry
idx = missingIndexToInsertionPoint(idx);
" violates uniqueness constraint for index " + this);
}
+ change.setAddRow(newEntry, dataPage, idx, isDupeEntry);
+ }
+ return change;
+ }
+
+ /**
+ * Completes a prepared row addition, inserting the entry captured by
+ * setAddRow into its data page. A null newEntry means no entry needs to be
+ * inserted (e.g. the change was rolled back or was a pure duplicate).
+ */
+ private void commitAddRow(Entry newEntry, DataPage dataPage, int idx,
+ boolean isDupeEntry)
+ throws IOException
+ {
+ if(newEntry != null) {
+ dataPage.addEntry(idx, newEntry);
if(!isDupeEntry) {
++_uniqueEntryCount;
}
+ ++_modCount;
+ } else {
+ // NOTE(review): newEntry is always null in this branch, so the message
+ // logs "null" for the entry -- confirm whether the duplicate entry
+ // should instead be captured at prepare time for logging here
+ LOG.warn("Added duplicate index entry " + newEntry);
+ }
+ }
- dataPage.addEntry(idx, newEntry);
- return true;
+ /**
+ * Prepares to update a row in this index. All constraints are checked
+ * before this method returns.
+ * <p>
+ * Forces index initialization.
+ *
+ * @param oldRow Row to be removed
+ * @param newRow Row to be added
+ * @param rowId rowId of the row to be updated
+ *
+ * @return a PendingChange which can complete the update or roll it back
+ */
+ public PendingChange prepareUpdateRow(Object[] oldRow, RowIdImpl rowId,
+ Object[] newRow,
+ PendingChange nextChange)
+ throws IOException
+ {
+ UpdateRowPendingChange change = new UpdateRowPendingChange(nextChange);
+ change.setDeletedRow(deleteRowImpl(oldRow, rowId));
+
+ try {
+ prepareAddRow(newRow, rowId, change);
+ return change;
+ } catch(ConstraintViolationException e) {
+ // need to undo the deletion before bailing
+ change.rollback();
+ throw e;
}
- return false;
}
/**
*/
public void deleteRow(Object[] row, RowIdImpl rowId)
throws IOException
+ {
+ deleteRowImpl(row, rowId);
+ }
+
+ private Entry deleteRowImpl(Object[] row, RowIdImpl rowId)
+ throws IOException
{
int nullCount = countNullValues(row);
if(shouldIgnoreNulls() && (nullCount == _columns.size())) {
// nothing to do
- return;
+ return null;
}
// make sure we've parsed the entries
initialize();
Entry oldEntry = new Entry(createEntryBytes(row), rowId);
- if(removeEntry(oldEntry)) {
+ Entry removedEntry = removeEntry(oldEntry);
+ if(removedEntry != null) {
++_modCount;
} else {
LOG.warn("Failed removing index entry " + oldEntry + " for row: " +
Arrays.asList(row));
}
+ return removedEntry;
}
+ /**
+ * Undoes a previous row deletion by re-inserting the removed entry.
+ *
+ * @param removedEntry the entry returned by the deletion, or {@code null}
+ * if the deletion made no change (nothing to undo)
+ */
+ private void rollbackDeletedRow(Entry removedEntry)
+ throws IOException
+ {
+ if(removedEntry == null) {
+ // no change was made
+ return;
+ }
+
+ // unfortunately, stuff might have shuffled around when we first removed
+ // the row, so in order to re-insert it, we need to re-find and insert it.
+ DataPage dataPage = findDataPage(removedEntry);
+ int idx = dataPage.findEntry(removedEntry);
+ if(idx < 0) {
+ dataPage.addEntry(missingIndexToInsertionPoint(idx), removedEntry);
+ }
+ // NOTE(review): if idx >= 0 the entry already exists on the page and the
+ // rollback is a no-op -- presumably only possible for duplicate entries;
+ // confirm
+ }
+
/**
* Removes an entry from the relevant index dataPage, maintaining the order.
* Will search by RowId if entry is not found (in case a partial entry was
* provided).
*/
- private boolean removeEntry(Entry oldEntry)
+ private Entry removeEntry(Entry oldEntry)
throws IOException
{
DataPage dataPage = findDataPage(oldEntry);
doRemove = true;
}
+ Entry removedEntry = null;
if(doRemove) {
// found it!
- dataPage.removeEntry(idx);
+ removedEntry = dataPage.removeEntry(idx);
}
- return doRemove;
+ return removedEntry;
+ }
+
+ /**
+ * Completes the given chain of pending changes (walked via getNext).
+ */
+ public static void commitAll(PendingChange change) throws IOException {
+ while(change != null) {
+ change.commit();
+ change = change.getNext();
+ }
+ }
+
+ /**
+ * Undoes the given chain of pending changes (walked via getNext).
+ * NOTE(review): if one rollback throws, the remaining changes in the chain
+ * are not rolled back -- confirm this is the intended failure behavior.
+ */
+ public static void rollbackAll(PendingChange change) throws IOException {
+ while(change != null) {
+ change.rollback();
+ change = change.getNext();
+ }
}
/**
public abstract void addEntry(int idx, Entry entry)
throws IOException;
- public abstract void removeEntry(int idx)
+ public abstract Entry removeEntry(int idx)
throws IOException;
public final boolean isEmpty() {
@Override
public void addEntry(int idx, Entry entry) { }
@Override
- public void removeEntry(int idx) { }
+ public Entry removeEntry(int idx) { return null; }
+ }
+
+ /**
+ * Utility class which maintains information about a pending index update.
+ * An instance of this class can be used to complete the change (by calling
+ * {@link #commit}) or undo the change (by calling {@link #rollback}).
+ */
+ public static abstract class PendingChange
+ {
+ /** next change in the chain for a multi-index row update, if any */
+ private final PendingChange _next;
+
+ private PendingChange(PendingChange next) {
+ _next = next;
+ }
+
+ /**
+ * Returns the next pending change, if any
+ */
+ public PendingChange getNext() {
+ return _next;
+ }
+
+ /**
+ * Completes the pending change.
+ */
+ public abstract void commit() throws IOException;
+
+ /**
+ * Undoes the pending change.
+ */
+ public abstract void rollback() throws IOException;
+ }
+
+ /**
+ * PendingChange for a row addition. Nothing is written to the index until
+ * {@link #commit}, so the prepared state is merely cached here.
+ */
+ private class AddRowPendingChange extends PendingChange
+ {
+ // entry/page/idx captured by setAddRow, consumed by commit
+ private Entry _addEntry;
+ private DataPage _addDataPage;
+ private int _addIdx;
+ private boolean _isDupe;
+
+ private AddRowPendingChange(PendingChange next) {
+ super(next);
+ }
+
+ public void setAddRow(Entry addEntry, DataPage dataPage, int idx,
+ boolean isDupe) {
+ _addEntry = addEntry;
+ _addDataPage = dataPage;
+ _addIdx = idx;
+ _isDupe = isDupe;
+ }
+
+ @Override
+ public void commit() throws IOException {
+ commitAddRow(_addEntry, _addDataPage, _addIdx, _isDupe);
+ }
+
+ @Override
+ public void rollback() throws IOException {
+ // since nothing has been written yet, rollback just discards the
+ // prepared state (a subsequent commit becomes a no-op)
+ _addEntry = null;
+ _addDataPage = null;
+ _addIdx = -1;
+ }
+ }
+
+ /**
+ * PendingChange for a row update (which is essentially a deletion followed
+ * by an addition). Unlike the addition, the deletion happens at prepare
+ * time, so rollback must re-insert the removed entry.
+ */
+ private class UpdateRowPendingChange extends AddRowPendingChange
+ {
+ // entry removed at prepare time, or null if the deletion was a no-op
+ private Entry _removedEntry;
+
+ private UpdateRowPendingChange(PendingChange next) {
+ super(next);
+ }
+
+ public void setDeletedRow(Entry removedEntry) {
+ _removedEntry = removedEntry;
+ }
+
+ @Override
+ public void rollback() throws IOException {
+ // discard the pending addition, then undo the already-applied deletion
+ super.rollback();
+ rollbackDeletedRow(_removedEntry);
+ }
}
}
* @param cacheDataPage the page from which to remove the entry
* @param entryIdx the index of the entry to remove
*/
- private void removeEntry(CacheDataPage cacheDataPage, int entryIdx)
+ private Entry removeEntry(CacheDataPage cacheDataPage, int entryIdx)
throws IOException
{
- updateEntry(cacheDataPage, entryIdx, null, UpdateType.REMOVE);
+ return updateEntry(cacheDataPage, entryIdx, null, UpdateType.REMOVE);
}
/**
* @param newEntry the entry to add/replace
* @param upType the type of update to make
*/
- private void updateEntry(CacheDataPage cacheDataPage,
- int entryIdx,
- Entry newEntry,
- UpdateType upType)
+ private Entry updateEntry(CacheDataPage cacheDataPage,
+ int entryIdx,
+ Entry newEntry,
+ UpdateType upType)
throws IOException
{
DataPageMain dpMain = cacheDataPage._main;
if(dpExtra._entryView.isEmpty()) {
// this page is dead
removeDataPage(parentDataPage, cacheDataPage, oldLastEntry);
- return;
+ return oldEntry;
}
// determine if we need to update our parent page
if(!updateLast || dpMain.isRoot()) {
// no parent
- return;
+ return oldEntry;
}
// the update to the last entry needs to be propagated to our parent
replaceParentEntry(parentDataPage, cacheDataPage, oldLastEntry);
+ return oldEntry;
}
/**
}
@Override
- public void removeEntry(int idx) throws IOException {
- _main.getCache().removeEntry(this, idx);
+ public Entry removeEntry(int idx) throws IOException {
+ return _main.getCache().removeEntry(this, idx);
}
}
import java.util.Map;
import java.util.Set;
+import com.healthmarketscience.jackcess.BatchUpdateException;
import com.healthmarketscience.jackcess.Column;
import com.healthmarketscience.jackcess.ColumnBuilder;
+import com.healthmarketscience.jackcess.ConstraintViolationException;
import com.healthmarketscience.jackcess.CursorBuilder;
import com.healthmarketscience.jackcess.DataType;
import com.healthmarketscience.jackcess.IndexBuilder;
+import com.healthmarketscience.jackcess.JackcessException;
import com.healthmarketscience.jackcess.PropertyMap;
import com.healthmarketscience.jackcess.Row;
import com.healthmarketscience.jackcess.RowId;
/** page buffer used to update the table def page */
private final TempPageHolder _tableDefBufferH =
TempPageHolder.newHolder(TempBufferHolder.Type.SOFT);
- /** buffer used to writing single rows of data */
- private final TempBufferHolder _singleRowBufferH =
+ /** buffer used to writing rows of data */
+ private final TempBufferHolder _writeRowBufferH =
TempBufferHolder.newHolder(TempBufferHolder.Type.SOFT, true);
- /** "buffer" used to writing multi rows of data (will create new buffer on
- every call) */
- private final TempBufferHolder _multiRowBufferH =
- TempBufferHolder.newHolder(TempBufferHolder.Type.NONE, true);
/** page buffer used to write out-of-row "long value" data */
private final TempPageHolder _longValueBufferH =
TempPageHolder.newHolder(TempBufferHolder.Type.SOFT);
}
public Object[] addRow(Object... row) throws IOException {
- return addRows(Collections.singletonList(row), _singleRowBufferH).get(0);
+ return addRows(Collections.singletonList(row), false).get(0);
}
public <M extends Map<String,Object>> M addRowFromMap(M row)
public List<? extends Object[]> addRows(List<? extends Object[]> rows)
throws IOException
{
- return addRows(rows, _multiRowBufferH);
+ return addRows(rows, true);
}
public <M extends Map<String,Object>> List<M> addRowsFromMaps(List<M> rows)
* Add multiple rows to this table, only writing to disk after all
* rows have been written, and every time a data page is filled.
* @param inRows List of Object[] row values
- * @param writeRowBufferH TempBufferHolder used to generate buffers for
- * writing the row data
*/
private List<? extends Object[]> addRows(List<? extends Object[]> rows,
- TempBufferHolder writeRowBufferH)
+ final boolean isBatchWrite)
throws IOException
{
if(rows.isEmpty()) {
getPageChannel().startWrite();
try {
- List<Object[]> dupeRows = null;
- ByteBuffer[] rowData = new ByteBuffer[rows.size()];
- int numCols = _columns.size();
- for (int i = 0; i < rows.size(); i++) {
-
- // we need to make sure the row is the right length and is an Object[]
- // (fill with null if too short). note, if the row is copied the caller
- // will not be able to access any generated auto-number value, but if
- // they need that info they should use a row array of the right
- // size/type!
- Object[] row = rows.get(i);
- if((row.length < numCols) || (row.getClass() != Object[].class)) {
- row = dupeRow(row, numCols);
- // copy the input rows to a modifiable list so we can update the
- // elements
- if(dupeRows == null) {
- dupeRows = new ArrayList<Object[]>(rows);
- rows = dupeRows;
+ ByteBuffer dataPage = null;
+ int pageNumber = PageChannel.INVALID_PAGE_NUMBER;
+ int updateCount = 0;
+ try {
+
+ List<Object[]> dupeRows = null;
+ final int numCols = _columns.size();
+ for (int i = 0; i < rows.size(); i++) {
+
+ // we need to make sure the row is the right length and is an
+ // Object[] (fill with null if too short). note, if the row is
+ // copied the caller will not be able to access any generated
+ // auto-number value, but if they need that info they should use a
+ // row array of the right size/type!
+ Object[] row = rows.get(i);
+ if((row.length < numCols) || (row.getClass() != Object[].class)) {
+ row = dupeRow(row, numCols);
+ // copy the input rows to a modifiable list so we can update the
+ // elements
+ if(dupeRows == null) {
+ dupeRows = new ArrayList<Object[]>(rows);
+ rows = dupeRows;
+ }
+ // we copied the row, so put the copy back into the rows list
+ dupeRows.set(i, row);
}
- // we copied the row, so put the copy back into the rows list
- dupeRows.set(i, row);
- }
- // fill in autonumbers
- handleAutoNumbersForAdd(row);
+ // fill in autonumbers
+ handleAutoNumbersForAdd(row);
- // write the row of data to a temporary buffer
- rowData[i] = createRow(row,
- writeRowBufferH.getPageBuffer(getPageChannel()));
+ // write the row of data to a temporary buffer
+ ByteBuffer rowData = createRow(
+ row, _writeRowBufferH.getPageBuffer(getPageChannel()));
- if (rowData[i].limit() > getFormat().MAX_ROW_SIZE) {
- throw new IOException("Row size " + rowData[i].limit() +
- " is too large");
- }
- }
+ int rowSize = rowData.remaining();
+ if (rowSize > getFormat().MAX_ROW_SIZE) {
+ throw new IOException("Row size " + rowSize + " is too large");
+ }
- ByteBuffer dataPage = null;
- int pageNumber = PageChannel.INVALID_PAGE_NUMBER;
-
- for (int i = 0; i < rowData.length; i++) {
- int rowSize = rowData[i].remaining();
- Object[] row = rows.get(i);
+ // get page with space
+ dataPage = findFreeRowSpace(rowSize, dataPage, pageNumber);
+ pageNumber = _addRowBufferH.getPageNumber();
- // handle foreign keys before adding to table
- _fkEnforcer.addRow(row);
+ // determine where this row will end up on the page
+ int rowNum = getRowsOnDataPage(dataPage, getFormat());
- // get page with space
- dataPage = findFreeRowSpace(rowSize, dataPage, pageNumber);
- pageNumber = _addRowBufferH.getPageNumber();
+ RowIdImpl rowId = new RowIdImpl(pageNumber, rowNum);
+
+ // before we actually write the row data, we verify all the database
+ // constraints.
+ if(!_indexDatas.isEmpty()) {
- // write out the row data
- int rowNum = addDataPageRow(dataPage, rowSize, getFormat(), 0);
- dataPage.put(rowData[i]);
+ IndexData.PendingChange idxChange = null;
+ try {
- // update the indexes
- RowIdImpl rowId = new RowIdImpl(pageNumber, rowNum);
- for(IndexData indexData : _indexDatas) {
- indexData.addRow(row, rowId);
+ // handle foreign keys before adding to table
+ _fkEnforcer.addRow(row);
+
+ // prepare index updates
+ for(IndexData indexData : _indexDatas) {
+ idxChange = indexData.prepareAddRow(row, rowId, idxChange);
+ }
+
+ // complete index updates
+ IndexData.commitAll(idxChange);
+
+ } catch(ConstraintViolationException ce) {
+ IndexData.rollbackAll(idxChange);
+ throw ce;
+ }
+ }
+
+ // we have satisfied all the constraints, write the row
+ addDataPageRow(dataPage, rowSize, getFormat(), 0);
+ dataPage.put(rowData);
+
+ // return rowId if desired
+ if((row.length > numCols) &&
+ (row[numCols] == ColumnImpl.RETURN_ROW_ID)) {
+ row[numCols] = rowId;
+ }
+
+ ++updateCount;
}
- // return rowTd if desired
- if((row.length > numCols) && (row[numCols] == ColumnImpl.RETURN_ROW_ID)) {
- row[numCols] = rowId;
+ writeDataPage(dataPage, pageNumber);
+
+ // Update tdef page
+ updateTableDefinition(rows.size());
+
+ } catch(Exception rowWriteFailure) {
+
+ if(!isBatchWrite) {
+ // just re-throw the original exception
+ if(rowWriteFailure instanceof IOException) {
+ throw (IOException)rowWriteFailure;
+ }
+ throw (RuntimeException)rowWriteFailure;
}
- }
- writeDataPage(dataPage, pageNumber);
+ // attempt to resolve a partial batch write
+ if(isWriteFailure(rowWriteFailure)) {
+
+ // we don't really know the status of any of the rows, so clear the
+ // update count
+ updateCount = 0;
+
+ } else if(updateCount > 0) {
+
+ // attempt to flush the rows already written to disk
+ try {
+
+ writeDataPage(dataPage, pageNumber);
- // Update tdef page
- updateTableDefinition(rows.size());
+ // Update tdef page
+ updateTableDefinition(updateCount);
+
+ } catch(Exception flushFailure) {
+ // the flush failure is "worse" as it implies possible database
+ // corruption (failed write vs. a row failure which was not a
+ // write failure). we don't know the status of any rows at this
+ // point (and the original failure is probably irrelevant)
+ LOG.warn("Secondary row failure which preceded the write failure",
+ rowWriteFailure);
+ updateCount = 0;
+ rowWriteFailure = flushFailure;
+ }
+ }
+
+ throw new BatchUpdateException(updateCount, rowWriteFailure);
+ }
} finally {
getPageChannel().finishWrite();
return rows;
}
+
+ private static boolean isWriteFailure(Throwable t) {
+ while(t != null) {
+ if((t instanceof IOException) && !(t instanceof JackcessException)) {
+ return true;
+ }
+ t = t.getCause();
+ }
+ // some other sort of exception which is not a write failure
+ return false;
+ }
public Row updateRow(Row row) throws IOException {
return updateRowFromMap(
// generate new row bytes
ByteBuffer newRowData = createRow(
- row, _singleRowBufferH.getPageBuffer(getPageChannel()), oldRowSize,
+ row, _writeRowBufferH.getPageBuffer(getPageChannel()), oldRowSize,
keepRawVarValues);
if (newRowData.limit() > getFormat().MAX_ROW_SIZE) {
if(!_indexDatas.isEmpty()) {
- Object[] oldRowValues = rowState.getRowValues();
+ IndexData.PendingChange idxChange = null;
+ try {
+
+ Object[] oldRowValues = rowState.getRowValues();
+
+ // check foreign keys before actually updating
+ _fkEnforcer.updateRow(oldRowValues, row);
- // check foreign keys before actually updating
- _fkEnforcer.updateRow(oldRowValues, row);
+ // prepare index updates
+ for(IndexData indexData : _indexDatas) {
+ idxChange = indexData.prepareUpdateRow(oldRowValues, rowId, row,
+ idxChange);
+ }
+
+ // complete index updates
+ IndexData.commitAll(idxChange);
- // delete old values from indexes
- for(IndexData indexData : _indexDatas) {
- indexData.deleteRow(oldRowValues, rowId);
+ } catch(ConstraintViolationException ce) {
+ IndexData.rollbackAll(idxChange);
+ throw ce;
}
}
}
}
- // update the indexes
- for(IndexData indexData : _indexDatas) {
- indexData.addRow(row, rowId);
- }
-
writeDataPage(dataPage, pageNumber);
updateTableDefinition(0);
IOException failure = null;
try {
- ((IndexImpl)index).getIndexData().addRow(row, new RowIdImpl(400 + i, 0));
+ ((IndexImpl)index).getIndexData().prepareAddRow(
+ row, new RowIdImpl(400 + i, 0), null).commit();
} catch(IOException e) {
failure = e;
}
}
}
+ // Verifies that constraint violations on single-row add, batch add, and
+ // row update leave the table and all indexes unchanged (no partial rows).
+ public void testConstraintViolation() throws Exception
+ {
+ for (final FileFormat fileFormat : SUPPORTED_FILEFORMATS) {
+ Database db = create(fileFormat);
+
+ Table t = new TableBuilder("TestTable")
+ .addColumn(new ColumnBuilder("id", DataType.LONG))
+ .addColumn(new ColumnBuilder("data", DataType.TEXT))
+ .addIndex(new IndexBuilder(IndexBuilder.PRIMARY_KEY_NAME)
+ .addColumns("id").setPrimaryKey())
+ .addIndex(new IndexBuilder("data_ind")
+ .addColumns("data").setUnique())
+ .toTable(db);
+
+ for(int i = 0; i < 5; ++i) {
+ t.addRow(i, "row" + i);
+ }
+
+ // single-row add violating the primary key (duplicate id 3)
+ try {
+ t.addRow(3, "badrow");
+ fail("ConstraintViolationException should have been thrown");
+ } catch(ConstraintViolationException ce) {
+ // success
+ }
+
+ // the failed add must not have changed the table
+ assertEquals(5, t.getRowCount());
+
+ List<Row> expectedRows =
+ createExpectedTable(
+ createExpectedRow(
+ "id", 0, "data", "row0"),
+ createExpectedRow(
+ "id", 1, "data", "row1"),
+ createExpectedRow(
+ "id", 2, "data", "row2"),
+ createExpectedRow(
+ "id", 3, "data", "row3"),
+ createExpectedRow(
+ "id", 4, "data", "row4"));
+
+ assertTable(expectedRows, t);
+
+ IndexCursor pkCursor = CursorBuilder.createCursor(t.getPrimaryKeyIndex());
+ assertCursor(expectedRows, pkCursor);
+
+ assertCursor(expectedRows,
+ CursorBuilder.createCursor(t.getIndex("data_ind")));
+
+ // batch add where the third row violates the unique "data_ind" index;
+ // the first two rows should be kept, the rest discarded
+ List<Object[]> batch = new ArrayList<Object[]>();
+ batch.add(new Object[]{5, "row5"});
+ batch.add(new Object[]{6, "row6"});
+ batch.add(new Object[]{7, "row2"});
+ batch.add(new Object[]{8, "row8"});
+
+ try {
+ t.addRows(batch);
+ fail("BatchUpdateException should have been thrown");
+ } catch(BatchUpdateException be) {
+ // success
+ assertTrue(be.getCause() instanceof ConstraintViolationException);
+ assertEquals(2, be.getUpdateCount());
+ }
+
+ expectedRows = new ArrayList<Row>(expectedRows);
+ expectedRows.add(createExpectedRow("id", 5, "data", "row5"));
+ expectedRows.add(createExpectedRow("id", 6, "data", "row6"));
+
+ assertTable(expectedRows, t);
+
+ assertCursor(expectedRows, pkCursor);
+
+ assertCursor(expectedRows,
+ CursorBuilder.createCursor(t.getIndex("data_ind")));
+
+ // row update violating the primary key (id 4 -> duplicate id 3)
+ pkCursor.findFirstRowByEntry(4);
+ Row row4 = pkCursor.getCurrentRow();
+
+ row4.put("id", 3);
+
+ try {
+ t.updateRow(row4);
+ fail("ConstraintViolationException should have been thrown");
+ } catch(ConstraintViolationException ce) {
+ // success
+ }
+
+ // the failed update must have been fully rolled back
+ assertTable(expectedRows, t);
+
+ assertCursor(expectedRows, pkCursor);
+
+ assertCursor(expectedRows,
+ CursorBuilder.createCursor(t.getIndex("data_ind")));
+
+ db.close();
+ }
+ }
+
+
private void doCheckForeignKeyIndex(Table ta, Index ia, Table tb)
throws Exception
{