private final Table _table;
/** Page number of the index data */
private int _pageNumber;
- /** Number of rows in the index
- NOTE: this does not actually seem to be the row count, unclear what the
- value means*/
- private int _rowCount;
+ /** offset within the tableDefinition buffer of the uniqueEntryCount for
+ this index */
+ private final int _uniqueEntryCountOffset;
+ /** The number of unique entries which have been added to this index. note,
+ however, that it is never decremented, only incremented (as observed in
+ Access). */
+ private int _uniqueEntryCount;
/** sorted collection of index entries. this is kept in a list instead of a
SortedSet because the SortedSet has lame traversal utilities */
private final List<Entry> _entries = new ArrayList<Entry>();
/** FIXME, for now, we can't write multi-page indexes or indexes using the funky primary key compression scheme */
boolean _readOnly;
- public Index(Table table) {
+ public Index(Table table, int uniqueEntryCount, int uniqueEntryCountOffset) {
_table = table;
+ _uniqueEntryCount = uniqueEntryCount;
+ _uniqueEntryCountOffset = uniqueEntryCountOffset;
}
/**
 * Returns the table on which this index is defined.
 *
 * @return the owning {@code Table}, as supplied to the constructor
 */
public Table getTable() {
    // Must return the _table field set in the constructor; the previous body
    // returned _indexFlags, which is not the table reference.
    return _table;
}
- public void setRowCount(int rowCount) {
- _rowCount = rowCount;
+ public int getUniqueEntryCount() {
+ return _uniqueEntryCount;
}
- public int getRowCount() {
- return _rowCount;
+ public int getUniqueEntryCountOffset() {
+ return _uniqueEntryCountOffset;
}
public String getName() {
Entry newEntry = new Entry(createEntryBytes(row), rowId);
if(addEntry(newEntry, isNullEntry, row)) {
- ++_rowCount;
++_modCount;
} else {
LOG.warn("Added duplicate index entry " + newEntry + " for row: " +
Entry oldEntry = new Entry(createEntryBytes(row), rowId);
if(removeEntry(oldEntry)) {
- --_rowCount;
++_modCount;
} else {
LOG.warn("Failed removing index entry " + oldEntry + " for row: " +
// determine if the addition of this entry would break the uniqueness
// constraint. See isUnique() for some notes about uniqueness as
// defined by Access.
- if(isUnique() && !isNullEntry &&
- (((idx > 0) &&
- newEntry.equalsEntryBytes(_entries.get(idx - 1))) ||
+ boolean isDupeEntry =
+ (((idx > 0) &&
+ newEntry.equalsEntryBytes(_entries.get(idx - 1))) ||
((idx < _entries.size()) &&
- newEntry.equalsEntryBytes(_entries.get(idx)))))
+ newEntry.equalsEntryBytes(_entries.get(idx))));
+ if(isUnique() && !isNullEntry && isDupeEntry)
{
throw new IOException(
"New row " + Arrays.asList(row) +
" violates uniqueness constraint for index " + this);
}
+
+ if(!isDupeEntry) {
+ ++_uniqueEntryCount;
+ }
_entries.add(idx, newEntry);
return true;
_freeSpacePages = UsageMap.read(getDatabase(), pageNum, rowNum, false);
for (int i = 0; i < _indexCount; i++) {
- Index index = new Index(this);
- _indexes.add(index);
- index.setRowCount(tableBuffer.getInt(getFormat().OFFSET_INDEX_DEF_BLOCK +
- i * getFormat().SIZE_INDEX_DEFINITION + 4));
+ int uniqueEntryCountOffset =
+ (getFormat().OFFSET_INDEX_DEF_BLOCK +
+ (i * getFormat().SIZE_INDEX_DEFINITION) + 4);
+ int uniqueEntryCount = tableBuffer.getInt(uniqueEntryCountOffset);
+ _indexes.add(new Index(this, uniqueEntryCount, uniqueEntryCountOffset));
}
int colOffset = getFormat().OFFSET_INDEX_DEF_BLOCK +
// write any index changes
Iterator<Index> indIter = _indexes.iterator();
for (int i = 0; i < _indexes.size(); i++) {
- tdefPage.putInt(getFormat().OFFSET_INDEX_DEF_BLOCK +
- (i * getFormat().SIZE_INDEX_DEFINITION) + 4, _rowCount);
Index index = indIter.next();
+ // write the unique entry count for the index to the table definition
+ // page
+ tdefPage.putInt(index.getUniqueEntryCountOffset(),
+ index.getUniqueEntryCount());
+ // write the entry page for the index
index.update();
}