@InterfaceAudience.Private @InterfaceStability.Evolving public interface Store extends HeapSize, StoreConfigInformation
Modifier and Type | Field and Description |
---|---|
static int | NO_PRIORITY |
static int | PRIORITY_USER |
Modifier and Type | Method and Description |
---|---|
long | add(KeyValue kv): Adds a value to the memstore. |
void | addChangedReaderObserver(ChangedReadersObserver o) |
boolean | areWritesEnabled() |
void | assertBulkLoadHFileOk(org.apache.hadoop.fs.Path srcPath): Throws a WrongRegionException if the HFile does not fit in this region, or an InvalidHFileException if the HFile is not valid. |
void | bulkLoadHFile(String srcPathStr, long sequenceId): This method should only be called from HRegion. |
void | cancelRequestedCompaction(CompactionContext compaction) |
boolean | canSplit() |
Collection<StoreFile> | close(): Close all the readers. We don't need to worry about subsequent requests because the HRegion holds a write lock that will prevent any more reads or writes. |
List<StoreFile> | compact(CompactionContext compaction) |
void | completeCompactionMarker(WALProtos.CompactionDescriptor compaction): Call to complete a compaction. |
org.apache.hadoop.hbase.regionserver.StoreFlushContext | createFlushContext(long cacheFlushId) |
StoreFile.Writer | createWriterInTmp(long maxKeyCount, Compression.Algorithm compression, boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTags) |
void | deleteChangedReaderObserver(ChangedReadersObserver o) |
CacheConfig | getCacheConfig(): Used for tests. |
String | getColumnFamilyName() |
long | getCompactedCellsCount() |
long | getCompactedCellsSize() |
CompactionProgress | getCompactionProgress(): Getter for the CompactionProgress object. |
int | getCompactPriority() |
KeyValue.KVComparator | getComparator() |
RegionCoprocessorHost | getCoprocessorHost() |
HFileDataBlockEncoder | getDataBlockEncoder() |
HColumnDescriptor | getFamily() |
org.apache.hadoop.fs.FileSystem | getFileSystem() |
long | getFlushableSize() |
long | getFlushedCellsCount() |
long | getFlushedCellsSize() |
long | getLastCompactSize() |
long | getMajorCompactedCellsCount() |
long | getMajorCompactedCellsSize() |
long | getMaxMemstoreTS() |
long | getMemStoreSize() |
HRegionInfo | getRegionInfo() |
KeyValue | getRowKeyAtOrBefore(byte[] row): Find the key that matches row exactly, or the one that immediately precedes it. |
ScanInfo | getScanInfo() |
KeyValueScanner | getScanner(Scan scan, NavigableSet<byte[]> targetCols, long readPt): Return a scanner for both the memstore and the HStore files. |
List<KeyValueScanner> | getScanners(boolean cacheBlocks, boolean isGet, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow, byte[] stopRow, long readPt): Get all scanners with no filtering based on TTL (that happens further down the line). |
long | getSize() |
long | getSmallestReadPoint() |
byte[] | getSplitPoint(): Determines if the Store should be split. |
Collection<StoreFile> | getStorefiles() |
int | getStorefilesCount() |
long | getStorefilesIndexSize() |
long | getStorefilesSize() |
long | getStoreSizeUncompressed() |
TableName | getTableName() |
long | getTotalStaticBloomSize(): Returns the total byte size of all Bloom filter bit arrays. |
long | getTotalStaticIndexSize(): Returns the total size of all index blocks in the data block indexes, including the root level, intermediate levels, and the leaf level for multi-level indexes, or just the root level for single-level indexes. |
boolean | hasReferences() |
boolean | hasTooManyStoreFiles() |
boolean | isMajorCompaction() |
boolean | needsCompaction(): See if there are too many store files in this store. |
CompactionContext | requestCompaction() |
CompactionContext | requestCompaction(int priority, CompactionRequest baseRequest) |
void | rollback(KeyValue kv): Removes a KeyValue from the memstore. |
boolean | throttleCompaction(long compactionSize) |
long | timeOfOldestEdit(): When was the last edit done in the memstore? |
void | triggerMajorCompaction() |
long | upsert(Iterable<Cell> cells, long readpoint): Adds or replaces the specified KeyValues. |
Methods inherited from interface org.apache.hadoop.hbase.regionserver.StoreConfigInformation: getBlockingFileCount, getCompactionCheckMultiplier, getMemstoreFlushSize, getStoreFileTtl
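Store is marked @InterfaceAudience.Private, so instances are never constructed by client code; they belong to the region that owns them. As a minimal sketch of how server-side code might reach one, assuming 0.98-era coprocessor entry points (RegionCoprocessorEnvironment.getRegion() and HRegion.getStore(byte[]) are the assumed hooks, and "cf" is a hypothetical family name):

```java
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreAccessSketch {
  // Sketch: reach the Store for column family "cf" from a region coprocessor.
  static Store storeFor(RegionCoprocessorEnvironment env) {
    HRegion region = env.getRegion();            // region hosting the coprocessor
    return region.getStore(Bytes.toBytes("cf")); // hypothetical family name
  }
}
```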
static final int PRIORITY_USER
static final int NO_PRIORITY
KeyValue.KVComparator getComparator()
Collection<StoreFile> getStorefiles()
Collection<StoreFile> close() throws IOException
Close all the readers. We don't need to worry about subsequent requests because the HRegion holds a write lock that will prevent any more reads or writes.
Returns:
the StoreFiles that were previously being used.
Throws:
IOException - on failure
KeyValueScanner getScanner(Scan scan, NavigableSet<byte[]> targetCols, long readPt) throws IOException
Return a scanner for both the memstore and the HStore files.
Parameters:
scan - Scan to apply when scanning the stores
targetCols - columns to scan
Throws:
IOException - on failure
List<KeyValueScanner> getScanners(boolean cacheBlocks, boolean isGet, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow, byte[] stopRow, long readPt) throws IOException
Get all scanners with no filtering based on TTL (that happens further down the line).
Parameters:
cacheBlocks -
isGet -
usePread -
isCompaction -
matcher -
startRow -
stopRow -
readPt -
Throws:
IOException
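getScanner(Scan, NavigableSet, long) and getScanners(...) are the read path into a store. The sketch below opens a combined memstore-plus-HFile scanner for a single qualifier; the Store reference is assumed to come from region-server internals, and the qualifier name is illustrative:

```java
import java.io.IOException;
import java.util.NavigableSet;
import java.util.TreeSet;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreScanSketch {
  static void scanQualifier(Store store, byte[] startRow, byte[] stopRow) throws IOException {
    Scan scan = new Scan(startRow, stopRow);
    NavigableSet<byte[]> targetCols = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
    targetCols.add(Bytes.toBytes("q1")); // hypothetical qualifier
    // Use the smallest outstanding read point so every returned KeyValue is
    // visible to all concurrent readers.
    KeyValueScanner scanner =
        store.getScanner(scan, targetCols, store.getSmallestReadPoint());
    try {
      for (KeyValue kv = scanner.next(); kv != null; kv = scanner.next()) {
        System.out.println(kv); // stand-in for real per-cell processing
      }
    } finally {
      scanner.close();
    }
  }
}
```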
ScanInfo getScanInfo()
long upsert(Iterable<Cell> cells, long readpoint) throws IOException
For each KeyValue specified, if a cell with the same row, family, and qualifier exists in MemStore, it will be replaced. Otherwise, it will just be inserted to MemStore.
This operation is atomic on each KeyValue (row/family/qualifier) but not necessarily atomic across all of them.
Parameters:
cells -
readpoint - readpoint below which we can safely remove duplicate KVs
Throws:
IOException
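A sketch of the replace semantics described above, assuming a Store supplied by the region and an externally chosen read point; treating the returned long as the memstore size delta is an assumption, not something this page states:

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.util.Bytes;

public class UpsertSketch {
  // Replace the current cf:q cell for this row in the MemStore (or insert it).
  static long replaceCell(Store store, byte[] row, long readPoint) throws IOException {
    List<Cell> cells = new ArrayList<Cell>();
    // KeyValue implements Cell; a matching row/family/qualifier replaces any
    // existing MemStore cell instead of accumulating another version.
    cells.add(new KeyValue(row, Bytes.toBytes("cf"), Bytes.toBytes("q"),
        System.currentTimeMillis(), Bytes.toBytes(42L)));
    return store.upsert(cells, readPoint); // assumed to be the memstore size delta
  }
}
```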
long add(KeyValue kv)
Adds a value to the memstore.
Parameters:
kv -
long timeOfOldestEdit()
When was the last edit done in the memstore?
void rollback(KeyValue kv)
Removes a KeyValue from the memstore.
Parameters:
kv -
KeyValue getRowKeyAtOrBefore(byte[] row) throws IOException
Find the key that matches row exactly, or the one that immediately precedes it.
Parameters:
row - The row key of the targeted row.
Throws:
IOException
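add(KeyValue) and rollback(KeyValue) pair up on the write path: the region inserts into the memstore first and rolls the insert back if the corresponding WAL append fails. A minimal sketch of that pattern, with the WAL append as a hypothetical helper:

```java
import java.io.IOException;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.Store;

public class AddRollbackSketch {
  static void writeWithRollback(Store store, KeyValue kv) throws IOException {
    long sizeDelta = store.add(kv); // grow the memstore; delta meaning assumed
    try {
      appendToWal(kv); // hypothetical WAL append, as HRegion would perform
    } catch (IOException e) {
      store.rollback(kv); // undo the memstore insert on WAL failure
      throw e;
    }
  }

  private static void appendToWal(KeyValue kv) throws IOException {
    // stand-in for the real write-ahead log append
  }
}
```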
org.apache.hadoop.fs.FileSystem getFileSystem()
StoreFile.Writer createWriterInTmp(long maxKeyCount, Compression.Algorithm compression, boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTags) throws IOException
Throws:
IOException
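A sketch of using the temporary writer the way a flush might, with every argument value illustrative rather than recommended; StoreFile.Writer.append and close are assumed from the 0.98 API:

```java
// store: assumed supplied by region internals; kv: a KeyValue in sort order.
StoreFile.Writer writer = store.createWriterInTmp(
    1000,                       // rough upper bound on keys to write
    Compression.Algorithm.NONE, // no block compression
    false,                      // not compaction output
    true,                       // keep MVCC read points in the file
    false);                     // no cell tags
try {
  writer.append(kv);
} finally {
  writer.close();
}
```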
boolean throttleCompaction(long compactionSize)
CompactionProgress getCompactionProgress()
CompactionContext requestCompaction() throws IOException
Throws:
IOException
CompactionContext requestCompaction(int priority, CompactionRequest baseRequest) throws IOException
Throws:
IOException
void cancelRequestedCompaction(CompactionContext compaction)
List<StoreFile> compact(CompactionContext compaction) throws IOException
Throws:
IOException
boolean isMajorCompaction() throws IOException
Throws:
IOException
void triggerMajorCompaction()
boolean needsCompaction()
int getCompactPriority()
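Taken together, needsCompaction(), requestCompaction(...), compact(...), and cancelRequestedCompaction(...) form the selection-then-execution life cycle. The sketch below shows one plausible driver sequence; the real compaction chore adds queuing, throttling via throttleCompaction(long), and richer error handling:

```java
// Sketch, assuming "store" comes from region internals.
if (store.needsCompaction()) {
  // Select files at user priority; a null baseRequest lets the store pick.
  CompactionContext context = store.requestCompaction(Store.PRIORITY_USER, null);
  if (context != null) {
    try {
      List<StoreFile> newFiles = store.compact(context); // rewritten store files
    } catch (IOException e) {
      store.cancelRequestedCompaction(context); // release the file selection
      throw e;
    }
  }
}
```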
org.apache.hadoop.hbase.regionserver.StoreFlushContext createFlushContext(long cacheFlushId)
void completeCompactionMarker(WALProtos.CompactionDescriptor compaction) throws IOException
Call to complete a compaction.
Parameters:
compaction -
Throws:
IOException
boolean canSplit()
byte[] getSplitPoint()
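canSplit(), getSplitPoint(), and hasReferences() cooperate during region splits: a store still holding reference files from an earlier split cannot be split again until compaction rewrites them. A short sketch:

```java
// Sketch: ask the store for a candidate split row.
if (store.canSplit() && !store.hasReferences()) {
  byte[] splitRow = store.getSplitPoint(); // assumed null when no point exists
  if (splitRow != null) {
    // hand splitRow to the split machinery (outside this interface)
  }
}
```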
void assertBulkLoadHFileOk(org.apache.hadoop.fs.Path srcPath) throws IOException
This throws a WrongRegionException if the HFile does not fit in this region, or an InvalidHFileException if the HFile is not valid.
Throws:
IOException
void bulkLoadHFile(String srcPathStr, long sequenceId) throws IOException
This method should only be called from HRegion.
Parameters:
srcPathStr -
sequenceId - sequence Id associated with the HFile
Throws:
IOException
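A sketch of the validate-then-load sequence; the staging path and sequence id are illustrative, and as noted above the real caller is HRegion:

```java
org.apache.hadoop.fs.Path srcPath =
    new org.apache.hadoop.fs.Path("/staging/cf/hfile.tmp"); // hypothetical path
store.assertBulkLoadHFileOk(srcPath);         // fails on wrong region / bad HFile
store.bulkLoadHFile(srcPath.toString(), 42L); // 42L: placeholder sequence id
```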
boolean hasReferences()
long getMemStoreSize()
long getFlushableSize()
Returns:
the size returned by getMemStoreSize(), unless we are carrying snapshots, in which case it will be the size of outstanding snapshots.
HColumnDescriptor getFamily()
long getMaxMemstoreTS()
HFileDataBlockEncoder getDataBlockEncoder()
long getLastCompactSize()
long getSize()
int getStorefilesCount()
long getStoreSizeUncompressed()
long getStorefilesSize()
long getStorefilesIndexSize()
long getTotalStaticIndexSize()
long getTotalStaticBloomSize()
CacheConfig getCacheConfig()
HRegionInfo getRegionInfo()
RegionCoprocessorHost getCoprocessorHost()
boolean areWritesEnabled()
long getSmallestReadPoint()
String getColumnFamilyName()
TableName getTableName()
long getFlushedCellsCount()
long getFlushedCellsSize()
long getCompactedCellsCount()
long getCompactedCellsSize()
long getMajorCompactedCellsCount()
long getMajorCompactedCellsSize()
void addChangedReaderObserver(ChangedReadersObserver o)
void deleteChangedReaderObserver(ChangedReadersObserver o)
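ChangedReadersObserver lets a long-lived scanner react when a flush or compaction swaps the store's reader set. A sketch of registering and later removing one, assuming the observer interface exposes a single updateReaders() callback (as in the 0.98 sources):

```java
ChangedReadersObserver observer = new ChangedReadersObserver() {
  @Override
  public void updateReaders() throws IOException {
    // reopen scanners against the new set of store files (hypothetical reaction)
  }
};
store.addChangedReaderObserver(observer);
// ... later, when the consuming scanner closes:
store.deleteChangedReaderObserver(observer);
```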
boolean hasTooManyStoreFiles()