org.apache.hadoop.hbase.regionserver.wal.WALEdit.add(KeyValue)
|
org.apache.hadoop.hbase.thrift.ThriftServerRunner.HBaseHandler.atomicIncrement(ByteBuffer, ByteBuffer, ByteBuffer, long) |
org.apache.hadoop.hbase.client.HTable.batch(List)
If any exception is thrown by one of the actions, there is no way to
retrieve the partially executed results. Use HTable.batch(List, Object[]) instead (see the sketch below).
|
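A minimal sketch of the suggested replacement, assuming an open HTable `table`; the row keys, family, and qualifier are placeholders. batch(List, Object[]) fills the caller-supplied results array as it goes, so partially executed results survive a failure:

    // Sketch: batch(List, Object[]) keeps partial results when an action fails.
    List<Row> actions = new ArrayList<Row>();
    actions.add(new Get(Bytes.toBytes("row1")));
    actions.add(new Put(Bytes.toBytes("row2"))
        .add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
    Object[] results = new Object[actions.size()];
    try {
      table.batch(actions, results);
    } catch (IOException | InterruptedException e) {
      // results[i] now holds a Result for each action that succeeded,
      // or the causing exception (or null) for each action that failed.
    }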
org.apache.hadoop.hbase.client.HTableInterface.batch(List)
|
org.apache.hadoop.hbase.client.HTable.batchCallback(List, Batch.Callback)
|
org.apache.hadoop.hbase.client.HTableInterface.batchCallback(List, Batch.Callback)
|
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.clearClusterId() |
org.apache.hadoop.hbase.client.HConnection.clearRegionCache(byte[]) |
org.apache.hadoop.hbase.RemoteExceptionHandler.decodeRemoteException(RemoteException)
Use RemoteException.unwrapRemoteException() instead (see the sketch below).
In fact we should look into deprecating this whole class - St.Ack 2010929
|
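A minimal sketch of the suggested replacement, assuming the exception arrives as an org.apache.hadoop.ipc.RemoteException from an RPC call (doSomeRpcCall is a hypothetical stand-in):

    // Sketch: unwrap the server-side exception locally instead of going
    // through RemoteExceptionHandler.decodeRemoteException(...).
    try {
      doSomeRpcCall();  // hypothetical call that may throw RemoteException
    } catch (RemoteException re) {
      IOException unwrapped = re.unwrapRemoteException();
      throw unwrapped;  // the original exception type where it can be recreated locally
    }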
org.apache.hadoop.hbase.client.HConnectionManager.deleteAllConnections()
kept for backward compatibility, but the behavior is broken. HBASE-8983
|
org.apache.hadoop.hbase.client.HConnectionManager.deleteAllConnections(boolean) |
org.apache.hadoop.hbase.client.HConnectionManager.deleteConnection(Configuration) |
org.apache.hadoop.hbase.client.HConnectionManager.deleteStaleConnection(HConnection) |
org.apache.hadoop.hbase.filter.FilterBase.filterRow(List) |
org.apache.hadoop.hbase.filter.Filter.filterRow(List) |
org.apache.hadoop.hbase.filter.FilterList.filterRow(List) |
org.apache.hadoop.hbase.filter.FilterWrapper.filterRow(List) |
org.apache.hadoop.hbase.thrift.ThriftServerRunner.HBaseHandler.get(ByteBuffer, ByteBuffer, ByteBuffer, Map) |
org.apache.hadoop.hbase.client.Query.getACLStrategy()
No effect
|
org.apache.hadoop.hbase.client.Mutation.getACLStrategy()
No effect
|
org.apache.hadoop.hbase.client.HConnection.getAdmin(ServerName, boolean)
You can pass a master flag, but nothing special is done with it.
|
org.apache.hadoop.hbase.KeyValue.getBuffer()
Since 0.98.0. Use the Cell interface instead; do not presume a single backing buffer (see the sketch below).
|
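A minimal sketch of Cell-style access, assuming a Cell `cell` obtained from a Result or a scanner; CellUtil copies each component rather than assuming one contiguous buffer:

    // Sketch: per-component access instead of KeyValue.getBuffer().
    byte[] row = CellUtil.cloneRow(cell);
    byte[] family = CellUtil.cloneFamily(cell);
    byte[] qualifier = CellUtil.cloneQualifier(cell);
    byte[] value = CellUtil.cloneValue(cell);
    // To avoid copies, use the array/offset/length triplets instead:
    // cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), etc.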
org.apache.hadoop.hbase.zookeeper.ZKUtil.getChildDataAndWatchForNewChildren(ZooKeeperWatcher, String)
Unused
|
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKeyOrBuilder.getClusterId() |
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.getClusterId() |
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.getClusterId() |
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.getClusterIdBuilder() |
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKeyOrBuilder.getClusterIdOrBuilder() |
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.getClusterIdOrBuilder() |
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.getClusterIdOrBuilder() |
org.apache.hadoop.hbase.client.Result.getColumn(byte[], byte[])
|
org.apache.hadoop.hbase.client.Result.getColumnLatest(byte[], byte[])
|
org.apache.hadoop.hbase.client.Result.getColumnLatest(byte[], int, int, byte[], int, int)
|
org.apache.hadoop.hbase.client.HTable.getConnection()
This method will be changed from public to package protected.
|
org.apache.hadoop.hbase.client.HConnectionManager.getConnection(Configuration) |
org.apache.hadoop.hbase.client.HConnection.getCurrentNrHRS()
This method will be changed from public to package protected.
|
org.apache.hadoop.hbase.HColumnDescriptor.getDataBlockEncodingOnDisk() |
org.apache.hadoop.hbase.Cell.getFamily()
|
org.apache.hadoop.hbase.KeyValue.getFamily() |
org.apache.hadoop.hbase.client.Mutation.getFamilyMap()
|
org.apache.hadoop.hbase.client.HConnection.getHTableDescriptor(byte[]) |
org.apache.hadoop.hbase.client.HConnection.getHTableDescriptors(List) |
org.apache.hadoop.hbase.client.HConnection.getKeepAliveMasterService()
Since 0.96.0
|
org.apache.hadoop.hbase.regionserver.wal.WALEdit.getKeyValues()
|
org.apache.hadoop.hbase.filter.FilterBase.getNextKeyHint(KeyValue) |
org.apache.hadoop.hbase.filter.Filter.getNextKeyHint(KeyValue) |
org.apache.hadoop.hbase.filter.FilterList.getNextKeyHint(KeyValue) |
org.apache.hadoop.hbase.filter.FilterWrapper.getNextKeyHint(KeyValue) |
org.apache.hadoop.hbase.HTableDescriptor.getOwnerString() |
org.apache.hadoop.hbase.Cell.getQualifier()
|
org.apache.hadoop.hbase.KeyValue.getQualifier() |
org.apache.hadoop.hbase.regionserver.HRegion.getRegionDir(Path, HRegionInfo) |
org.apache.hadoop.hbase.regionserver.HRegion.getRegionDir(Path, String) |
org.apache.hadoop.hbase.client.HConnection.getRegionLocation(byte[], byte[], boolean) |
org.apache.hadoop.hbase.Cell.getRow()
|
org.apache.hadoop.hbase.KeyValue.getRow() |
org.apache.hadoop.hbase.client.HTableInterface.getRowOrBefore(byte[], byte[])
As of version 0.92 this method is deprecated without replacement.
getRowOrBefore is used internally to find entries in hbase:meta, and for
efficiency it makes assumptions about the table that hold for hbase:meta
but not in general (a possible workaround is sketched below).
|
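There is no drop-in replacement, but one common workaround (an assumption here, not guidance from this entry) is a reversed scan, available from HBase 0.98 (HBASE-4811), which yields the row at or before the probe row:

    // Sketch: emulate getRowOrBefore(row, family) with a reversed scan.
    // Assumes an open HTable `table` and HBase 0.98+.
    Scan scan = new Scan(row);   // start the scan at the probe row
    scan.setReversed(true);      // walk toward smaller row keys
    scan.addFamily(family);
    scan.setCaching(1);
    ResultScanner scanner = table.getScanner(scan);
    try {
      Result r = scanner.next(); // row at or before `row`, or null if none exists
    } finally {
      scanner.close();
    }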
org.apache.hadoop.hbase.regionserver.StoreFile.Reader.getScanner(boolean, boolean) |
org.apache.hadoop.hbase.regionserver.StoreFile.Reader.getScanner(boolean, boolean, boolean) |
org.apache.hadoop.hbase.client.HTable.getScannerCaching()
|
org.apache.hadoop.hbase.ClusterStatus.getServerInfo()
|
org.apache.hadoop.hbase.io.ImmutableBytesWritable.getSize()
|
org.apache.hadoop.hbase.regionserver.HStore.getStoreHomedir(Path, HRegionInfo, byte[]) |
org.apache.hadoop.hbase.regionserver.HStore.getStoreHomedir(Path, String, byte[]) |
org.apache.hadoop.hbase.HTableDescriptor.getTableDir(Path, byte[]) |
org.apache.hadoop.hbase.HRegionInfo.getTableName()
Since 0.96.0; use #getTable()
|
org.apache.hadoop.hbase.client.ClientScanner.getTableName()
|
org.apache.hadoop.hbase.HRegionInfo.getTableName(byte[])
Since 0.96.0; use #getTable(byte[]) (see the sketch below)
|
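A minimal sketch of the TableName-based accessors, assuming an HRegionInfo `hri` and a region name `regionName`:

    // Sketch: TableName replaces the raw byte[] table name.
    TableName tn = hri.getTable();                            // instance accessor
    TableName fromRegion = HRegionInfo.getTable(regionName);  // static variant
    byte[] asBytes = tn.getName();
    String asString = tn.getNameAsString();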
org.apache.hadoop.hbase.client.HBaseAdmin.getTableNames() |
org.apache.hadoop.hbase.client.HConnection.getTableNames() |
org.apache.hadoop.hbase.client.HBaseAdmin.getTableNames(Pattern) |
org.apache.hadoop.hbase.client.HBaseAdmin.getTableNames(String) |
org.apache.hadoop.hbase.Cell.getTagsLength()
|
org.apache.hadoop.hbase.KeyValue.getTagsLength() |
org.apache.hadoop.hbase.codec.prefixtree.decode.PrefixTreeCell.getTagsLength() |
org.apache.hadoop.hbase.Cell.getTagsLengthUnsigned()
From the next major version this will be renamed to getTagsLength(), which returns an int.
|
org.apache.hadoop.hbase.KeyValue.getTagsLengthUnsigned() |
org.apache.hadoop.hbase.codec.prefixtree.decode.PrefixTreeCell.getTagsLengthUnsigned() |
org.apache.hadoop.hbase.KeyValue.getType() |
org.apache.hadoop.hbase.Cell.getValue()
|
org.apache.hadoop.hbase.KeyValue.getValue() |
org.apache.hadoop.hbase.coprocessor.ColumnInterpreter.getValue(byte[], byte[], KeyValue) |
org.apache.hadoop.hbase.thrift.ThriftServerRunner.HBaseHandler.getVer(ByteBuffer, ByteBuffer, ByteBuffer, int, Map) |
org.apache.hadoop.hbase.HRegionInfo.getVersion()
HRI is no longer a VersionedWritable
|
org.apache.hadoop.hbase.thrift.ThriftServerRunner.HBaseHandler.getVerTs(ByteBuffer, ByteBuffer, ByteBuffer, long, int, Map) |
org.apache.hadoop.hbase.client.HTable.getWriteBuffer()
Since 0.96. This is an internal buffer that should be neither read nor written.
|
org.apache.hadoop.hbase.client.Mutation.getWriteToWAL()
|
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKeyOrBuilder.hasClusterId() |
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.hasClusterId() |
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.hasClusterId() |
org.apache.hadoop.hbase.client.HTable.incrementColumnValue(byte[], byte[], byte[], long, boolean)
|
org.apache.hadoop.hbase.client.HTableInterface.incrementColumnValue(byte[], byte[], byte[], long, boolean)
|
org.apache.hadoop.hbase.regionserver.HRegion.initialize()
Use HRegion.createHRegion() or HRegion.openHRegion() instead.
|
org.apache.hadoop.hbase.HTableDescriptor.isDeferredLogFlush() |
org.apache.hadoop.hbase.KeyValue.isDelete() |
org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate.isLogDeletable(FileStatus) |
org.apache.hadoop.hbase.client.HConnection.isTableAvailable(byte[]) |
org.apache.hadoop.hbase.client.HConnection.isTableAvailable(byte[], byte[][]) |
org.apache.hadoop.hbase.client.HConnection.isTableDisabled(byte[]) |
org.apache.hadoop.hbase.client.HConnection.isTableEnabled(byte[]) |
org.apache.hadoop.hbase.client.HTable.isTableEnabled(byte[])
|
org.apache.hadoop.hbase.client.HTable.isTableEnabled(Configuration, byte[])
|
org.apache.hadoop.hbase.client.HTable.isTableEnabled(Configuration, String)
|
org.apache.hadoop.hbase.client.HTable.isTableEnabled(Configuration, TableName)
|
org.apache.hadoop.hbase.client.HTable.isTableEnabled(String)
|
org.apache.hadoop.hbase.client.HTable.isTableEnabled(TableName)
|
org.apache.hadoop.hbase.client.Result.list()
|
org.apache.hadoop.hbase.client.HConnection.locateRegion(byte[], byte[]) |
org.apache.hadoop.hbase.client.HConnection.locateRegions(byte[]) |
org.apache.hadoop.hbase.client.HConnection.locateRegions(byte[], boolean, boolean) |
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.mergeClusterId(HBaseProtos.UUID) |
org.apache.hadoop.hbase.KeyValue.oswrite(KeyValue, OutputStream) |
org.apache.hadoop.hbase.zookeeper.ZKConfig.parseZooCfg(Configuration, InputStream)
As of 0.96, HBase no longer relies on the availability of zoo.cfg
(see the sketch below).
|
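A hedged sketch of the direction implied here: the ZooKeeper ensemble is described by HBase configuration keys (set in hbase-site.xml or programmatically) rather than by parsing zoo.cfg; the hostnames below are placeholders.

    // Sketch: configure ZooKeeper via HBase configuration keys.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "zk1.example.com,zk2.example.com");
    conf.set("hbase.zookeeper.property.clientPort", "2181");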
org.apache.hadoop.hbase.coprocessor.RegionObserver.postCompact(ObserverContext, Store, StoreFile)
|
org.apache.hadoop.hbase.coprocessor.RegionObserver.postCompactSelection(ObserverContext, Store, ImmutableList)
|
org.apache.hadoop.hbase.coprocessor.RegionObserver.postFlush(ObserverContext)
|
org.apache.hadoop.hbase.coprocessor.BaseRegionObserver.postGet(ObserverContext, Get, List) |
org.apache.hadoop.hbase.coprocessor.RegionObserver.postGet(ObserverContext, Get, List) |
org.apache.hadoop.hbase.coprocessor.RegionObserver.postIncrementColumnValue(ObserverContext, byte[], byte[], byte[], long, boolean, long)
This hook is no longer called by the RegionServer
|
org.apache.hadoop.hbase.coprocessor.RegionObserver.postSplit(ObserverContext, HRegion, HRegion)
Use postCompleteSplit() instead
|
org.apache.hadoop.hbase.coprocessor.RegionObserver.preCompact(ObserverContext, Store, InternalScanner, ScanType)
|
org.apache.hadoop.hbase.coprocessor.RegionObserver.preCompactScannerOpen(ObserverContext, Store, List, ScanType, long, InternalScanner)
|
org.apache.hadoop.hbase.coprocessor.RegionObserver.preCompactSelection(ObserverContext, Store, List)
|
org.apache.hadoop.hbase.coprocessor.RegionObserver.preFlush(ObserverContext)
|
org.apache.hadoop.hbase.coprocessor.BaseRegionObserver.preGet(ObserverContext, Get, List) |
org.apache.hadoop.hbase.coprocessor.RegionObserver.preGet(ObserverContext, Get, List) |
org.apache.hadoop.hbase.coprocessor.RegionObserver.preIncrementColumnValue(ObserverContext, byte[], byte[], byte[], long, boolean)
This hook is no longer called by the RegionServer
|
org.apache.hadoop.hbase.coprocessor.RegionObserver.preSplit(ObserverContext)
Use preSplit(final ObserverContext c, byte[] splitRow) instead (see the sketch below)
|
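A minimal sketch of the split-point-aware override, assuming a coprocessor that extends BaseRegionObserver:

    // Sketch: override the preSplit variant that receives the split row.
    public class ExampleObserver extends BaseRegionObserver {
      @Override
      public void preSplit(ObserverContext<RegionCoprocessorEnvironment> c,
          byte[] splitRow) throws IOException {
        // inspect the requested split point here, or call c.bypass()
        // to veto the default split behavior
      }
    }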
org.apache.hadoop.hbase.client.HConnection.processBatch(List, byte[], ExecutorService, Object[]) |
org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation.processBatch(List, byte[], ExecutorService, Object[]) |
org.apache.hadoop.hbase.client.HConnection.processBatch(List, TableName, ExecutorService, Object[])
|
org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation.processBatch(List, TableName, ExecutorService, Object[]) |
org.apache.hadoop.hbase.client.HConnection.processBatchCallback(List, byte[], ExecutorService, Object[], Batch.Callback) |
org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation.processBatchCallback(List, byte[], ExecutorService, Object[], Batch.Callback) |
org.apache.hadoop.hbase.client.HConnection.processBatchCallback(List, TableName, ExecutorService, Object[], Batch.Callback)
|
org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation.processBatchCallback(List, TableName, ExecutorService, Object[], Batch.Callback)
|
org.apache.hadoop.hbase.client.HTableMultiplexer.put(byte[], List) |
org.apache.hadoop.hbase.client.HTableMultiplexer.put(byte[], Put) |
org.apache.hadoop.hbase.client.HTableMultiplexer.put(byte[], Put, int) |
org.apache.hadoop.hbase.client.HTablePool.putTable(HTableInterface) |
org.apache.hadoop.hbase.client.Result.raw()
|
org.apache.hadoop.hbase.HColumnDescriptor.readFields(DataInput)
|
org.apache.hadoop.hbase.HTableDescriptor.readFields(DataInput)
|
org.apache.hadoop.hbase.HRegionInfo.readFields(DataInput)
Use protobuf deserialization instead.
|
org.apache.hadoop.hbase.io.Reference.readFields(DataInput)
Writables are going away. Use the pb serialization methods instead
(a round-trip sketch follows below). Remove in a release after 0.96 goes out.
This is here only to migrate old Reference files written with Writables before 0.96.
|
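A minimal sketch of the pb round trip, assuming an HRegionInfo `hri` (HTableDescriptor and HColumnDescriptor offer the same toByteArray()/parseFrom pair):

    // Sketch: protobuf-backed serialization replacing Writable readFields/write.
    byte[] bytes = hri.toByteArray();                 // serialize to pb bytes
    HRegionInfo copy = HRegionInfo.parseFrom(bytes);  // throws DeserializationException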
org.apache.hadoop.hbase.client.HConnection.relocateRegion(byte[], byte[]) |
org.apache.hadoop.hbase.util.PoolMap.remove(K, V)
|
org.apache.hadoop.hbase.client.Query.setACLStrategy(boolean)
No effect
|
org.apache.hadoop.hbase.client.Mutation.setACLStrategy(boolean)
No effect
|
org.apache.hadoop.hbase.client.HTable.setAutoFlush(boolean) |
org.apache.hadoop.hbase.client.HTableInterface.setAutoFlush(boolean)
Since 0.96. When called with setAutoFlush(false), this method also sets
clearBufferOnFail to true, which is unexpected but kept for historical reasons.
Replace it with setAutoFlush(false, false) if that is exactly what you want, or
with HTableInterface.setAutoFlushTo(boolean) for all other cases (see the sketch below).
|
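A minimal sketch of the two explicit alternatives named above, assuming an HTableInterface `table`:

    // Sketch: state the buffering intent explicitly.
    table.setAutoFlush(false, false);  // autoFlush=false, clearBufferOnFail=false
    // or, when the clearBufferOnFail side effect is irrelevant:
    table.setAutoFlushTo(false);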
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.setClusterId(HBaseProtos.UUID.Builder) |
org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey.Builder.setClusterId(HBaseProtos.UUID) |
org.apache.hadoop.hbase.HTableDescriptor.setDeferredLogFlush(boolean) |
org.apache.hadoop.hbase.HColumnDescriptor.setEncodeOnDisk(boolean) |
org.apache.hadoop.hbase.client.Mutation.setFamilyMap(NavigableMap<byte[], List<KeyValue>>)
|
org.apache.hadoop.hbase.HTableDescriptor.setName(byte[]) |
org.apache.hadoop.hbase.HTableDescriptor.setName(TableName) |
org.apache.hadoop.hbase.HTableDescriptor.setOwner(User) |
org.apache.hadoop.hbase.HTableDescriptor.setOwnerString(String) |
org.apache.hadoop.hbase.client.HTable.setScannerCaching(int)
|
org.apache.hadoop.hbase.client.Mutation.setWriteToWAL(boolean)
|
org.apache.hadoop.hbase.filter.FilterBase.transform(KeyValue) |
org.apache.hadoop.hbase.filter.Filter.transform(KeyValue) |
org.apache.hadoop.hbase.filter.FilterList.transform(KeyValue) |
org.apache.hadoop.hbase.filter.FilterWrapper.transform(KeyValue) |
org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.tryAtomicRegionLoad(HConnection, byte[], byte[], Collection)
|
org.apache.hadoop.hbase.client.HConnection.updateCachedLocations(byte[], byte[], Object, HRegionLocation) |
org.apache.hadoop.hbase.zookeeper.ZKUtil.updateExistingNodeData(ZooKeeperWatcher, String, byte[], int)
Unused
|
org.apache.hadoop.hbase.catalog.CatalogTracker.waitForMetaServerConnection(long)
Use #getMetaServerConnection(long)
|
org.apache.hadoop.hbase.HColumnDescriptor.write(DataOutput)
|
org.apache.hadoop.hbase.HTableDescriptor.write(DataOutput)
Writables are going away.
Use MessageLite.toByteArray() instead.
|
org.apache.hadoop.hbase.HRegionInfo.write(DataOutput)
|
org.apache.hadoop.hbase.regionserver.wal.HLogKey.write(DataOutput) |