Package org.apache.cassandra.bridge
Class CassandraBridge
- java.lang.Object
-
- org.apache.cassandra.bridge.CassandraBridge
-
public abstract class CassandraBridge extends java.lang.Object
Provides an abstract interface for all calls to the Cassandra code of a specific version
-
-
Nested Class Summary
Nested Classes Modifier and Type Class Description static interface CassandraBridge.Writer
-
Field Summary
Fields Modifier and Type Field Description static java.lang.String IMPLEMENTATION_FQCN
-
Constructor Summary
Constructors Constructor Description CassandraBridge()
-
Method Summary
All Methods Instance Methods Abstract Methods Concrete Methods Modifier and Type Method Description org.apache.cassandra.spark.data.CqlField.NativeTypeaDouble()org.apache.cassandra.spark.data.CqlField.NativeTypeaFloat()org.apache.cassandra.spark.data.CqlField.NativeTypeaInt()java.util.List<org.apache.cassandra.spark.data.CqlField.NativeType>allTypes()org.apache.cassandra.spark.data.CqlField.NativeTypeascii()org.apache.cassandra.spark.data.CqlField.NativeTypebigint()org.apache.cassandra.spark.data.CqlField.NativeTypeblob()org.apache.cassandra.spark.data.CqlField.NativeTypebool()org.apache.cassandra.spark.data.CqlTablebuildSchema(java.lang.String createStatement, java.lang.String keyspace)org.apache.cassandra.spark.data.CqlTablebuildSchema(java.lang.String createStatement, java.lang.String keyspace, org.apache.cassandra.spark.data.ReplicationFactor replicationFactor)org.apache.cassandra.spark.data.CqlTablebuildSchema(java.lang.String createStatement, java.lang.String keyspace, org.apache.cassandra.spark.data.ReplicationFactor replicationFactor, org.apache.cassandra.spark.data.partitioner.Partitioner partitioner)org.apache.cassandra.spark.data.CqlTablebuildSchema(java.lang.String createStatement, java.lang.String keyspace, org.apache.cassandra.spark.data.ReplicationFactor replicationFactor, org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.util.Set<java.lang.String> udts)abstract org.apache.cassandra.spark.data.CqlTablebuildSchema(java.lang.String createStatement, java.lang.String keyspace, org.apache.cassandra.spark.data.ReplicationFactor replicationFactor, org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.util.Set<java.lang.String> udts, java.util.UUID tableId, int indexCount, boolean enableCdc)abstract org.apache.cassandra.spark.data.CassandraTypescassandraTypes()org.apache.cassandra.spark.data.CqlField.CqlTypecollection(java.lang.String name, org.apache.cassandra.spark.data.CqlField.CqlType... 
types)java.nio.ByteBuffercompress(byte[] bytes)java.nio.ByteBuffercompress(java.nio.ByteBuffer input)abstract org.apache.cassandra.util.CompressionUtilcompressionUtil()abstract java.util.List<java.lang.Boolean>contains(org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.lang.String keyspace, java.lang.String table, org.apache.cassandra.spark.data.SSTable ssTable, java.util.List<java.nio.ByteBuffer> partitionKeys)org.apache.cassandra.spark.data.CqlField.NativeTypecounter()org.apache.cassandra.spark.data.CqlField.NativeTypedate()org.apache.cassandra.spark.data.CqlField.NativeTypedecimal()org.apache.cassandra.spark.data.CqlField.NativeTypeduration()org.apache.cassandra.spark.data.CqlField.NativeTypeempty()java.nio.ByteBufferencodePartitionKey(org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.lang.String keyspace, java.lang.String createTableStmt, java.util.List<java.lang.String> partitionKey)abstract java.util.List<java.nio.ByteBuffer>encodePartitionKeys(org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.lang.String keyspace, java.lang.String createTableStmt, java.util.List<java.util.List<java.lang.String>> partitionKeys)org.apache.cassandra.spark.data.CqlField.CqlTypefrozen(org.apache.cassandra.spark.data.CqlField.CqlType type)abstract org.apache.cassandra.spark.reader.StreamScanner<org.apache.cassandra.spark.reader.RowData>getCompactionScanner(org.apache.cassandra.spark.data.CqlTable table, org.apache.cassandra.spark.data.partitioner.Partitioner partitionerType, org.apache.cassandra.spark.data.SSTablesSupplier ssTables, org.apache.cassandra.spark.sparksql.filters.SparkRangeFilter sparkRangeFilter, java.util.Collection<org.apache.cassandra.spark.sparksql.filters.PartitionKeyFilter> partitionKeyFilters, org.apache.cassandra.spark.sparksql.filters.PruneColumnFilter columnFilter, org.apache.cassandra.spark.utils.TimeProvider timeProvider, boolean readIndexOffset, boolean useIncrementalRepair, 
org.apache.cassandra.analytics.stats.Stats stats)abstract java.util.AbstractMap.SimpleEntry<java.nio.ByteBuffer,java.math.BigInteger>getPartitionKey(org.apache.cassandra.spark.data.CqlTable table, org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.util.List<java.lang.String> keys)abstract org.apache.cassandra.spark.reader.StreamScanner<org.apache.cassandra.spark.reader.IndexEntry>getPartitionSizeIterator(org.apache.cassandra.spark.data.CqlTable table, org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, org.apache.cassandra.spark.data.SSTablesSupplier ssTables, org.apache.cassandra.spark.sparksql.filters.SparkRangeFilter rangeFilter, org.apache.cassandra.spark.utils.TimeProvider timeProvider, org.apache.cassandra.analytics.stats.Stats stats, java.util.concurrent.ExecutorService executor)abstract org.apache.cassandra.bridge.SSTableSummarygetSSTableSummary(java.lang.String keyspace, java.lang.String table, org.apache.cassandra.spark.data.SSTable ssTable)abstract org.apache.cassandra.bridge.SSTableSummarygetSSTableSummary(org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, org.apache.cassandra.spark.data.SSTable ssTable, int minIndexInterval, int maxIndexInterval)abstract org.apache.cassandra.bridge.SSTableWritergetSSTableWriter(java.lang.String inDirectory, java.lang.String partitioner, java.lang.String createStatement, java.lang.String insertStatement, java.util.Set<java.lang.String> userDefinedTypeStatements, int bufferSizeMB)abstract java.util.UUIDgetTimeUUID()abstract org.apache.cassandra.bridge.CassandraVersiongetVersion()abstract java.math.BigIntegerhash(org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.nio.ByteBuffer key)org.apache.cassandra.spark.data.CqlField.NativeTypeinet()<T> TjavaDeserialize(byte[] bytes, java.lang.Class<T> type)abstract <T> TjavaDeserialize(java.io.ObjectInputStream in, java.lang.Class<T> type)abstract voidjavaSerialize(java.io.ObjectOutputStream out, 
java.io.Serializable object)byte[]javaSerialize(java.io.Serializable object)abstract voidkryoRegister(com.esotericsoftware.kryo.Kryo kryo)abstract longlastRepairTime(java.lang.String keyspace, java.lang.String table, org.apache.cassandra.spark.data.SSTable ssTable)org.apache.cassandra.spark.data.CqlField.CqlListlist(org.apache.cassandra.spark.data.CqlField.CqlType type)org.apache.cassandra.spark.data.CqlField.CqlMapmap(org.apache.cassandra.spark.data.CqlField.CqlType keyType, org.apache.cassandra.spark.data.CqlField.CqlType valueType)java.lang.StringmaybeQuoteIdentifier(java.lang.String identifier)Returns the quoted identifier, if theidentifierhas mixed case or if theidentifieris a reserved word.org.apache.cassandra.spark.data.CqlField.NativeTypenativeType(java.lang.String name)java.util.Map<java.lang.String,? extends org.apache.cassandra.spark.data.CqlField.NativeType>nativeTypeNames()abstract BloomFilteropenBloomFilter(org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.lang.String keyspace, java.lang.String table, org.apache.cassandra.spark.data.SSTable ssTable)abstract java.util.List<java.lang.Boolean>overlaps(org.apache.cassandra.spark.data.SSTable ssTable, org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, int minIndexInterval, int maxIndexInterval, java.util.List<org.apache.cassandra.bridge.TokenRange> ranges)org.apache.cassandra.spark.data.CqlField.CqlTypeparseType(java.lang.String type)org.apache.cassandra.spark.data.CqlField.CqlTypeparseType(java.lang.String type, java.util.Map<java.lang.String,org.apache.cassandra.spark.data.CqlField.CqlUdt> udts)voidreadPartitionKeys(org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.lang.String keyspace, java.lang.String createStmt, java.util.Set<org.apache.cassandra.spark.data.SSTable> ssTables, java.util.function.Consumer<java.util.Map<java.lang.String,java.lang.Object>> 
rowConsumer)voidreadPartitionKeys(org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.lang.String keyspace, java.lang.String createStmt, java.util.Set<org.apache.cassandra.spark.data.SSTable> ssTables, org.apache.cassandra.bridge.TokenRange tokenRange, java.util.List<java.nio.ByteBuffer> partitionKeys, java.lang.String[] pruneColumnFilter, java.util.function.Consumer<java.util.Map<java.lang.String,java.lang.Object>> rowConsumer)abstract voidreadPartitionKeys(org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.lang.String keyspace, java.lang.String createStmt, org.apache.cassandra.spark.data.SSTablesSupplier ssTables, org.apache.cassandra.bridge.TokenRange tokenRange, java.util.List<java.nio.ByteBuffer> partitionKeys, java.lang.String[] pruneColumnFilter, java.util.function.Consumer<java.util.Map<java.lang.String,java.lang.Object>> rowConsumer)voidreadStringPartitionKeys(org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.lang.String keyspace, java.lang.String createStmt, java.util.Set<org.apache.cassandra.spark.data.SSTable> ssTables, java.util.function.Consumer<java.util.Map<java.lang.String,java.lang.Object>> rowConsumer)Convenience method around `readPartitionKeys` to accept partition keys as string values and encode with the correct types.voidreadStringPartitionKeys(org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.lang.String keyspace, java.lang.String createStmt, java.util.Set<org.apache.cassandra.spark.data.SSTable> ssTables, org.apache.cassandra.bridge.TokenRange tokenRange, java.util.List<java.util.List<java.lang.String>> partitionKeys, java.lang.String[] pruneColumnFilter, java.util.function.Consumer<java.util.Map<java.lang.String,java.lang.Object>> rowConsumer)Convenience method around `readPartitionKeys` to accept partition keys as string values and encode with the correct 
types.org.apache.cassandra.spark.data.CqlField.CqlTypereadType(org.apache.cassandra.spark.data.CqlField.CqlType.InternalType type, com.esotericsoftware.kryo.io.Input input)org.apache.cassandra.spark.data.CqlField.CqlSetset(org.apache.cassandra.spark.data.CqlField.CqlType type)org.apache.cassandra.spark.data.CqlField.NativeTypesmallint()abstract voidsstableToJson(java.nio.file.Path dataDbFile, java.io.OutputStream output)java.util.List<org.apache.cassandra.spark.data.CqlField.NativeType>supportedTypes()org.apache.cassandra.spark.data.CqlField.NativeTypetext()org.apache.cassandra.spark.data.CqlField.NativeTypetime()org.apache.cassandra.spark.data.CqlField.NativeTypetimestamp()org.apache.cassandra.spark.data.CqlField.NativeTypetimeuuid()org.apache.cassandra.spark.data.CqlField.NativeTypetinyint()abstract Tokenizertokenizer(org.apache.cassandra.spark.data.partitioner.Partitioner partitioner)java.util.List<java.math.BigInteger>toTokens(org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.lang.String keyspace, java.lang.String createTableStmt, java.util.List<java.util.List<java.lang.String>> partitionKeys)java.util.List<java.math.BigInteger>toTokens(org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.util.List<java.nio.ByteBuffer> partitionKeys)abstract java.lang.ObjecttoTupleValue(org.apache.cassandra.spark.data.CqlField.CqlTuple type, java.lang.Object[] values)abstract java.lang.ObjecttoUserTypeValue(org.apache.cassandra.spark.data.CqlField.CqlUdt type, java.util.Map<java.lang.String,java.lang.Object> values)org.apache.cassandra.spark.data.CqlField.CqlTupletuple(org.apache.cassandra.spark.data.CqlField.CqlType... 
types)org.apache.cassandra.spark.data.CqlField.CqlUdtBuilderudt(java.lang.String keyspace, java.lang.String name)java.nio.ByteBufferuncompress(byte[] bytes)java.nio.ByteBufferuncompress(java.nio.ByteBuffer input)org.apache.cassandra.spark.data.CqlField.NativeTypeuuid()org.apache.cassandra.spark.data.CqlField.NativeTypevarchar()org.apache.cassandra.spark.data.CqlField.NativeTypevarint()abstract voidwriteSSTable(org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.lang.String keyspace, java.lang.String table, java.nio.file.Path directory, java.lang.String createStatement, java.lang.String insertStatement, java.lang.String updateStatement, boolean upsert, java.util.Set<org.apache.cassandra.spark.data.CqlField.CqlUdt> udts, java.util.function.Consumer<CassandraBridge.Writer> writer)voidwriteSSTable(org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.lang.String keyspace, java.lang.String table, java.nio.file.Path directory, java.lang.String createStatement, java.lang.String insertStatement, java.util.function.Consumer<CassandraBridge.Writer> writer)abstract voidwriteTombstoneSSTable(org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.nio.file.Path directory, java.lang.String createStatement, java.lang.String deleteStatement, java.util.function.Consumer<CassandraBridge.Writer> writer)
-
-
-
Field Detail
-
IMPLEMENTATION_FQCN
public static final java.lang.String IMPLEMENTATION_FQCN
- See Also:
- Constant Field Values
-
-
Method Detail
-
cassandraTypes
public abstract org.apache.cassandra.spark.data.CassandraTypes cassandraTypes()
-
getPartitionKey
public abstract java.util.AbstractMap.SimpleEntry<java.nio.ByteBuffer,java.math.BigInteger> getPartitionKey(@NotNull org.apache.cassandra.spark.data.CqlTable table, @NotNull org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, @NotNull java.util.List<java.lang.String> keys)
-
getCompactionScanner
public abstract org.apache.cassandra.spark.reader.StreamScanner<org.apache.cassandra.spark.reader.RowData> getCompactionScanner(@NotNull org.apache.cassandra.spark.data.CqlTable table, @NotNull org.apache.cassandra.spark.data.partitioner.Partitioner partitionerType, @NotNull org.apache.cassandra.spark.data.SSTablesSupplier ssTables, @Nullable org.apache.cassandra.spark.sparksql.filters.SparkRangeFilter sparkRangeFilter, @NotNull java.util.Collection<org.apache.cassandra.spark.sparksql.filters.PartitionKeyFilter> partitionKeyFilters, @Nullable org.apache.cassandra.spark.sparksql.filters.PruneColumnFilter columnFilter, @NotNull org.apache.cassandra.spark.utils.TimeProvider timeProvider, boolean readIndexOffset, boolean useIncrementalRepair, @NotNull org.apache.cassandra.analytics.stats.Stats stats)
-
getPartitionSizeIterator
public abstract org.apache.cassandra.spark.reader.StreamScanner<org.apache.cassandra.spark.reader.IndexEntry> getPartitionSizeIterator(@NotNull org.apache.cassandra.spark.data.CqlTable table, @NotNull org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, @NotNull org.apache.cassandra.spark.data.SSTablesSupplier ssTables, @Nullable org.apache.cassandra.spark.sparksql.filters.SparkRangeFilter rangeFilter, @NotNull org.apache.cassandra.spark.utils.TimeProvider timeProvider, @NotNull org.apache.cassandra.analytics.stats.Stats stats, @NotNull java.util.concurrent.ExecutorService executor)
-
getVersion
public abstract org.apache.cassandra.bridge.CassandraVersion getVersion()
-
hash
public abstract java.math.BigInteger hash(org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.nio.ByteBuffer key)
-
getTimeUUID
public abstract java.util.UUID getTimeUUID()
-
buildSchema
public org.apache.cassandra.spark.data.CqlTable buildSchema(java.lang.String createStatement, java.lang.String keyspace)
-
buildSchema
public org.apache.cassandra.spark.data.CqlTable buildSchema(java.lang.String createStatement, java.lang.String keyspace, org.apache.cassandra.spark.data.ReplicationFactor replicationFactor)
-
buildSchema
public org.apache.cassandra.spark.data.CqlTable buildSchema(java.lang.String createStatement, java.lang.String keyspace, org.apache.cassandra.spark.data.ReplicationFactor replicationFactor, org.apache.cassandra.spark.data.partitioner.Partitioner partitioner)
-
buildSchema
public org.apache.cassandra.spark.data.CqlTable buildSchema(java.lang.String createStatement, java.lang.String keyspace, org.apache.cassandra.spark.data.ReplicationFactor replicationFactor, org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.util.Set<java.lang.String> udts)
-
buildSchema
public abstract org.apache.cassandra.spark.data.CqlTable buildSchema(java.lang.String createStatement, java.lang.String keyspace, org.apache.cassandra.spark.data.ReplicationFactor replicationFactor, org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.util.Set<java.lang.String> udts, @Nullable java.util.UUID tableId, int indexCount, boolean enableCdc)
-
maybeQuoteIdentifier
public java.lang.String maybeQuoteIdentifier(java.lang.String identifier)
Returns the quoted identifier, if the identifier has mixed case or if the identifier is a reserved word.
- Parameters:
identifier - the identifier
- Returns:
- the quoted identifier when the input is mixed case or a reserved word, the original input otherwise
-
readType
public org.apache.cassandra.spark.data.CqlField.CqlType readType(org.apache.cassandra.spark.data.CqlField.CqlType.InternalType type, com.esotericsoftware.kryo.io.Input input)
-
allTypes
public java.util.List<org.apache.cassandra.spark.data.CqlField.NativeType> allTypes()
-
nativeTypeNames
public java.util.Map<java.lang.String,? extends org.apache.cassandra.spark.data.CqlField.NativeType> nativeTypeNames()
-
nativeType
public org.apache.cassandra.spark.data.CqlField.NativeType nativeType(java.lang.String name)
-
supportedTypes
public java.util.List<org.apache.cassandra.spark.data.CqlField.NativeType> supportedTypes()
-
ascii
public org.apache.cassandra.spark.data.CqlField.NativeType ascii()
-
blob
public org.apache.cassandra.spark.data.CqlField.NativeType blob()
-
bool
public org.apache.cassandra.spark.data.CqlField.NativeType bool()
-
counter
public org.apache.cassandra.spark.data.CqlField.NativeType counter()
-
bigint
public org.apache.cassandra.spark.data.CqlField.NativeType bigint()
-
date
public org.apache.cassandra.spark.data.CqlField.NativeType date()
-
decimal
public org.apache.cassandra.spark.data.CqlField.NativeType decimal()
-
aDouble
public org.apache.cassandra.spark.data.CqlField.NativeType aDouble()
-
duration
public org.apache.cassandra.spark.data.CqlField.NativeType duration()
-
empty
public org.apache.cassandra.spark.data.CqlField.NativeType empty()
-
aFloat
public org.apache.cassandra.spark.data.CqlField.NativeType aFloat()
-
inet
public org.apache.cassandra.spark.data.CqlField.NativeType inet()
-
aInt
public org.apache.cassandra.spark.data.CqlField.NativeType aInt()
-
smallint
public org.apache.cassandra.spark.data.CqlField.NativeType smallint()
-
text
public org.apache.cassandra.spark.data.CqlField.NativeType text()
-
time
public org.apache.cassandra.spark.data.CqlField.NativeType time()
-
timestamp
public org.apache.cassandra.spark.data.CqlField.NativeType timestamp()
-
timeuuid
public org.apache.cassandra.spark.data.CqlField.NativeType timeuuid()
-
tinyint
public org.apache.cassandra.spark.data.CqlField.NativeType tinyint()
-
uuid
public org.apache.cassandra.spark.data.CqlField.NativeType uuid()
-
varchar
public org.apache.cassandra.spark.data.CqlField.NativeType varchar()
-
varint
public org.apache.cassandra.spark.data.CqlField.NativeType varint()
-
collection
public org.apache.cassandra.spark.data.CqlField.CqlType collection(java.lang.String name, org.apache.cassandra.spark.data.CqlField.CqlType... types)
-
list
public org.apache.cassandra.spark.data.CqlField.CqlList list(org.apache.cassandra.spark.data.CqlField.CqlType type)
-
set
public org.apache.cassandra.spark.data.CqlField.CqlSet set(org.apache.cassandra.spark.data.CqlField.CqlType type)
-
map
public org.apache.cassandra.spark.data.CqlField.CqlMap map(org.apache.cassandra.spark.data.CqlField.CqlType keyType, org.apache.cassandra.spark.data.CqlField.CqlType valueType)
-
tuple
public org.apache.cassandra.spark.data.CqlField.CqlTuple tuple(org.apache.cassandra.spark.data.CqlField.CqlType... types)
-
frozen
public org.apache.cassandra.spark.data.CqlField.CqlType frozen(org.apache.cassandra.spark.data.CqlField.CqlType type)
-
udt
public org.apache.cassandra.spark.data.CqlField.CqlUdtBuilder udt(java.lang.String keyspace, java.lang.String name)
-
parseType
public org.apache.cassandra.spark.data.CqlField.CqlType parseType(java.lang.String type)
-
parseType
public org.apache.cassandra.spark.data.CqlField.CqlType parseType(java.lang.String type, java.util.Map<java.lang.String,org.apache.cassandra.spark.data.CqlField.CqlUdt> udts)
-
writeSSTable
public void writeSSTable(org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.lang.String keyspace, java.lang.String table, java.nio.file.Path directory, java.lang.String createStatement, java.lang.String insertStatement, java.util.function.Consumer<CassandraBridge.Writer> writer)
-
writeSSTable
public abstract void writeSSTable(org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.lang.String keyspace, java.lang.String table, java.nio.file.Path directory, java.lang.String createStatement, java.lang.String insertStatement, java.lang.String updateStatement, boolean upsert, java.util.Set<org.apache.cassandra.spark.data.CqlField.CqlUdt> udts, java.util.function.Consumer<CassandraBridge.Writer> writer)
-
getSSTableWriter
public abstract org.apache.cassandra.bridge.SSTableWriter getSSTableWriter(java.lang.String inDirectory, java.lang.String partitioner, java.lang.String createStatement, java.lang.String insertStatement, java.util.Set<java.lang.String> userDefinedTypeStatements, int bufferSizeMB)
-
getSSTableSummary
public abstract org.apache.cassandra.bridge.SSTableSummary getSSTableSummary(@NotNull java.lang.String keyspace, @NotNull java.lang.String table, @NotNull org.apache.cassandra.spark.data.SSTable ssTable)
-
getSSTableSummary
public abstract org.apache.cassandra.bridge.SSTableSummary getSSTableSummary(@NotNull org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, @NotNull org.apache.cassandra.spark.data.SSTable ssTable, int minIndexInterval, int maxIndexInterval)
-
writeTombstoneSSTable
public abstract void writeTombstoneSSTable(org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, java.nio.file.Path directory, java.lang.String createStatement, java.lang.String deleteStatement, java.util.function.Consumer<CassandraBridge.Writer> writer)
-
sstableToJson
public abstract void sstableToJson(java.nio.file.Path dataDbFile, java.io.OutputStream output) throws java.io.FileNotFoundException
- Throws:
java.io.FileNotFoundException
-
toTupleValue
public abstract java.lang.Object toTupleValue(org.apache.cassandra.spark.data.CqlField.CqlTuple type, java.lang.Object[] values)
-
toUserTypeValue
public abstract java.lang.Object toUserTypeValue(org.apache.cassandra.spark.data.CqlField.CqlUdt type, java.util.Map<java.lang.String,java.lang.Object> values)
-
compress
public java.nio.ByteBuffer compress(byte[] bytes) throws java.io.IOException
- Throws:
java.io.IOException
-
compress
public java.nio.ByteBuffer compress(java.nio.ByteBuffer input) throws java.io.IOException
- Throws:
java.io.IOException
-
uncompress
public java.nio.ByteBuffer uncompress(byte[] bytes) throws java.io.IOException
- Throws:
java.io.IOException
-
uncompress
public java.nio.ByteBuffer uncompress(java.nio.ByteBuffer input) throws java.io.IOException
- Throws:
java.io.IOException
-
compressionUtil
public abstract org.apache.cassandra.util.CompressionUtil compressionUtil()
-
lastRepairTime
public abstract long lastRepairTime(@NotNull java.lang.String keyspace, @NotNull java.lang.String table, @NotNull org.apache.cassandra.spark.data.SSTable ssTable) throws java.io.IOException
- Parameters:
keyspace - keyspace name
table - table name
ssTable - SSTable instance
- Returns:
- last repair time for a given SSTable by reading the Statistics.db file.
- Throws:
java.io.IOException
-
overlaps
public abstract java.util.List<java.lang.Boolean> overlaps(@NotNull org.apache.cassandra.spark.data.SSTable ssTable, @NotNull org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, int minIndexInterval, int maxIndexInterval, @NotNull java.util.List<org.apache.cassandra.bridge.TokenRange> ranges) throws java.io.IOException
- Parameters:
ssTable - SSTable instance
partitioner - Cassandra partitioner
minIndexInterval - minIndexInterval configured in the TableMetadata
maxIndexInterval - maxIndexInterval configured in the TableMetadata
ranges - a list of token ranges
- Returns:
- a list of boolean values, each true if the corresponding token range in the `ranges` parameter overlaps with the SSTable. The SSTable may or may not contain data for the range.
- Throws:
java.io.IOException
-
toTokens
public java.util.List<java.math.BigInteger> toTokens(@NotNull org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, @NotNull java.lang.String keyspace, @NotNull java.lang.String createTableStmt, @NotNull java.util.List<java.util.List<java.lang.String>> partitionKeys)
- Parameters:
partitioner - Cassandra partitioner
keyspace - Cassandra keyspace
createTableStmt - CQL table create statement
partitionKeys - list of partition keys
- Returns:
- list of tokens corresponding to each input `partitionKeys`
-
toTokens
public java.util.List<java.math.BigInteger> toTokens(@NotNull org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, @NotNull java.util.List<java.nio.ByteBuffer> partitionKeys)
- Parameters:
partitioner - Cassandra partitioner
partitionKeys - list of encoded partition keys
- Returns:
- list of tokens corresponding to each input `partitionKeys`
-
tokenizer
public abstract Tokenizer tokenizer(@NotNull org.apache.cassandra.spark.data.partitioner.Partitioner partitioner)
- Parameters:
partitioner - Cassandra partitioner
- Returns:
- a Tokenizer instance for the provided Partitioner that maps a partition key to the token.
-
encodePartitionKey
public java.nio.ByteBuffer encodePartitionKey(@NotNull org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, @NotNull java.lang.String keyspace, @NotNull java.lang.String createTableStmt, @NotNull java.util.List<java.lang.String> partitionKey)
- Parameters:
partitioner - Cassandra partitioner
keyspace - keyspace name
createTableStmt - CQL create table statement
partitionKey - partition key
- Returns:
- encoded ByteBuffer for the input `partitionKey`
-
encodePartitionKeys
public abstract java.util.List<java.nio.ByteBuffer> encodePartitionKeys(@NotNull org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, @NotNull java.lang.String keyspace, @NotNull java.lang.String createTableStmt, @NotNull java.util.List<java.util.List<java.lang.String>> partitionKeys)
- Parameters:
partitioner - Cassandra partitioner
keyspace - keyspace name
createTableStmt - CQL create table statement
partitionKeys - list of partition keys
- Returns:
- a list of encoded ByteBuffers corresponding to the partition keys input in `partitionKeys`
-
openBloomFilter
public abstract BloomFilter openBloomFilter(@NotNull org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, @NotNull java.lang.String keyspace, @NotNull java.lang.String table, @NotNull org.apache.cassandra.spark.data.SSTable ssTable) throws java.io.IOException
- Parameters:
partitioner - Cassandra partitioner
keyspace - keyspace name
table - table name
ssTable - SSTable instance
- Returns:
- version independent BloomFilter instance to answer if SSTable might contain a partition key (might return false-positives but never false-negatives)
- Throws:
java.io.IOException
-
contains
public abstract java.util.List<java.lang.Boolean> contains(@NotNull org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, @NotNull java.lang.String keyspace, @NotNull java.lang.String table, @NotNull org.apache.cassandra.spark.data.SSTable ssTable, @NotNull java.util.List<java.nio.ByteBuffer> partitionKeys) throws java.io.IOException
- Parameters:
partitioner - Cassandra partitioner
keyspace - keyspace name
table - table name
ssTable - SSTable instance
partitionKeys - list of partition keys
- Returns:
- list of booleans returning true if an SSTable contains a partition key, corresponding to the partition keys input in `partitionKeys`.
- Throws:
java.io.IOException
-
readStringPartitionKeys
public void readStringPartitionKeys(@NotNull org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, @NotNull java.lang.String keyspace, @NotNull java.lang.String createStmt, @NotNull java.util.Set<org.apache.cassandra.spark.data.SSTable> ssTables, @NotNull java.util.function.Consumer<java.util.Map<java.lang.String,java.lang.Object>> rowConsumer) throws java.io.IOException
Convenience method around `readPartitionKeys` to accept partition keys as string values and encode with the correct types.
- Parameters:
partitioner - Cassandra partitioner
keyspace - keyspace name
createStmt - create table CQL statement
ssTables - set of SSTables to read
rowConsumer - Consumer interface to consume rows as they are read, to avoid buffering all rows in memory for consumption.
- Throws:
java.io.IOException
-
readStringPartitionKeys
public void readStringPartitionKeys(@NotNull org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, @NotNull java.lang.String keyspace, @NotNull java.lang.String createStmt, @NotNull java.util.Set<org.apache.cassandra.spark.data.SSTable> ssTables, @Nullable org.apache.cassandra.bridge.TokenRange tokenRange, @Nullable java.util.List<java.util.List<java.lang.String>> partitionKeys, @Nullable java.lang.String[] pruneColumnFilter, @NotNull java.util.function.Consumer<java.util.Map<java.lang.String,java.lang.Object>> rowConsumer) throws java.io.IOException
Convenience method around `readPartitionKeys` to accept partition keys as string values and encode with the correct types.
- Parameters:
partitioner - Cassandra partitioner
keyspace - keyspace name
createStmt - create table CQL statement
ssTables - set of SSTables to read
tokenRange - optional token range to limit the bulk read to a restricted token range.
partitionKeys - list of partition keys; if there is more than one partition key, they must be correctly ordered in the inner list.
pruneColumnFilter - optional filter to select a subset of columns; this can offer a performance improvement when skipping over large blobs or columns.
rowConsumer - Consumer interface to consume rows as they are read, to avoid buffering all rows in memory for consumption.
- Throws:
java.io.IOException
-
readPartitionKeys
public void readPartitionKeys(@NotNull org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, @NotNull java.lang.String keyspace, @NotNull java.lang.String createStmt, @NotNull java.util.Set<org.apache.cassandra.spark.data.SSTable> ssTables, @NotNull java.util.function.Consumer<java.util.Map<java.lang.String,java.lang.Object>> rowConsumer) throws java.io.IOException
- Throws:
java.io.IOException
-
readPartitionKeys
public void readPartitionKeys(@NotNull org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, @NotNull java.lang.String keyspace, @NotNull java.lang.String createStmt, @NotNull java.util.Set<org.apache.cassandra.spark.data.SSTable> ssTables, @Nullable org.apache.cassandra.bridge.TokenRange tokenRange, @Nullable java.util.List<java.nio.ByteBuffer> partitionKeys, @Nullable java.lang.String[] pruneColumnFilter, @NotNull java.util.function.Consumer<java.util.Map<java.lang.String,java.lang.Object>> rowConsumer) throws java.io.IOException
- Throws:
java.io.IOException
-
readPartitionKeys
public abstract void readPartitionKeys(@NotNull org.apache.cassandra.spark.data.partitioner.Partitioner partitioner, @NotNull java.lang.String keyspace, @NotNull java.lang.String createStmt, @NotNull org.apache.cassandra.spark.data.SSTablesSupplier ssTables, @Nullable org.apache.cassandra.bridge.TokenRange tokenRange, @Nullable java.util.List<java.nio.ByteBuffer> partitionKeys, @Nullable java.lang.String[] pruneColumnFilter, @NotNull java.util.function.Consumer<java.util.Map<java.lang.String,java.lang.Object>> rowConsumer) throws java.io.IOException
- Throws:
java.io.IOException
-
kryoRegister
public abstract void kryoRegister(com.esotericsoftware.kryo.Kryo kryo)
-
javaSerialize
public abstract void javaSerialize(java.io.ObjectOutputStream out, java.io.Serializable object)
-
javaDeserialize
public abstract <T> T javaDeserialize(java.io.ObjectInputStream in, java.lang.Class<T> type)
-
javaSerialize
public byte[] javaSerialize(java.io.Serializable object)
-
javaDeserialize
public <T> T javaDeserialize(byte[] bytes, java.lang.Class<T> type)
-
-