public static class CarbonVectorProxy.ColumnVectorProxy
extends org.apache.spark.sql.execution.vectorized.ColumnVector
| Constructor and Description |
|---|
ColumnVectorProxy(org.apache.spark.sql.execution.vectorized.ColumnVector columnVector,
int capacity,
org.apache.spark.memory.MemoryMode mode) |
| Modifier and Type | Method and Description |
|---|---|
void |
close() |
org.apache.spark.sql.types.DataType |
dataType(int ordinal) |
int |
getArrayLength(int rowId) |
int |
getArrayOffset(int rowId) |
boolean |
getBoolean(int i) |
byte |
getByte(int i) |
int |
getDictId(int rowId) |
double |
getDouble(int i) |
float |
getFloat(int i) |
int |
getInt(int i) |
long |
getLong(int i) |
short |
getShort(int i) |
org.apache.spark.sql.execution.vectorized.ColumnVector |
getVector() |
boolean |
hasDictionary() |
boolean |
isNullAt(int i) |
void |
loadBytes(org.apache.spark.sql.execution.vectorized.ColumnVector.Array array) |
long |
nullsNativeAddress() |
void |
putAllByteArray(byte[] data,
int offset,
int length)
Keeps the binary data of all rows in this vector.
|
void |
putArray(int rowId,
int offset,
int length) |
void |
putBoolean(int rowId,
boolean value) |
void |
putBooleans(int rowId,
int count,
boolean value) |
void |
putByte(int rowId,
byte value) |
int |
putByteArray(int rowId,
byte[] value,
int offset,
int count) |
void |
putBytes(int rowId,
int count,
byte value) |
void |
putBytes(int rowId,
int count,
byte[] src,
int srcIndex) |
void |
putDictionaryInt(int rowId,
int value) |
void |
putDouble(int rowId,
double value) |
void |
putDoubles(int rowId,
int count,
byte[] src,
int srcIndex) |
void |
putDoubles(int rowId,
int count,
double value) |
void |
putDoubles(int rowId,
int count,
double[] src,
int srcIndex) |
void |
putFloat(int rowId,
float value) |
void |
putFloats(int rowId,
int count,
byte[] src,
int srcIndex) |
void |
putFloats(int rowId,
int count,
float value) |
void |
putFloats(int rowId,
int count,
float[] src,
int srcIndex) |
void |
putInt(int rowId,
int value) |
void |
putInts(int rowId,
int count,
int value) |
void |
putInts(int rowId,
int count,
int[] src,
int srcIndex) |
void |
putIntsLittleEndian(int rowId,
int count,
byte[] src,
int srcIndex) |
void |
putLong(int rowId,
long value) |
void |
putLongs(int rowId,
int count,
long value) |
void |
putLongs(int rowId,
int count,
long[] src,
int srcIndex) |
void |
putLongsLittleEndian(int rowId,
int count,
byte[] src,
int srcIndex) |
void |
putNotNull(int rowId) |
void |
putNotNulls(int rowId,
int count) |
void |
putNull(int rowId) |
void |
putNulls(int rowId,
int count) |
void |
putRowToColumnBatch(int rowId,
Object value) |
void |
putShort(int rowId,
short value) |
void |
putShorts(int rowId,
int count,
short value) |
void |
putShorts(int rowId,
int count,
short[] src,
int srcIndex) |
void |
reserve(int requiredCapacity) |
org.apache.spark.sql.execution.vectorized.ColumnVector |
reserveDictionaryIds(int capacity) |
protected void |
reserveInternal(int capacity) |
void |
reset() |
void |
setDictionary(org.apache.carbondata.core.scan.result.vector.CarbonDictionary dictionary) |
void |
setLazyPage(org.apache.carbondata.core.scan.scanner.LazyPageLoader lazyPage) |
long |
valuesNativeAddress() |
Methods inherited from class org.apache.spark.sql.execution.vectorized.ColumnVector:
allocate, anyNullsSet, appendArray, appendBoolean, appendBooleans, appendByte, appendByteArray, appendBytes, appendBytes, appendDouble, appendDoubles, appendDoubles, appendFloat, appendFloats, appendInt, appendInts, appendInts, appendLong, appendLongs, appendLongs, appendNotNull, appendNotNulls, appendNull, appendNulls, appendShort, appendShorts, appendShorts, appendStruct, arrayData, dataType, getArray, getBinary, getChildColumn, getDecimal, getDictionaryIds, getElementsAppended, getMap, getStruct, getStruct, getUTF8String, isArray, numNulls, putByteArray, putDecimal, setDictionary, setIsConstant

public ColumnVectorProxy(org.apache.spark.sql.execution.vectorized.ColumnVector columnVector,
int capacity,
org.apache.spark.memory.MemoryMode mode)
public void putRowToColumnBatch(int rowId,
Object value)
public void putBoolean(int rowId,
boolean value)
Overrides: putBoolean in class org.apache.spark.sql.execution.vectorized.ColumnVector

public void putByte(int rowId,
byte value)
putByte in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putBytes(int rowId,
int count,
byte[] src,
int srcIndex)
putBytes in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putShort(int rowId,
short value)
putShort in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putInt(int rowId,
int value)
putInt in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putFloat(int rowId,
float value)
putFloat in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putFloats(int rowId,
int count,
float[] src,
int srcIndex)
putFloats in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putLong(int rowId,
long value)
putLong in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putDouble(int rowId,
double value)
putDouble in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putInts(int rowId,
int count,
int value)
putInts in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putInts(int rowId,
int count,
int[] src,
int srcIndex)
putInts in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putShorts(int rowId,
int count,
short value)
putShorts in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putShorts(int rowId,
int count,
short[] src,
int srcIndex)
putShorts in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putLongs(int rowId,
int count,
long value)
putLongs in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putLongs(int rowId,
int count,
long[] src,
int srcIndex)
putLongs in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putDoubles(int rowId,
int count,
double value)
putDoubles in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putDoubles(int rowId,
int count,
double[] src,
int srcIndex)
putDoubles in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic org.apache.spark.sql.types.DataType dataType(int ordinal)
public void putNotNull(int rowId)
putNotNull in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putNotNulls(int rowId,
int count)
putNotNulls in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putDictionaryInt(int rowId,
int value)
public void setDictionary(org.apache.carbondata.core.scan.result.vector.CarbonDictionary dictionary)
public void putNull(int rowId)
putNull in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putNulls(int rowId,
int count)
Overrides: putNulls in class org.apache.spark.sql.execution.vectorized.ColumnVector

public boolean hasDictionary()
Overrides: hasDictionary in class org.apache.spark.sql.execution.vectorized.ColumnVector

public org.apache.spark.sql.execution.vectorized.ColumnVector reserveDictionaryIds(int capacity)
Overrides: reserveDictionaryIds in class org.apache.spark.sql.execution.vectorized.ColumnVector

public boolean isNullAt(int i)
Overrides: isNullAt in class org.apache.spark.sql.execution.vectorized.ColumnVector

public boolean getBoolean(int i)
getBoolean in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic byte getByte(int i)
getByte in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic short getShort(int i)
getShort in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic int getInt(int i)
getInt in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic long getLong(int i)
getLong in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic float getFloat(int i)
getFloat in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic double getDouble(int i)
getDouble in class org.apache.spark.sql.execution.vectorized.ColumnVectorprotected void reserveInternal(int capacity)
reserveInternal in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void reserve(int requiredCapacity)
reserve in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic long nullsNativeAddress()
nullsNativeAddress in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic long valuesNativeAddress()
valuesNativeAddress in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putBooleans(int rowId,
int count,
boolean value)
putBooleans in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putBytes(int rowId,
int count,
byte value)
putBytes in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putIntsLittleEndian(int rowId,
int count,
byte[] src,
int srcIndex)
putIntsLittleEndian in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic int getDictId(int rowId)
getDictId in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putLongsLittleEndian(int rowId,
int count,
byte[] src,
int srcIndex)
putLongsLittleEndian in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putFloats(int rowId,
int count,
float value)
putFloats in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putFloats(int rowId,
int count,
byte[] src,
int srcIndex)
putFloats in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putDoubles(int rowId,
int count,
byte[] src,
int srcIndex)
putDoubles in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putArray(int rowId,
int offset,
int length)
putArray in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic int getArrayLength(int rowId)
getArrayLength in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic int getArrayOffset(int rowId)
getArrayOffset in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void loadBytes(org.apache.spark.sql.execution.vectorized.ColumnVector.Array array)
loadBytes in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic int putByteArray(int rowId,
byte[] value,
int offset,
int count)
putByteArray in class org.apache.spark.sql.execution.vectorized.ColumnVectorpublic void putAllByteArray(byte[] data,
int offset,
int length)
public void close()
Specified by: close in interface AutoCloseable
Overrides: close in class org.apache.spark.sql.execution.vectorized.ColumnVector

public void reset()
Overrides: reset in class org.apache.spark.sql.execution.vectorized.ColumnVector

public void setLazyPage(org.apache.carbondata.core.scan.scanner.LazyPageLoader lazyPage)
public org.apache.spark.sql.execution.vectorized.ColumnVector getVector()
Copyright © 2016–2019 The Apache Software Foundation. All rights reserved.