org.apache.hadoop.hive.ql.exec
Class Utilities

java.lang.Object
  extended by org.apache.hadoop.hive.ql.exec.Utilities
public final class Utilities

Utilities: static helper methods used throughout Hive query execution, covering plan (de)serialization, temporary and output file handling, compressed output streams, classpath manipulation, and JDBC retry logic.
Nested Class Summary

static class  Utilities.CollectionPersistenceDelegate
static class  Utilities.EnumDelegate
        Java 1.5 workaround.
static class  Utilities.ListDelegate
static class  Utilities.MapDelegate
static class  Utilities.ReduceField
        ReduceField.
static class  Utilities.SetDelegate
static class  Utilities.SQLCommand<T>
static class  Utilities.StreamPrinter
        StreamPrinter.
static class  Utilities.StreamStatus
        StreamStatus.
static class  Utilities.Tuple<T,V>
        Tuple.
Field Summary

static int  carriageReturnCode
static int  ctrlaCode
static TableDesc  defaultTd
static Object  getInputSummaryLock
static String  HADOOP_LOCAL_FS
        The objects in the reducer are composed of these top-level fields.
static String  INDENT
static int  newLineCode
static String  NSTR
static String  nullStringOutput
static String  nullStringStorage
static Random  randGen
static char  sqlEscapeChar
static String  suffix
static int  tabCode
Method Summary

static String  abbreviate(String str, int max)
        Convert "From src insert blah blah" to "From src insert ...".
static void  addMapWork(MapredWork mr, Table tbl, String alias, Operator<?> work)
static ClassLoader  addToClassPath(ClassLoader cloader, String[] newPaths)
        Add new elements to the classpath.
static String  checkJDOPushDown(Table tab, ExprNodeDesc expr)
        Check if the partition pruning expression can be pushed down to JDO filtering.
static void  clearMapRedWork(org.apache.hadoop.conf.Configuration job)
static Connection  connectWithRetry(String connectionString, int waitWindow, int maxRetries)
        Retry connecting to a database with random backoff (same as the one implemented in HDFS-767).
static boolean  contentsEqual(InputStream is1, InputStream is2, boolean ignoreWhitespace)
static void  copyTableJobPropertiesToConf(TableDesc tbl, org.apache.hadoop.mapred.JobConf job)
        Copies the storage handler properties configured for a table descriptor to a runtime job configuration.
static OutputStream  createCompressedStream(org.apache.hadoop.mapred.JobConf jc, OutputStream out)
        Convert an output stream to a compressed output stream based on codecs and compression options specified in the Job Configuration.
static OutputStream  createCompressedStream(org.apache.hadoop.mapred.JobConf jc, OutputStream out, boolean isCompressed)
        Convert an output stream to a compressed output stream based on codecs in the Job Configuration; uses the user-supplied compression flag (rather than obtaining it from the Job Configuration).
static RCFile.Writer  createRCFileWriter(org.apache.hadoop.mapred.JobConf jc, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path file, boolean isCompressed)
        Create an RCFile output stream based on the job configuration; uses the user-supplied compression flag (rather than obtaining it from the Job Configuration).
static org.apache.hadoop.io.SequenceFile.Writer  createSequenceWriter(org.apache.hadoop.mapred.JobConf jc, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path file, Class<?> keyClass, Class<?> valClass)
        Create a SequenceFile output stream based on the job configuration.
static org.apache.hadoop.io.SequenceFile.Writer  createSequenceWriter(org.apache.hadoop.mapred.JobConf jc, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path file, Class<?> keyClass, Class<?> valClass, boolean isCompressed)
        Create a SequenceFile output stream based on the job configuration; uses the user-supplied compression flag (rather than obtaining it from the Job Configuration).
static ExprNodeDesc  deserializeExpression(String s, org.apache.hadoop.conf.Configuration conf)
static MapredLocalWork  deserializeMapRedLocalWork(InputStream in, org.apache.hadoop.conf.Configuration conf)
static MapredWork  deserializeMapRedWork(InputStream in, org.apache.hadoop.conf.Configuration conf)
static QueryPlan  deserializeQueryPlan(InputStream in, org.apache.hadoop.conf.Configuration conf)
        Deserialize the whole query plan.
static String  escapeSqlLike(String key)
        Escape '_', '%', and the escape character itself inside the string key.
static <T> T  executeWithRetry(Utilities.SQLCommand<T> cmd, PreparedStatement stmt, int baseWindow, int maxRetries)
        Retry SQL execution with random backoff (same as the one implemented in HDFS-767).
static String  formatBinaryString(byte[] array, int start, int length)
static String  formatMsecToStr(long msec)
        Format a number of milliseconds as a string.
static String  generateFileName(Byte tag, String bigBucketFileName)
static String  generatePath(org.apache.hadoop.fs.Path baseURI, String filename)
static String  generatePath(String baseURI, String dumpFilePrefix, Byte tag, String bigBucketFileName)
static String  generateTarFileName(String name)
static String  generateTarURI(org.apache.hadoop.fs.Path baseURI, String filename)
static String  generateTarURI(String baseURI, String filename)
static String  generateTmpURI(String baseURI, String id)
static Class  getBuiltinUtilsClass()
static List<String>  getColumnNames(Properties props)
static List<String>  getColumnNamesFromFieldSchema(List<FieldSchema> partCols)
static List<String>  getColumnNamesFromSortCols(List<Order> sortCols)
static List<String>  getColumnTypes(Properties props)
static int  getDefaultNotificationInterval(org.apache.hadoop.conf.Configuration hconf)
        Gets the default notification interval to send progress updates to the tracker.
static List<String>  getFieldSchemaString(List<FieldSchema> fl)
static String  getFileExtension(org.apache.hadoop.mapred.JobConf jc, boolean isCompressed)
        Deprecated. Use getFileExtension(JobConf, boolean, HiveOutputFormat).
static String  getFileExtension(org.apache.hadoop.mapred.JobConf jc, boolean isCompressed, HiveOutputFormat<?,?> hiveOutputFormat)
        Get the extension for the output file, based on the compression option, the output format, and the configured output codec.
static String  getFileNameFromDirName(String dirName)
static org.apache.hadoop.fs.FileStatus[]  getFileStatusRecurse(org.apache.hadoop.fs.Path path, int level, org.apache.hadoop.fs.FileSystem fs)
        Get all file statuses under a root path, recursing a given number of directory levels.
static List<LinkedHashMap<String,String>>  getFullDPSpecs(org.apache.hadoop.conf.Configuration conf, DynamicPartitionCtx dpCtx)
        Construct a list of full partition specs from the Dynamic Partition Context and the directory names corresponding to these dynamic partitions.
static String  getHiveJobID(org.apache.hadoop.conf.Configuration job)
static org.apache.hadoop.fs.ContentSummary  getInputSummary(Context ctx, MapredWork work, org.apache.hadoop.fs.PathFilter filter)
        Calculate the total size of input files.
static MapredWork  getMapRedWork(org.apache.hadoop.conf.Configuration job)
static List<ExecDriver>  getMRTasks(List<Task<? extends Serializable>> tasks)
static String  getNameMessage(Exception e)
static String  getOpTreeSkel(Operator<?> op)
static PartitionDesc  getPartitionDesc(Partition part)
static PartitionDesc  getPartitionDescFromTableDesc(TableDesc tblDesc, Partition part)
static String  getPrefixedTaskIdFromFilename(String filename)
        Get the part-spec + task id from the filename.
static long  getRandomWaitTime(int baseWindow, int failures, Random r)
        Introduce a random factor into the wait time before another retry.
static String  getResourceFiles(org.apache.hadoop.conf.Configuration conf, SessionState.ResourceType t)
static StatsPublisher  getStatsPublisher(org.apache.hadoop.mapred.JobConf jc)
static TableDesc  getTableDesc(String cols, String colTypes)
static TableDesc  getTableDesc(Table tbl)
static String  getTaskId(org.apache.hadoop.conf.Configuration hconf)
        Gets the task id if we are running as a Hadoop job.
static String  getTaskIdFromFilename(String filename)
        Get the task id from the filename.
static boolean  isEmptyPath(org.apache.hadoop.mapred.JobConf job, org.apache.hadoop.fs.Path dirPath)
static boolean  isEmptyPath(org.apache.hadoop.mapred.JobConf job, String dirPath, Context ctx)
static boolean  isTempPath(org.apache.hadoop.fs.FileStatus file)
        Detect whether the supplied file is a temporary path.
static ArrayList  makeList(Object... olist)
static HashMap  makeMap(Object... olist)
static Properties  makeProperties(String... olist)
static List<String>  mergeUniqElems(List<String> src, List<String> dest)
static void  mvFileToFinalPath(String specPath, org.apache.hadoop.conf.Configuration hconf, boolean success, org.apache.commons.logging.Log log, DynamicPartitionCtx dpCtx, FileSinkDesc conf, org.apache.hadoop.mapred.Reporter reporter)
static String  now()
static PreparedStatement  prepareWithRetry(Connection conn, String stmt, int waitWindow, int maxRetries)
        Retry preparing a SQL statement with random backoff (same as the one implemented in HDFS-767).
static Utilities.StreamStatus  readColumn(DataInput in, OutputStream out)
static String  realFile(String newFile, org.apache.hadoop.conf.Configuration conf)
        Shamelessly cloned from GenericOptionsParser.
static void  removeFromClassPath(String[] pathsToRemove)
        Remove elements from the classpath.
static HashMap<String,org.apache.hadoop.fs.FileStatus>  removeTempOrDuplicateFiles(org.apache.hadoop.fs.FileStatus[] items, org.apache.hadoop.fs.FileSystem fs)
static void  removeTempOrDuplicateFiles(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path path)
        Remove all temporary files and duplicate (double-committed) files from a given directory.
static ArrayList<String>  removeTempOrDuplicateFiles(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path path, DynamicPartitionCtx dpCtx)
        Remove all temporary files and duplicate (double-committed) files from a given directory.
static void  rename(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path src, org.apache.hadoop.fs.Path dst)
        Rename src to dst; if dst already exists, move the files in src into dst.
static void  renameOrMoveFiles(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path src, org.apache.hadoop.fs.Path dst)
        Rename src to dst; if dst already exists, move the files in src into dst.
static String  replaceTaskIdFromFilename(String filename, int bucketNum)
        Replace the task id in the filename.
static String  replaceTaskIdFromFilename(String filename, String fileId)
static void  reworkMapRedWork(Task<? extends Serializable> task, boolean reworkMapredWork, HiveConf conf)
        The check here is kind of not clean.
static String  serializeExpression(ExprNodeDesc expr)
static void  serializeMapRedLocalWork(MapredLocalWork w, OutputStream out)
        Serialize the mapredLocalWork object to an output stream.
static void  serializeMapRedWork(MapredWork w, OutputStream out)
        Serialize the mapredWork object to an output stream.
static void  serializeQueryPlan(QueryPlan plan, OutputStream out)
        Serialize the whole query plan.
static void  serializeTasks(Task<? extends Serializable> t, OutputStream out)
        Serialize a single Task.
static void  setColumnNameList(org.apache.hadoop.mapred.JobConf jobConf, Operator op)
static void  setColumnTypeList(org.apache.hadoop.mapred.JobConf jobConf, Operator op)
static void  setMapRedWork(org.apache.hadoop.conf.Configuration job, MapredWork w, String hiveScratchDir)
static void  setWorkflowAdjacencies(org.apache.hadoop.conf.Configuration conf, QueryPlan plan)
static double  showTime(long time)
static boolean  supportCombineFileInputFormat()
static org.apache.hadoop.fs.Path  toTaskTempPath(org.apache.hadoop.fs.Path orig)
static org.apache.hadoop.fs.Path  toTaskTempPath(String orig)
static org.apache.hadoop.fs.Path  toTempPath(org.apache.hadoop.fs.Path orig)
static org.apache.hadoop.fs.Path  toTempPath(String orig)
        Given a path, convert it to a temporary path.
static void  validateColumnNames(List<String> colNames, List<String> checkCols)
static void  validatePartSpec(Table tbl, Map<String,String> partSpec)
Methods inherited from class java.lang.Object

clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait
Field Detail

public static String HADOOP_LOCAL_FS
public static TableDesc defaultTd
public static final int carriageReturnCode
public static final int newLineCode
public static final int tabCode
public static final int ctrlaCode
public static final String INDENT
public static String nullStringStorage
public static String nullStringOutput
public static Random randGen
public static final String NSTR
public static Object getInputSummaryLock
public static String suffix
public static final char sqlEscapeChar
Method Detail

public static void clearMapRedWork(org.apache.hadoop.conf.Configuration job)
public static MapredWork getMapRedWork(org.apache.hadoop.conf.Configuration job)
public static void setWorkflowAdjacencies(org.apache.hadoop.conf.Configuration conf, QueryPlan plan)
public static List<String> getFieldSchemaString(List<FieldSchema> fl)
public static void setMapRedWork(org.apache.hadoop.conf.Configuration job, MapredWork w, String hiveScratchDir)
public static String getHiveJobID(org.apache.hadoop.conf.Configuration job)
public static String serializeExpression(ExprNodeDesc expr)
public static ExprNodeDesc deserializeExpression(String s, org.apache.hadoop.conf.Configuration conf)
public static void serializeTasks(Task<? extends Serializable> t, OutputStream out)
public static void serializeQueryPlan(QueryPlan plan, OutputStream out)
public static QueryPlan deserializeQueryPlan(InputStream in, org.apache.hadoop.conf.Configuration conf)
public static void serializeMapRedWork(MapredWork w, OutputStream out)
public static MapredWork deserializeMapRedWork(InputStream in, org.apache.hadoop.conf.Configuration conf)
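
Since serializeMapRedWork and deserializeMapRedWork are symmetric, a plan can be round-tripped through any stream. A minimal sketch, assuming only the signatures shown on this page (the in-memory buffer and the empty MapredWork are illustrative, not from the Hive source):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.ql.exec.Utilities;
    import org.apache.hadoop.hive.ql.plan.MapredWork;

    public class PlanRoundTrip {
      public static void main(String[] args) {
        MapredWork work = new MapredWork();        // an empty plan, for illustration
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        Utilities.serializeMapRedWork(work, bos);  // write the plan to the stream
        MapredWork copy = Utilities.deserializeMapRedWork(
            new ByteArrayInputStream(bos.toByteArray()), new Configuration());
        System.out.println(copy != null);          // the round-tripped plan
      }
    }
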
public static void serializeMapRedLocalWork(MapredLocalWork w, OutputStream out)
public static MapredLocalWork deserializeMapRedLocalWork(InputStream in, org.apache.hadoop.conf.Configuration conf)
public static String getTaskId(org.apache.hadoop.conf.Configuration hconf)
public static HashMap makeMap(Object... olist)
public static Properties makeProperties(String... olist)
public static ArrayList makeList(Object... olist)
public static TableDesc getTableDesc(Table tbl)
public static TableDesc getTableDesc(String cols, String colTypes)
public static PartitionDesc getPartitionDesc(Partition part) throws HiveException
    Throws:
        HiveException

public static PartitionDesc getPartitionDescFromTableDesc(TableDesc tblDesc, Partition part) throws HiveException
    Throws:
        HiveException

public static void addMapWork(MapredWork mr, Table tbl, String alias, Operator<?> work)
public static String getOpTreeSkel(Operator<?> op)
public static boolean contentsEqual(InputStream is1, InputStream is2, boolean ignoreWhitespace) throws IOException
    Throws:
        IOException

public static String abbreviate(String str, int max)
public static Utilities.StreamStatus readColumn(DataInput in, OutputStream out) throws IOException
    Throws:
        IOException

public static OutputStream createCompressedStream(org.apache.hadoop.mapred.JobConf jc, OutputStream out) throws IOException
    Parameters:
        jc - Job Configuration
        out - Output Stream to be converted into compressed output stream
    Throws:
        IOException

public static OutputStream createCompressedStream(org.apache.hadoop.mapred.JobConf jc, OutputStream out, boolean isCompressed) throws IOException
    Parameters:
        jc - Job Configuration
        out - Output Stream to be converted into compressed output stream
        isCompressed - whether the output stream needs to be compressed or not
    Throws:
        IOException
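
A hedged usage sketch for the explicit-flag variant; the local file system and output path are placeholders, and the exact configuration keys the two-argument variant consults to decide compression are not specified on this page:

    import java.io.OutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.exec.Utilities;
    import org.apache.hadoop.mapred.JobConf;

    public class CompressedOut {
      public static void main(String[] args) throws Exception {
        JobConf jc = new JobConf();
        FileSystem fs = FileSystem.getLocal(jc);
        OutputStream raw = fs.create(new Path("/tmp/hive-example.out"));  // hypothetical path
        // Pass the compression flag explicitly instead of letting the job
        // configuration decide; the codec itself still comes from jc.
        OutputStream out = Utilities.createCompressedStream(jc, raw, true);
        out.write("example row\n".getBytes());
        out.close();
      }
    }
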
@Deprecated
public static String getFileExtension(org.apache.hadoop.mapred.JobConf jc, boolean isCompressed)
    Deprecated. Use getFileExtension(JobConf, boolean, HiveOutputFormat).
    Parameters:
        jc - Job Configuration
        isCompressed - Whether the output file is compressed or not

public static String getFileExtension(org.apache.hadoop.mapred.JobConf jc, boolean isCompressed, HiveOutputFormat<?,?> hiveOutputFormat)
    The property hive.output.file.extension is used to determine the extension; if set, it overrides other logic for choosing an extension.
    Parameters:
        jc - Job Configuration
        isCompressed - Whether the output file is compressed or not
        hiveOutputFormat - The output format, used to detect whether the format is text
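
For example, a caller assembling an output file name might use the returned extension like this; the helper method, its arguments, and the naming scheme are illustrative, not from the Hive source:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.exec.Utilities;
    import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
    import org.apache.hadoop.mapred.JobConf;

    public class OutName {
      // Builds "<taskId><ext>" under dir. If jc sets hive.output.file.extension,
      // that value wins over the codec/format-based choice.
      static Path outputFileFor(JobConf jc, boolean isCompressed,
          HiveOutputFormat<?, ?> format, Path dir, String taskId) {
        String ext = Utilities.getFileExtension(jc, isCompressed, format);
        return new Path(dir, taskId + ext);
      }
    }
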
public static org.apache.hadoop.io.SequenceFile.Writer createSequenceWriter(org.apache.hadoop.mapred.JobConf jc, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path file, Class<?> keyClass, Class<?> valClass) throws IOException
    Parameters:
        jc - Job configuration
        fs - File System to create file in
        file - Path to be created
        keyClass - Java Class for key
        valClass - Java Class for value
    Throws:
        IOException

public static org.apache.hadoop.io.SequenceFile.Writer createSequenceWriter(org.apache.hadoop.mapred.JobConf jc, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path file, Class<?> keyClass, Class<?> valClass, boolean isCompressed) throws IOException
    Parameters:
        jc - Job configuration
        fs - File System to create file in
        file - Path to be created
        keyClass - Java Class for key
        valClass - Java Class for value
        isCompressed - user-supplied compression flag (used rather than the one in the Job Configuration)
    Throws:
        IOException
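
A minimal usage sketch built only from the signature above; the local file system, output path, and BytesWritable key/value types are assumptions for illustration:

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.exec.Utilities;
    import org.apache.hadoop.io.BytesWritable;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.mapred.JobConf;

    public class SeqOut {
      public static void main(String[] args) throws Exception {
        JobConf jc = new JobConf();
        FileSystem fs = FileSystem.getLocal(jc);
        Path file = new Path("/tmp/hive-example.seq");  // hypothetical path
        SequenceFile.Writer w = Utilities.createSequenceWriter(
            jc, fs, file, BytesWritable.class, BytesWritable.class, false);
        w.append(new BytesWritable("k".getBytes()), new BytesWritable("v".getBytes()));
        w.close();
      }
    }
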
public static RCFile.Writer createRCFileWriter(org.apache.hadoop.mapred.JobConf jc, org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path file, boolean isCompressed) throws IOException
    Parameters:
        jc - Job configuration
        fs - File System to create file in
        file - Path to be created
    Throws:
        IOException

public static String realFile(String newFile, org.apache.hadoop.conf.Configuration conf) throws IOException
    Throws:
        IOException

public static List<String> mergeUniqElems(List<String> src, List<String> dest)
public static org.apache.hadoop.fs.Path toTaskTempPath(org.apache.hadoop.fs.Path orig)
public static org.apache.hadoop.fs.Path toTaskTempPath(String orig)
public static org.apache.hadoop.fs.Path toTempPath(org.apache.hadoop.fs.Path orig)
public static org.apache.hadoop.fs.Path toTempPath(String orig)
public static boolean isTempPath(org.apache.hadoop.fs.FileStatus file)
public static void rename(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path src, org.apache.hadoop.fs.Path dst) throws IOException, HiveException
    Parameters:
        fs - the FileSystem where src and dst are on
        src - the src directory
        dst - the target directory
    Throws:
        IOException
        HiveException

public static void renameOrMoveFiles(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path src, org.apache.hadoop.fs.Path dst) throws IOException, HiveException
    Parameters:
        fs - the FileSystem where src and dst are on
        src - the src directory
        dst - the target directory
    Throws:
        IOException
        HiveException
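
A sketch of the rename-or-move semantics described above, publishing a staging directory into a final location; the directory paths are placeholders:

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.exec.Utilities;
    import org.apache.hadoop.mapred.JobConf;

    public class Publish {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new JobConf());
        Path staging = new Path("/tmp/hive-staging");  // hypothetical directories
        Path finalDir = new Path("/tmp/hive-final");
        // If finalDir does not exist this is a single rename; if it does,
        // the files under staging are moved into finalDir instead.
        Utilities.renameOrMoveFiles(fs, staging, finalDir);
      }
    }
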
public static String getTaskIdFromFilename(String filename)
    Parameters:
        filename - filename to extract taskid from

public static String getPrefixedTaskIdFromFilename(String filename)
    Parameters:
        filename - filename to extract taskid from

public static String getFileNameFromDirName(String dirName)

public static String replaceTaskIdFromFilename(String filename, int bucketNum)
    Parameters:
        filename - filename in which to replace the taskid; e.g. replacing taskid "0_0" or "0_0.gz" with 33 gives "33_0" or "33_0.gz"

public static String replaceTaskIdFromFilename(String filename, String fileId)
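
A short illustration that restates the documented example; the expected outputs in the comments are taken from the parameter description above, not independently verified:

    import org.apache.hadoop.hive.ql.exec.Utilities;

    public class TaskIdDemo {
      public static void main(String[] args) {
        // Per the description above: taskid "0_0" replaced by 33 gives "33_0",
        // and any compression suffix such as ".gz" is preserved.
        System.out.println(Utilities.replaceTaskIdFromFilename("0_0", 33));
        System.out.println(Utilities.replaceTaskIdFromFilename("0_0.gz", 33));
      }
    }
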
public static org.apache.hadoop.fs.FileStatus[] getFileStatusRecurse(org.apache.hadoop.fs.Path path, int level, org.apache.hadoop.fs.FileSystem fs) throws IOException
    Parameters:
        path - the root path
        level - the number of directory levels to explore
        fs - the file system
    Throws:
        IOException
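
For instance, to list the leaf directories of a table partitioned on two keys, one might recurse exactly two levels below the table root; the paths and partition layout here are hypothetical:

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.exec.Utilities;
    import org.apache.hadoop.mapred.JobConf;

    public class ListParts {
      public static void main(String[] args) throws Exception {
        JobConf jc = new JobConf();
        FileSystem fs = FileSystem.getLocal(jc);
        Path tableRoot = new Path("/warehouse/t");  // hypothetical table location
        // level == 2: entries shaped like /warehouse/t/ds=*/hr=*
        for (FileStatus s : Utilities.getFileStatusRecurse(tableRoot, 2, fs)) {
          System.out.println(s.getPath());
        }
      }
    }
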
public static void mvFileToFinalPath(String specPath, org.apache.hadoop.conf.Configuration hconf, boolean success, org.apache.commons.logging.Log log, DynamicPartitionCtx dpCtx, FileSinkDesc conf, org.apache.hadoop.mapred.Reporter reporter) throws IOException, HiveException
    Throws:
        IOException
        HiveException

public static void removeTempOrDuplicateFiles(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path path) throws IOException
    Throws:
        IOException

public static ArrayList<String> removeTempOrDuplicateFiles(org.apache.hadoop.fs.FileSystem fs, org.apache.hadoop.fs.Path path, DynamicPartitionCtx dpCtx) throws IOException
    Throws:
        IOException

public static HashMap<String,org.apache.hadoop.fs.FileStatus> removeTempOrDuplicateFiles(org.apache.hadoop.fs.FileStatus[] items, org.apache.hadoop.fs.FileSystem fs) throws IOException
    Throws:
        IOException

public static String getNameMessage(Exception e)
public static String getResourceFiles(org.apache.hadoop.conf.Configuration conf, SessionState.ResourceType t)
public static ClassLoader addToClassPath(ClassLoader cloader, String[] newPaths) throws Exception
    Parameters:
        newPaths - Array of classpath elements
    Throws:
        Exception

public static void removeFromClassPath(String[] pathsToRemove) throws Exception
    Parameters:
        pathsToRemove - Array of classpath elements
    Throws:
        Exception
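
A sketch of adding a jar at runtime and later removing it; the jar path is a placeholder. Since addToClassPath returns a new loader, the caller should install the returned loader rather than keep using the old one:

    import org.apache.hadoop.hive.ql.exec.Utilities;

    public class AddJar {
      public static void main(String[] args) throws Exception {
        ClassLoader current = Thread.currentThread().getContextClassLoader();
        ClassLoader updated = Utilities.addToClassPath(
            current, new String[] { "/tmp/my-udfs.jar" });  // hypothetical jar
        Thread.currentThread().setContextClassLoader(updated);
        // ... use classes from the jar ...
        Utilities.removeFromClassPath(new String[] { "/tmp/my-udfs.jar" });
      }
    }
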
public static String formatBinaryString(byte[] array, int start, int length)
public static List<String> getColumnNamesFromSortCols(List<Order> sortCols)
public static List<String> getColumnNamesFromFieldSchema(List<FieldSchema> partCols)
public static List<String> getColumnNames(Properties props)
public static List<String> getColumnTypes(Properties props)
public static void validateColumnNames(List<String> colNames, List<String> checkCols) throws SemanticException
    Throws:
        SemanticException

public static int getDefaultNotificationInterval(org.apache.hadoop.conf.Configuration hconf)
    Parameters:
        hconf -

public static void copyTableJobPropertiesToConf(TableDesc tbl, org.apache.hadoop.mapred.JobConf job)
    Parameters:
        tbl - table descriptor from which to read
        job - configuration which receives configured properties

public static org.apache.hadoop.fs.ContentSummary getInputSummary(Context ctx, MapredWork work, org.apache.hadoop.fs.PathFilter filter) throws IOException
    Parameters:
        ctx - the hadoop job context
        work - map reduce job plan
        filter - filter to apply to the input paths before calculating size
    Throws:
        IOException
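
A small sketch of reading the result; the Context and MapredWork come from the compiled query and are not constructed here, and passing null to skip input-path filtering is an assumption, not documented on this page:

    import org.apache.hadoop.fs.ContentSummary;
    import org.apache.hadoop.hive.ql.Context;
    import org.apache.hadoop.hive.ql.exec.Utilities;
    import org.apache.hadoop.hive.ql.plan.MapredWork;

    public class InputSize {
      // ctx and work are produced by the query compiler.
      static long totalInputBytes(Context ctx, MapredWork work) throws java.io.IOException {
        ContentSummary cs = Utilities.getInputSummary(ctx, work, null);
        return cs.getLength();  // aggregate length of all input files
      }
    }
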
public static boolean isEmptyPath(org.apache.hadoop.mapred.JobConf job, String dirPath, Context ctx) throws Exception
    Throws:
        Exception

public static boolean isEmptyPath(org.apache.hadoop.mapred.JobConf job, org.apache.hadoop.fs.Path dirPath) throws Exception
    Throws:
        Exception

public static List<ExecDriver> getMRTasks(List<Task<? extends Serializable>> tasks)
public static boolean supportCombineFileInputFormat()
public static List<LinkedHashMap<String,String>> getFullDPSpecs(org.apache.hadoop.conf.Configuration conf, DynamicPartitionCtx dpCtx) throws HiveException
    Throws:
        HiveException

public static StatsPublisher getStatsPublisher(org.apache.hadoop.mapred.JobConf jc)
public static void setColumnNameList(org.apache.hadoop.mapred.JobConf jobConf, Operator op)
public static void setColumnTypeList(org.apache.hadoop.mapred.JobConf jobConf, Operator op)
public static void validatePartSpec(Table tbl, Map<String,String> partSpec) throws SemanticException
    Throws:
        SemanticException

public static String generatePath(String baseURI, String dumpFilePrefix, Byte tag, String bigBucketFileName)
public static String generateFileName(Byte tag, String bigBucketFileName)
public static String generateTmpURI(String baseURI, String id)
public static String generateTarURI(String baseURI, String filename)
public static String generateTarURI(org.apache.hadoop.fs.Path baseURI, String filename)
public static String generateTarFileName(String name)
public static String generatePath(org.apache.hadoop.fs.Path baseURI, String filename)
public static String now()
public static double showTime(long time)
public static String checkJDOPushDown(Table tab, ExprNodeDesc expr)
    Parameters:
        tab - the table that contains the partition columns
        expr - the partition pruning expression

public static void reworkMapRedWork(Task<? extends Serializable> task, boolean reworkMapredWork, HiveConf conf) throws SemanticException
    Parameters:
        task -
        reworkMapredWork -
        conf -
    Throws:
        SemanticException

public static <T> T executeWithRetry(Utilities.SQLCommand<T> cmd, PreparedStatement stmt, int baseWindow, int maxRetries) throws SQLException
    Parameters:
        cmd - the SQL command
        stmt - the prepared SQL statement
        baseWindow - the base time window (in milliseconds) before the next retry; see getRandomWaitTime(int, int, java.util.Random) for details
        maxRetries - the maximum number of retries when getting a SQLTransientException
    Throws:
        SQLException - throws SQLRecoverableException or SQLNonTransientException the first time it is caught, or SQLTransientException once maxRetries has been reached

public static Connection connectWithRetry(String connectionString, int waitWindow, int maxRetries) throws SQLException
    Parameters:
        connectionString - the JDBC connection string
        waitWindow - the base time window (in milliseconds) before the next retry; see getRandomWaitTime(int, int, java.util.Random) for details
        maxRetries - the maximum number of retries when getting a SQLTransientException
    Throws:
        SQLException - throws SQLRecoverableException or SQLNonTransientException the first time it is caught, or SQLTransientException once maxRetries has been reached

public static PreparedStatement prepareWithRetry(Connection conn, String stmt, int waitWindow, int maxRetries) throws SQLException
    Parameters:
        conn - a JDBC connection
        stmt - the SQL statement to be prepared
        waitWindow - the base time window (in milliseconds) before the next retry; see getRandomWaitTime(int, int, java.util.Random) for details
        maxRetries - the maximum number of retries when getting a SQLTransientException
    Throws:
        SQLException - throws SQLRecoverableException or SQLNonTransientException the first time it is caught, or SQLTransientException once maxRetries has been reached

public static long getRandomWaitTime(int baseWindow, int failures, Random r)
    Parameters:
        baseWindow - the base waiting window
        failures - number of failures so far
        r - a random generator
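
Taken together, these form a small retry toolkit: connect, prepare, and execute, each with randomized backoff. A hedged sketch of the first two plus the backoff helper (the Derby connection string and query are placeholders, and the printed waits only illustrate that the backoff is randomized per failure count):

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.util.Random;
    import org.apache.hadoop.hive.ql.exec.Utilities;

    public class RetryDemo {
      public static void main(String[] args) throws Exception {
        // Placeholders: any JDBC-reachable database would do.
        String url = "jdbc:derby:;databaseName=metastore_db;create=true";
        Connection conn = Utilities.connectWithRetry(url, 1000, 3);
        PreparedStatement ps = Utilities.prepareWithRetry(
            conn, "SELECT 1 FROM SYSIBM.SYSDUMMY1", 1000, 3);
        // The randomized wait the helpers sleep between attempts:
        Random r = new Random();
        for (int failures = 1; failures <= 3; failures++) {
          System.out.println(Utilities.getRandomWaitTime(1000, failures, r));
        }
      }
    }
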
public static String escapeSqlLike(String key)
    Parameters:
        key - the string that will be used for the SQL LIKE operator
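
A small illustration of where this belongs in query construction; it assumes the ESCAPE clause should name the same escape character the method uses, which the sqlEscapeChar field appears to expose (in real code, prefer bound parameters over string concatenation):

    import org.apache.hadoop.hive.ql.exec.Utilities;

    public class LikeEscape {
      public static void main(String[] args) {
        // User input whose % and _ would otherwise act as wildcards:
        String raw = "100%_raise";
        String safe = Utilities.escapeSqlLike(raw);
        String sql = "SELECT * FROM t WHERE name LIKE '" + safe + "%' ESCAPE '"
            + Utilities.sqlEscapeChar + "'";
        System.out.println(sql);
      }
    }
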
public static String formatMsecToStr(long msec)
    Parameters:
        msec - milliseconds
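
The conversion involved is the usual fixed-radix breakdown of a millisecond count. The exact output wording of formatMsecToStr is not shown on this page, so the sketch below only demonstrates the arithmetic:

    public class MsecBreakdown {
      public static void main(String[] args) {
        long msec = 93784005L;  // 1 day, 2 hours, 3 minutes, 4 seconds, 5 msec
        long days = msec / 86400000L;  msec %= 86400000L;
        long hours = msec / 3600000L;  msec %= 3600000L;
        long mins = msec / 60000L;     msec %= 60000L;
        long secs = msec / 1000L;      msec %= 1000L;
        System.out.println(days + "d " + hours + "h " + mins + "m "
            + secs + "s " + msec + "ms");  // prints: 1d 2h 3m 4s 5ms
      }
    }
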
public static Class getBuiltinUtilsClass() throws ClassNotFoundException
    Throws:
        ClassNotFoundException