Deprecated Methods
org.apache.hadoop.mapred.OutputCommitter.cleanupJob(JobContext)
    Use OutputCommitter.commitJob(JobContext) or OutputCommitter.abortJob(JobContext, int) instead.

org.apache.hadoop.mapred.OutputCommitter.cleanupJob(JobContext)
    Use OutputCommitter.commitJob(org.apache.hadoop.mapreduce.JobContext) or
    OutputCommitter.abortJob(org.apache.hadoop.mapreduce.JobContext, org.apache.hadoop.mapreduce.JobStatus.State) instead.

org.apache.hadoop.mapred.FileOutputCommitter.cleanupJob(JobContext)

org.apache.hadoop.mapreduce.OutputCommitter.cleanupJob(JobContext)
    Use OutputCommitter.commitJob(JobContext) or OutputCommitter.abortJob(JobContext, JobStatus.State) instead.

org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.cleanupJob(JobContext)
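The migration splits the single cleanupJob() hook into a success path and a failure path. A minimal sketch against the new (org.apache.hadoop.mapreduce) API; the class name and the audit comments are hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.JobContext;
    import org.apache.hadoop.mapreduce.JobStatus;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;

    public class AuditingOutputCommitter extends FileOutputCommitter {

      public AuditingOutputCommitter(Path outputPath, TaskAttemptContext context)
          throws IOException {
        super(outputPath, context);
      }

      @Override
      public void commitJob(JobContext context) throws IOException {
        super.commitJob(context);        // success-path cleanup
        // logic that used to run unconditionally in cleanupJob(),
        // but should only run on success, goes here
      }

      @Override
      public void abortJob(JobContext context, JobStatus.State state)
          throws IOException {
        super.abortJob(context, state);  // failure/kill-path cleanup
        // failure-only logic, with the terminal job state available
      }
    }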
org.apache.hadoop.mapred.lib.CombineFileInputFormat.createPool(JobConf, List)
    Use CombineFileInputFormat.createPool(List) instead.

org.apache.hadoop.mapred.lib.CombineFileInputFormat.createPool(JobConf, PathFilter...)
    Use CombineFileInputFormat.createPool(PathFilter...) instead.
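The pool no longer takes a JobConf; since createPool is protected, filters are registered from a subclass. A minimal sketch against the new-API class; the class name and filter are hypothetical, and the record reader is omitted:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.PathFilter;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.InputSplit;
    import org.apache.hadoop.mapreduce.RecordReader;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;

    public class PooledInputFormat extends CombineFileInputFormat<LongWritable, Text> {

      public PooledInputFormat() {
        // New style: no JobConf argument; the filters alone define the pool.
        createPool(new PathFilter() {
          public boolean accept(Path path) {
            return path.getName().endsWith(".log");  // hypothetical filter
          }
        });
      }

      @Override
      public RecordReader<LongWritable, Text> createRecordReader(
          InputSplit split, TaskAttemptContext context) {
        throw new UnsupportedOperationException("reader omitted from sketch");
      }
    }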
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.createValue()

org.apache.hadoop.mapred.JobConf.deleteLocalFiles()
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findByte(byte[], int, int, byte)
    Use org.apache.hadoop.util.UTF8ByteArrayUtils.findByte(byte[], int, int, byte) instead.

org.apache.hadoop.streaming.UTF8ByteArrayUtils.findBytes(byte[], int, int, byte[])
    Use org.apache.hadoop.util.UTF8ByteArrayUtils.findBytes(byte[], int, int, byte[]) instead.

org.apache.hadoop.mapred.Counters.findCounter(String, int, String)

org.apache.hadoop.streaming.UTF8ByteArrayUtils.findNthByte(byte[], byte, int)
    Use org.apache.hadoop.util.UTF8ByteArrayUtils.findNthByte(byte[], byte, int) instead.

org.apache.hadoop.streaming.UTF8ByteArrayUtils.findNthByte(byte[], int, int, byte, int)
    Use org.apache.hadoop.util.UTF8ByteArrayUtils.findNthByte(byte[], int, int, byte, int) instead.

org.apache.hadoop.streaming.UTF8ByteArrayUtils.findTab(byte[])
    Use StreamKeyValUtil.findTab(byte[]) instead.

org.apache.hadoop.streaming.UTF8ByteArrayUtils.findTab(byte[], int, int)
    Use StreamKeyValUtil.findTab(byte[], int, int) instead.
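For the find* migrations above, the replacements live in two classes: the generic byte-search helpers in org.apache.hadoop.util.UTF8ByteArrayUtils, and the tab handling in StreamKeyValUtil. A minimal sketch of the replacement calls; the input bytes are hypothetical:

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.streaming.StreamKeyValUtil;
    import org.apache.hadoop.util.UTF8ByteArrayUtils;

    public class FindDemo {
      public static void main(String[] args) {
        byte[] line = "key\tvalue".getBytes(StandardCharsets.UTF_8);

        // Old: org.apache.hadoop.streaming.UTF8ByteArrayUtils.findTab(line, 0, len)
        int tabPos = StreamKeyValUtil.findTab(line, 0, line.length);

        // Old: org.apache.hadoop.streaming.UTF8ByteArrayUtils.findByte(...)
        int ePos = UTF8ByteArrayUtils.findByte(line, 0, line.length, (byte) 'e');

        System.out.println("tab at " + tabPos + ", first 'e' at " + ePos);
      }
    }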
org.apache.hadoop.streaming.StreamJob.getClusterNick()

org.apache.hadoop.mapred.Counters.Group.getCounter(int, String)
    Use Counters.Group.getCounter(String) instead.
org.apache.hadoop.mapred.JobClient.getJob(String)
    Applications should rather use JobClient.getJob(JobID).

org.apache.hadoop.mapred.JobStatus.getJobId()
    Use JobStatus.getJobID() instead.

org.apache.hadoop.mapred.RunningJob.getJobID()
    This method is deprecated and will be removed. Applications should rather use RunningJob.getID().
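The String-based lookups give way to typed JobID handles. A minimal sketch, assuming a reachable cluster; the job ID string is hypothetical:

    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.JobID;
    import org.apache.hadoop.mapred.RunningJob;

    public class JobLookup {
      public static void main(String[] args) throws Exception {
        JobClient client = new JobClient(new JobConf());

        // Old: client.getJob("job_201001010000_0001") with a raw String.
        JobID id = JobID.forName("job_201001010000_0001");
        RunningJob job = client.getJob(id);

        // Old: job.getJobID() returned a String; getID() returns the typed JobID.
        System.out.println(job.getID() + " complete? " + job.isComplete());
      }
    }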
org.apache.hadoop.mapred.JobID.getJobIDsPattern(String, Integer)
org.apache.hadoop.mapred.JobClient.getMapTaskReports(String)
    Applications should rather use JobClient.getMapTaskReports(JobID).

org.apache.hadoop.mapred.JobConf.getMaxPhysicalMemoryForTask()
    This variable is deprecated and no longer in use.

org.apache.hadoop.mapred.JobConf.getMaxVirtualMemoryForTask()
    Use JobConf.getMemoryForMapTask() and JobConf.getMemoryForReduceTask() instead.
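Per-task-type memory accessors replace the single virtual-memory limit (see also the setMaxVirtualMemoryForTask entry below). A minimal sketch; the MB values are hypothetical:

    import org.apache.hadoop.mapred.JobConf;

    public class MemoryConfig {
      public static void main(String[] args) {
        JobConf conf = new JobConf();

        // Old: conf.setMaxVirtualMemoryForTask(...) covered both task types.
        conf.setMemoryForMapTask(1024L);     // map tasks: 1 GB
        conf.setMemoryForReduceTask(2048L);  // reduce tasks: 2 GB

        System.out.println("map=" + conf.getMemoryForMapTask()
            + "MB reduce=" + conf.getMemoryForReduceTask() + "MB");
      }
    }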
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.getPos()

org.apache.hadoop.mapred.JobClient.getReduceTaskReports(String)
    Applications should rather use JobClient.getReduceTaskReports(JobID).

org.apache.hadoop.mapred.JobConf.getSessionId()
org.apache.hadoop.mapred.TaskAttemptID.getTaskAttemptIDsPattern(String, Integer, Boolean, Integer, Integer)

org.apache.hadoop.mapred.TaskAttemptID.getTaskAttemptIDsPattern(String, Integer, TaskType, Integer, Integer)

org.apache.hadoop.mapred.TaskCompletionEvent.getTaskId()
    Use TaskCompletionEvent.getTaskAttemptId() instead.

org.apache.hadoop.mapred.TaskID.getTaskIDsPattern(String, Integer, Boolean, Integer)
    Use TaskID.getTaskIDsPattern(String, Integer, TaskType, Integer) instead.

org.apache.hadoop.mapred.TaskID.getTaskIDsPattern(String, Integer, TaskType, Integer)

org.apache.hadoop.mapred.JobClient.getTaskOutputFilter()
org.apache.hadoop.streaming.StreamJob.go()
    Use StreamJob.run(String[]) instead.

org.apache.hadoop.mapred.RunningJob.killTask(String, boolean)
    Applications should rather use RunningJob.killTask(TaskAttemptID, boolean).
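A minimal sketch of the typed kill call, assuming a reachable cluster; both ID strings are hypothetical:

    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.JobID;
    import org.apache.hadoop.mapred.RunningJob;
    import org.apache.hadoop.mapred.TaskAttemptID;

    public class KillAttempt {
      public static void main(String[] args) throws Exception {
        RunningJob job = new JobClient(new JobConf())
            .getJob(JobID.forName("job_201001010000_0001"));

        // Old: job.killTask("attempt_201001010000_0001_m_000003_0", false)
        TaskAttemptID attempt =
            TaskAttemptID.forName("attempt_201001010000_0001_m_000003_0");
        job.killTask(attempt, false);  // false = kill; true = mark as failed
      }
    }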
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.next(LongWritable, T)
    Use DBRecordReader.nextKeyValue() instead.
org.apache.hadoop.mapred.JobID.read(DataInput)

org.apache.hadoop.mapred.TaskID.read(DataInput)

org.apache.hadoop.mapred.TaskAttemptID.read(DataInput)

org.apache.hadoop.streaming.UTF8ByteArrayUtils.readLine(LineReader, Text)
    Use StreamKeyValUtil.readLine(LineReader, Text) instead.
org.apache.hadoop.mapred.jobcontrol.Job.setAssignedJobID(JobID)
    setAssignedJobID should not be called; the JobID is set by the framework.

org.apache.hadoop.mapreduce.Counter.setDisplayName(String)

org.apache.hadoop.mapred.JobConf.setMaxPhysicalMemoryForTask(long)

org.apache.hadoop.mapred.JobConf.setMaxVirtualMemoryForTask(long)
    Use JobConf.setMemoryForMapTask(long mem) and JobConf.setMemoryForReduceTask(long mem) instead.

org.apache.hadoop.mapred.JobConf.setSessionId(String)

org.apache.hadoop.mapred.TaskCompletionEvent.setTaskId(String)
    Use TaskCompletionEvent.setTaskAttemptId(TaskAttemptID) instead.
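A minimal sketch of the typed TaskCompletionEvent accessors that replace the String-based getTaskId()/setTaskId() pair (see the getTaskId entry above); the attempt ID is hypothetical:

    import org.apache.hadoop.mapred.TaskAttemptID;
    import org.apache.hadoop.mapred.TaskCompletionEvent;

    public class EventDemo {
      public static void main(String[] args) {
        TaskCompletionEvent event = new TaskCompletionEvent();

        // Old: event.setTaskId("attempt_..."); new: a typed TaskAttemptID.
        event.setTaskAttemptId(
            TaskAttemptID.forName("attempt_201001010000_0001_r_000000_0"));

        // Old: event.getTaskId() returned a String.
        TaskAttemptID attempt = event.getTaskAttemptId();
        System.out.println(attempt);
      }
    }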
org.apache.hadoop.mapred.JobClient.setTaskOutputFilter(JobClient.TaskStatusFilter)
org.apache.hadoop.streaming.UTF8ByteArrayUtils.splitKeyVal(byte[], int, int, Text, Text, int)
    Use StreamKeyValUtil.splitKeyVal(byte[], int, int, Text, Text, int) instead.

org.apache.hadoop.streaming.UTF8ByteArrayUtils.splitKeyVal(byte[], int, int, Text, Text, int, int)
    Use StreamKeyValUtil.splitKeyVal(byte[], int, int, Text, Text, int, int) instead.

org.apache.hadoop.streaming.UTF8ByteArrayUtils.splitKeyVal(byte[], Text, Text, int)
    Use StreamKeyValUtil.splitKeyVal(byte[], Text, Text, int) instead.

org.apache.hadoop.streaming.UTF8ByteArrayUtils.splitKeyVal(byte[], Text, Text, int, int)
    Use StreamKeyValUtil.splitKeyVal(byte[], Text, Text, int, int) instead.
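The splitKeyVal overloads, like the readLine entry above, move wholesale to StreamKeyValUtil with unchanged parameter lists. A minimal sketch of the simplest overload; the input line is hypothetical:

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.streaming.StreamKeyValUtil;

    public class SplitDemo {
      public static void main(String[] args) throws Exception {
        byte[] line = "key\tvalue".getBytes(StandardCharsets.UTF_8);
        Text key = new Text();
        Text val = new Text();

        // Old: org.apache.hadoop.streaming.UTF8ByteArrayUtils.splitKeyVal(...)
        int tabPos = StreamKeyValUtil.findTab(line);
        StreamKeyValUtil.splitKeyVal(line, key, val, tabPos);

        System.out.println(key + " -> " + val);  // prints: key -> value
      }
    }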
org.apache.hadoop.mapred.pipes.Submitter.submitJob(JobConf)
    Use Submitter.runJob(JobConf) instead.
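A minimal sketch of runJob replacing submitJob for a Pipes job, assuming the C++ binary is already available to the cluster; the executable path is hypothetical:

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.RunningJob;
    import org.apache.hadoop.mapred.pipes.Submitter;

    public class PipesLaunch {
      public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf();
        Submitter.setExecutable(conf, "/apps/bin/wordcount-pipes");  // hypothetical binary

        // Old: Submitter.submitJob(conf); runJob returns the RunningJob handle.
        RunningJob job = Submitter.runJob(conf);
        System.out.println("succeeded? " + job.isSuccessful());
      }
    }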