Deprecated API


Contents
          Deprecated Classes
          Deprecated Fields
          Deprecated Methods
          Deprecated Constructors
Deprecated Classes
org.apache.hadoop.filecache.DistributedCache
          Use the equivalent methods on Job (see the migration sketch after this list). 
org.apache.hadoop.mapred.OutputLogFilter
          Use Utils.OutputFileUtils.OutputLogFilter instead. 
org.apache.hadoop.streaming.UTF8ByteArrayUtils
          Use org.apache.hadoop.util.UTF8ByteArrayUtils and StreamKeyValUtil instead. 
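
For the DistributedCache entry above, a minimal migration sketch in Java. It assumes the cache helpers exposed on Job (addCacheFile, addFileToClassPath) and the Job.getInstance factory; the class name and the cached paths are hypothetical placeholders, not part of any Hadoop API.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.Job;

    public class CacheMigrationSketch {
      public static Job configure(Configuration conf) throws Exception {
        Job job = Job.getInstance(conf, "cache-example");
        // Formerly: DistributedCache.addCacheFile(new URI("/share/lookup.dat"), conf);
        // The equivalent calls now live on Job itself.
        job.addCacheFile(new URI("/share/lookup.dat"));            // hypothetical path
        job.addFileToClassPath(new Path("/share/lib/helper.jar")); // hypothetical path
        return job;
      }
    }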
 

Deprecated Fields
org.apache.hadoop.mapred.JobConf.MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY
            
org.apache.hadoop.mapred.JobConf.MAPRED_TASK_ENV
          Use JobConf.MAPRED_MAP_TASK_ENV or JobConf.MAPRED_REDUCE_TASK_ENV 
org.apache.hadoop.mapred.JobConf.MAPRED_TASK_JAVA_OPTS
          Use JobConf.MAPRED_MAP_TASK_JAVA_OPTS or JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS 
org.apache.hadoop.mapred.JobConf.MAPRED_TASK_MAXPMEM_PROPERTY
            
org.apache.hadoop.mapred.JobConf.MAPRED_TASK_MAXVMEM_PROPERTY
          Use JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY and JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY 
org.apache.hadoop.mapred.JobConf.MAPRED_TASK_ULIMIT
          Use JobConf.MAPRED_MAP_TASK_ULIMIT or JobConf.MAPRED_REDUCE_TASK_ULIMIT 
org.apache.hadoop.mapred.JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY
            
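
The per-task settings above were split into map-side and reduce-side variants. A minimal configuration sketch in Java, assuming the JobConf constants named in the entries and the setMemoryForMapTask/setMemoryForReduceTask setters referenced under Deprecated Methods below; the option values are illustrative only.

    import org.apache.hadoop.mapred.JobConf;

    public class TaskConfigSketch {
      public static void configure(JobConf conf) {
        // Formerly a single setting covered both task types, e.g.
        //   conf.set(JobConf.MAPRED_TASK_JAVA_OPTS, "-Xmx512m");   // deprecated
        // Map and reduce tasks are now configured separately.
        conf.set(JobConf.MAPRED_MAP_TASK_JAVA_OPTS, "-Xmx512m");
        conf.set(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, "-Xmx1024m");
        conf.set(JobConf.MAPRED_MAP_TASK_ENV, "LD_LIBRARY_PATH=/usr/local/lib");
        // The old virtual-memory properties give way to per-task memory in MB.
        conf.setMemoryForMapTask(512L);
        conf.setMemoryForReduceTask(1024L);
      }
    }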
 

Deprecated Methods
org.apache.hadoop.mapred.OutputCommitter.cleanupJob(JobContext)
          Use OutputCommitter.commitJob(JobContext) or OutputCommitter.abortJob(JobContext, int) instead (see the committer sketch after this list). 
org.apache.hadoop.mapred.OutputCommitter.cleanupJob(org.apache.hadoop.mapreduce.JobContext)
          Use OutputCommitter.commitJob(org.apache.hadoop.mapreduce.JobContext) or OutputCommitter.abortJob(org.apache.hadoop.mapreduce.JobContext, org.apache.hadoop.mapreduce.JobStatus.State) instead. 
org.apache.hadoop.mapred.FileOutputCommitter.cleanupJob(JobContext)
           
org.apache.hadoop.mapreduce.OutputCommitter.cleanupJob(JobContext)
          Use OutputCommitter.commitJob(JobContext) or OutputCommitter.abortJob(JobContext, JobStatus.State) instead. 
org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter.cleanupJob(JobContext)
           
org.apache.hadoop.mapred.lib.CombineFileInputFormat.createPool(JobConf, List)
          Use CombineFileInputFormat.createPool(List). 
org.apache.hadoop.mapred.lib.CombineFileInputFormat.createPool(JobConf, PathFilter...)
          Use CombineFileInputFormat.createPool(PathFilter...). 
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.createValue()
            
org.apache.hadoop.mapred.JobConf.deleteLocalFiles()
           
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findByte(byte[], int, int, byte)
          Use org.apache.hadoop.util.UTF8ByteArrayUtils.findByte(byte[], int, int, byte) 
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findBytes(byte[], int, int, byte[])
          Use org.apache.hadoop.util.UTF8ByteArrayUtils.findBytes(byte[], int, int, byte[]) 
org.apache.hadoop.mapred.Counters.findCounter(String, int, String)
            
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findNthByte(byte[], byte, int)
          Use org.apache.hadoop.util.UTF8ByteArrayUtils.findNthByte(byte[], byte, int) 
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findNthByte(byte[], int, int, byte, int)
          Use org.apache.hadoop.util.UTF8ByteArrayUtils.findNthByte(byte[], int, int, byte, int) 
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findTab(byte[])
          Use StreamKeyValUtil.findTab(byte[]) 
org.apache.hadoop.streaming.UTF8ByteArrayUtils.findTab(byte[], int, int)
          Use StreamKeyValUtil.findTab(byte[], int, int) 
org.apache.hadoop.streaming.StreamJob.getClusterNick()
           
org.apache.hadoop.mapred.Counters.Group.getCounter(int, String)
          Use Counters.Group.getCounter(String) instead. 
org.apache.hadoop.mapred.JobClient.getJob(String)
          Applications should use JobClient.getJob(JobID) instead (see the ID-based lookup sketch after this list). 
org.apache.hadoop.mapred.JobStatus.getJobId()
          Use JobStatus.getJobID() instead. 
org.apache.hadoop.mapred.RunningJob.getJobID()
          This method is deprecated and will be removed. Applications should use RunningJob.getID() instead. 
org.apache.hadoop.mapred.JobID.getJobIDsPattern(String, Integer)
           
org.apache.hadoop.mapred.JobClient.getMapTaskReports(String)
          Applications should use JobClient.getMapTaskReports(JobID) instead. 
org.apache.hadoop.mapred.JobConf.getMaxPhysicalMemoryForTask()
          This variable is deprecated and no longer in use. 
org.apache.hadoop.mapred.JobConf.getMaxVirtualMemoryForTask()
          Use JobConf.getMemoryForMapTask() and JobConf.getMemoryForReduceTask() 
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.getPos()
            
org.apache.hadoop.mapred.JobClient.getReduceTaskReports(String)
          Applications should use JobClient.getReduceTaskReports(JobID) instead. 
org.apache.hadoop.mapred.JobConf.getSessionId()
           
org.apache.hadoop.mapred.TaskAttemptID.getTaskAttemptIDsPattern(String, Integer, Boolean, Integer, Integer)
           
org.apache.hadoop.mapred.TaskAttemptID.getTaskAttemptIDsPattern(String, Integer, TaskType, Integer, Integer)
           
org.apache.hadoop.mapred.TaskCompletionEvent.getTaskId()
          Use TaskCompletionEvent.getTaskAttemptId() instead. 
org.apache.hadoop.mapred.TaskID.getTaskIDsPattern(String, Integer, Boolean, Integer)
          Use TaskID.getTaskIDsPattern(String, Integer, TaskType, Integer) 
org.apache.hadoop.mapred.TaskID.getTaskIDsPattern(String, Integer, TaskType, Integer)
           
org.apache.hadoop.mapred.JobClient.getTaskOutputFilter()
           
org.apache.hadoop.streaming.StreamJob.go()
          Use StreamJob.run(String[]) instead. 
org.apache.hadoop.mapred.RunningJob.killTask(String, boolean)
          Applications should use RunningJob.killTask(TaskAttemptID, boolean) instead. 
org.apache.hadoop.mapreduce.lib.db.DBRecordReader.next(LongWritable, T)
          Use DBRecordReader.nextKeyValue() 
org.apache.hadoop.mapred.JobID.read(DataInput)
           
org.apache.hadoop.mapred.TaskID.read(DataInput)
           
org.apache.hadoop.mapred.TaskAttemptID.read(DataInput)
           
org.apache.hadoop.streaming.UTF8ByteArrayUtils.readLine(LineReader, Text)
          Use StreamKeyValUtil.readLine(LineReader, Text) 
org.apache.hadoop.mapred.jobcontrol.Job.setAssignedJobID(JobID)
          setAssignedJobID should not be called; the job ID is set by the framework. 
org.apache.hadoop.mapreduce.Counter.setDisplayName(String)
           
org.apache.hadoop.mapred.JobConf.setMaxPhysicalMemoryForTask(long)
           
org.apache.hadoop.mapred.JobConf.setMaxVirtualMemoryForTask(long)
          Use JobConf.setMemoryForMapTask(long mem) and JobConf.setMemoryForReduceTask(long mem). 
org.apache.hadoop.mapred.JobConf.setSessionId(String)
           
org.apache.hadoop.mapred.TaskCompletionEvent.setTaskId(String)
          Use TaskCompletionEvent.setTaskAttemptId(TaskAttemptID) instead. 
org.apache.hadoop.mapred.JobClient.setTaskOutputFilter(JobClient.TaskStatusFilter)
           
org.apache.hadoop.streaming.UTF8ByteArrayUtils.splitKeyVal(byte[], int, int, Text, Text, int)
          Use StreamKeyValUtil.splitKeyVal(byte[], int, int, Text, Text, int) 
org.apache.hadoop.streaming.UTF8ByteArrayUtils.splitKeyVal(byte[], int, int, Text, Text, int, int)
          Use StreamKeyValUtil.splitKeyVal(byte[], int, int, Text, Text, int, int) 
org.apache.hadoop.streaming.UTF8ByteArrayUtils.splitKeyVal(byte[], Text, Text, int)
          Use StreamKeyValUtil.splitKeyVal(byte[], Text, Text, int) 
org.apache.hadoop.streaming.UTF8ByteArrayUtils.splitKeyVal(byte[], Text, Text, int, int)
          Use StreamKeyValUtil.splitKeyVal(byte[], Text, Text, int, int) 
org.apache.hadoop.mapred.pipes.Submitter.submitJob(JobConf)
          Use Submitter.runJob(JobConf) 
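
Several of the method entries above follow a common migration pattern; two hedged Java sketches illustrate it. First, the cleanupJob(JobContext) replacements: a minimal org.apache.hadoop.mapreduce.OutputCommitter that performs job-level cleanup in commitJob/abortJob instead of overriding the deprecated cleanupJob. The class name and the empty bodies are placeholders, not part of any Hadoop API.

    import java.io.IOException;
    import org.apache.hadoop.mapreduce.JobContext;
    import org.apache.hadoop.mapreduce.JobStatus;
    import org.apache.hadoop.mapreduce.OutputCommitter;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;

    public class NoOpCommitter extends OutputCommitter {
      @Override public void setupJob(JobContext context) throws IOException { }

      // Do not override the deprecated cleanupJob(JobContext) any more;
      // success-path cleanup belongs in commitJob, failure-path cleanup in abortJob.
      @Override public void commitJob(JobContext context) throws IOException {
        // job-level cleanup for successful jobs goes here
      }
      @Override public void abortJob(JobContext context, JobStatus.State state)
          throws IOException {
        // job-level cleanup for failed or killed jobs goes here
      }

      @Override public void setupTask(TaskAttemptContext context) throws IOException { }
      @Override public boolean needsTaskCommit(TaskAttemptContext context) throws IOException {
        return false;
      }
      @Override public void commitTask(TaskAttemptContext context) throws IOException { }
      @Override public void abortTask(TaskAttemptContext context) throws IOException { }
    }

Second, the String-based lookups on JobClient and RunningJob are replaced by typed IDs. The sketch below assumes the JobID.forName and TaskAttemptID.forName parsers; the class, method, and argument names are illustrative only.

    import java.io.IOException;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.JobID;
    import org.apache.hadoop.mapred.RunningJob;
    import org.apache.hadoop.mapred.TaskAttemptID;

    public class IdBasedLookupSketch {
      public static void inspect(JobConf conf, String jobIdStr, String attemptIdStr)
          throws IOException {
        JobClient client = new JobClient(conf);
        // Formerly: client.getJob(jobIdStr) with a raw String (deprecated).
        RunningJob job = client.getJob(JobID.forName(jobIdStr));
        System.out.println("running job: " + job.getID());   // replaces getJobID()
        // Task kills likewise take a typed TaskAttemptID now.
        job.killTask(TaskAttemptID.forName(attemptIdStr), false);
      }
    }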
 

Deprecated Constructors
org.apache.hadoop.mapred.FileSplit(Path, long, long, JobConf)
            
org.apache.hadoop.mapreduce.Job()
           
org.apache.hadoop.mapreduce.Job(Configuration)
           
org.apache.hadoop.mapreduce.Job(Configuration, String)
           
org.apache.hadoop.streaming.StreamJob(String[], boolean)
          Use StreamJob() with ToolRunner, or set the Configuration with StreamJob.setConf(Configuration) and run with StreamJob.run(String[]). 
org.apache.hadoop.mapred.TaskAttemptID(String, int, boolean, int, int)
          Use TaskAttemptID.TaskAttemptID(String, int, TaskType, int, int) (see the sketch after this list). 
org.apache.hadoop.mapred.TaskID(JobID, boolean, int)
          Use TaskID.TaskID(String, int, TaskType, int) 
org.apache.hadoop.mapred.TaskID(String, int, boolean, int)
          Use TaskID.TaskID(org.apache.hadoop.mapreduce.JobID, TaskType, int) 
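
A short Java sketch of the constructor replacements listed above. It assumes the Job.getInstance factory methods that superseded the Job constructors and the TaskType-based TaskAttemptID constructor named in the notes; the job name, job-tracker identifier, and task numbers are hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapred.TaskAttemptID;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.TaskType;

    public class ConstructorMigrationSketch {
      public static void main(String[] args) throws Exception {
        // Formerly: new Job(new Configuration(), "word-count")        // deprecated
        Job job = Job.getInstance(new Configuration(), "word-count");

        // Formerly: new TaskAttemptID("200904210001", 1, true, 3, 0)  // boolean isMap
        TaskAttemptID attempt =
            new TaskAttemptID("200904210001", 1, TaskType.MAP, 3, 0);  // hypothetical IDs
        System.out.println(job.getJobName() + " " + attempt);
      }
    }

The StreamJob(String[], boolean) entry follows the same direction as its note: construct StreamJob() with no arguments and drive it through ToolRunner, or set a Configuration via StreamJob.setConf(Configuration) and call StreamJob.run(String[]).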
 



Copyright © 2009 The Apache Software Foundation