public class MRAppMaster
extends org.apache.hadoop.yarn.service.CompositeService
| Modifier and Type | Field and Description |
|---|---|
protected org.apache.hadoop.security.UserGroupInformation |
currentUser |
protected MRAppMetrics |
metrics |
static int |
SHUTDOWN_HOOK_PRIORITY
Priority of the MRAppMaster shutdown hook.
|
| Constructor and Description |
|---|
MRAppMaster(org.apache.hadoop.yarn.api.records.ApplicationAttemptId applicationAttemptId,
org.apache.hadoop.yarn.api.records.ContainerId containerId,
String nmHost,
int nmPort,
int nmHttpPort,
org.apache.hadoop.yarn.Clock clock,
long appSubmitTime) |
MRAppMaster(org.apache.hadoop.yarn.api.records.ApplicationAttemptId applicationAttemptId,
org.apache.hadoop.yarn.api.records.ContainerId containerId,
String nmHost,
int nmPort,
int nmHttpPort,
long appSubmitTime) |
| Modifier and Type | Method and Description |
|---|---|
protected void |
addIfService(Object object) |
void |
cleanupStagingDir()
Clean up the staging directories for the job.
|
protected ClientService |
createClientService(AppContext context) |
protected org.apache.hadoop.yarn.event.EventHandler<CommitterEvent> |
createCommitterEventHandler(AppContext context,
org.apache.hadoop.mapreduce.OutputCommitter committer) |
protected ContainerAllocator |
createContainerAllocator(ClientService clientService,
AppContext context) |
protected ContainerLauncher |
createContainerLauncher(AppContext context) |
protected org.apache.hadoop.yarn.event.Dispatcher |
createDispatcher() |
protected Job |
createJob(org.apache.hadoop.conf.Configuration conf,
JobStateInternal forcedState,
String diagnostic)
Create and initialize (but don't start) a single job.
|
protected org.apache.hadoop.yarn.event.EventHandler<JobFinishEvent> |
createJobFinishEventHandler()
create an event handler that handles the job finish event.
|
protected org.apache.hadoop.yarn.event.EventHandler<JobHistoryEvent> |
createJobHistoryHandler(AppContext context) |
protected Speculator |
createSpeculator(org.apache.hadoop.conf.Configuration conf,
AppContext context) |
protected org.apache.hadoop.yarn.service.AbstractService |
createStagingDirCleaningService() |
protected TaskAttemptListener |
createTaskAttemptListener(AppContext context) |
protected void |
downloadTokensAndSetupUGI(org.apache.hadoop.conf.Configuration conf)
Obtain the tokens needed by the job and put them in the UGI
|
List<org.apache.hadoop.mapreduce.v2.api.records.AMInfo> |
getAllAMInfos() |
org.apache.hadoop.yarn.api.records.ApplicationId |
getAppID() |
org.apache.hadoop.yarn.api.records.ApplicationAttemptId |
getAttemptID() |
org.apache.hadoop.mapreduce.OutputCommitter |
getCommitter() |
Map<org.apache.hadoop.mapreduce.v2.api.records.TaskId,org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo> |
getCompletedTaskFromPreviousRun() |
ContainerAllocator |
getContainerAllocator() |
ContainerLauncher |
getContainerLauncher() |
AppContext |
getContext() |
protected org.apache.hadoop.security.Credentials |
getCredentials() |
org.apache.hadoop.yarn.event.Dispatcher |
getDispatcher() |
protected org.apache.hadoop.fs.FileSystem |
getFileSystem(org.apache.hadoop.conf.Configuration conf)
Create the default file System for this job.
|
org.apache.hadoop.mapreduce.v2.api.records.JobId |
getJobId() |
protected RMHeartbeatHandler |
getRMHeartbeatHandler() |
int |
getStartCount() |
TaskAttemptListener |
getTaskAttemptListener() |
void |
init(org.apache.hadoop.conf.Configuration conf) |
protected static void |
initAndStartAppMaster(MRAppMaster appMaster,
org.apache.hadoop.yarn.conf.YarnConfiguration conf,
String jobUserName) |
boolean |
isNewApiCommitter() |
protected boolean |
keepJobFiles(org.apache.hadoop.mapred.JobConf conf) |
static void |
main(String[] args) |
void |
shutDownJob() |
void |
start() |
protected void |
startJobs()
This can be overridden to instantiate multiple jobs and create a
workflow.
|
protected void |
sysexit()
Exit call.
|
Methods inherited from class org.apache.hadoop.yarn.service.CompositeService: addService, getServices, removeService, stop

public static final int SHUTDOWN_HOOK_PRIORITY
protected final MRAppMetrics metrics
protected org.apache.hadoop.security.UserGroupInformation currentUser
public MRAppMaster(org.apache.hadoop.yarn.api.records.ApplicationAttemptId applicationAttemptId,
org.apache.hadoop.yarn.api.records.ContainerId containerId,
String nmHost,
int nmPort,
int nmHttpPort,
long appSubmitTime)
public MRAppMaster(org.apache.hadoop.yarn.api.records.ApplicationAttemptId applicationAttemptId,
org.apache.hadoop.yarn.api.records.ContainerId containerId,
String nmHost,
int nmPort,
int nmHttpPort,
org.apache.hadoop.yarn.Clock clock,
long appSubmitTime)
public void init(org.apache.hadoop.conf.Configuration conf)
Specified by: init in interface org.apache.hadoop.yarn.service.Service
Overrides: init in class org.apache.hadoop.yarn.service.CompositeService

protected org.apache.hadoop.yarn.event.Dispatcher createDispatcher()
protected boolean keepJobFiles(org.apache.hadoop.mapred.JobConf conf)
protected org.apache.hadoop.fs.FileSystem getFileSystem(org.apache.hadoop.conf.Configuration conf)
throws IOException
Parameters: conf - the conf object
Throws: IOException

protected org.apache.hadoop.security.Credentials getCredentials()
public void cleanupStagingDir()
throws IOException
Throws: IOException

protected void sysexit()
public void shutDownJob()
protected org.apache.hadoop.yarn.event.EventHandler<JobFinishEvent> createJobFinishEventHandler()
protected Job createJob(org.apache.hadoop.conf.Configuration conf, JobStateInternal forcedState, String diagnostic)
Parameters: forcedState - a state to force the job into, or null for normal operation. diagnostic - a diagnostic message to include with the job.

protected void downloadTokensAndSetupUGI(org.apache.hadoop.conf.Configuration conf)
Parameters: conf -

protected void addIfService(Object object)
protected org.apache.hadoop.yarn.event.EventHandler<JobHistoryEvent> createJobHistoryHandler(AppContext context)
protected org.apache.hadoop.yarn.service.AbstractService createStagingDirCleaningService()
protected Speculator createSpeculator(org.apache.hadoop.conf.Configuration conf, AppContext context)
protected TaskAttemptListener createTaskAttemptListener(AppContext context)
protected org.apache.hadoop.yarn.event.EventHandler<CommitterEvent> createCommitterEventHandler(AppContext context, org.apache.hadoop.mapreduce.OutputCommitter committer)
protected ContainerAllocator createContainerAllocator(ClientService clientService, AppContext context)
protected RMHeartbeatHandler getRMHeartbeatHandler()
protected ContainerLauncher createContainerLauncher(AppContext context)
protected ClientService createClientService(AppContext context)
public org.apache.hadoop.yarn.api.records.ApplicationId getAppID()
public org.apache.hadoop.yarn.api.records.ApplicationAttemptId getAttemptID()
public org.apache.hadoop.mapreduce.v2.api.records.JobId getJobId()
public org.apache.hadoop.mapreduce.OutputCommitter getCommitter()
public boolean isNewApiCommitter()
public int getStartCount()
public AppContext getContext()
public org.apache.hadoop.yarn.event.Dispatcher getDispatcher()
public Map<org.apache.hadoop.mapreduce.v2.api.records.TaskId,org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo> getCompletedTaskFromPreviousRun()
public List<org.apache.hadoop.mapreduce.v2.api.records.AMInfo> getAllAMInfos()
public ContainerAllocator getContainerAllocator()
public ContainerLauncher getContainerLauncher()
public TaskAttemptListener getTaskAttemptListener()
public void start()
Specified by: start in interface org.apache.hadoop.yarn.service.Service
Overrides: start in class org.apache.hadoop.yarn.service.CompositeService

protected void startJobs()
public static void main(String[] args)
protected static void initAndStartAppMaster(MRAppMaster appMaster, org.apache.hadoop.yarn.conf.YarnConfiguration conf, String jobUserName) throws IOException, InterruptedException
Throws: IOException, InterruptedException

Copyright © 2014 Apache Software Foundation. All Rights Reserved.