|
||||||||||
PREV CLASS NEXT CLASS | FRAMES NO FRAMES | |||||||||
SUMMARY: NESTED | FIELD | CONSTR | METHOD | DETAIL: FIELD | CONSTR | METHOD |
java.lang.Object
  org.apache.hadoop.hdfs.server.namenode.FSNamesystem
@InterfaceAudience.Private public class FSNamesystem
FSNamesystem does the actual bookkeeping work for the NameNode. It tracks several important tables. 1) valid fsname --> blocklist (kept on disk, logged) 2) Set of all valid blocks (inverted #1) 3) block --> machinelist (kept in memory, rebuilt dynamically from reports) 4) machine --> blocklist (inverted #2) 5) LRU cache of updated-heartbeat machines
Nested Class Summary |
---|
Nested classes/interfaces inherited from interface org.apache.hadoop.hdfs.protocol.FSConstants |
---|
FSConstants.DatanodeReportType, FSConstants.SafeModeAction, FSConstants.UpgradeAction |
Field Summary | |
---|---|
static org.apache.commons.logging.Log |
auditLog
Logger for audit events, noting successful FSNamesystem operations. |
org.apache.hadoop.hdfs.server.namenode.FSDirectory |
dir
|
LeaseManager |
leaseManager
|
org.apache.hadoop.util.Daemon |
lmthread
|
static org.apache.commons.logging.Log |
LOG
|
org.apache.hadoop.util.Daemon |
replthread
|
Method Summary | |
---|---|
boolean |
abandonBlock(Block b,
String src,
String holder)
The client would like to let go of the given block |
void |
blockReceived(DatanodeID nodeID,
Block block,
String delHint)
The given node is reporting that it received a certain block. |
void |
cancelDelegationToken(org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> token)
|
void |
close()
Close down this file system manager. |
org.apache.hadoop.hdfs.server.namenode.FSNamesystem.CompleteFileStatus |
completeFile(String src,
String holder,
Block last)
|
int |
computeDatanodeWork()
Compute block replication and block invalidation work that can be scheduled on data-nodes. |
void |
concat(String target,
String[] srcs)
Moves all the blocks from srcs and appends them to target. To avoid rollbacks we will verify the validity of ALL of the args before we start the actual move. |
void |
createSymlink(String target,
String link,
org.apache.hadoop.fs.permission.PermissionStatus dirPerms,
boolean createParent)
Create a symbolic link. |
DatanodeInfo[] |
datanodeReport(FSConstants.DatanodeReportType type)
|
boolean |
delete(String src,
boolean recursive)
Remove the indicated filename from namespace. |
void |
DFSNodesStatus(ArrayList<DatanodeDescriptor> live,
ArrayList<DatanodeDescriptor> dead)
|
LocatedBlock |
getAdditionalBlock(String src,
String clientName,
Block previous,
HashMap<org.apache.hadoop.net.Node,org.apache.hadoop.net.Node> excludedNodes)
The client would like to obtain an additional block for the indicated filename (which is being written-to). |
int |
getBlockCapacity()
|
long |
getBlocksTotal()
Get the total number of blocks in the system. |
long |
getCapacityRemaining()
Total non-used raw bytes. |
float |
getCapacityRemainingPercent()
Total remaining space by data nodes as percentage of total capacity |
long |
getCapacityTotal()
Total raw bytes including non-dfs used space. |
long |
getCapacityUsed()
Total used space by data nodes |
long |
getCapacityUsedNonDFS()
Total used space by data nodes for non DFS purposes such as storing temporary files on the local file system |
float |
getCapacityUsedPercent()
Total used space by data nodes as percentage of total capacity |
long |
getCorruptReplicaBlocks()
Returns number of blocks with corrupt replicas |
DatanodeDescriptor |
getDatanode(DatanodeID nodeID)
Get data node by storage ID. |
DatanodeInfo |
getDataNodeInfo(String name)
|
ArrayList<DatanodeDescriptor> |
getDecommissioningNodes()
|
org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> |
getDelegationToken(org.apache.hadoop.io.Text renewer)
|
DelegationTokenSecretManager |
getDelegationTokenSecretManager()
|
long |
getExcessBlocks()
|
long |
getFilesTotal()
Total number of files and directories |
FSNamesystemMetrics |
getFSNamesystemMetrics()
get FSNamesystemMetrics |
String |
getFSState()
The state of the file system: Safemode or Operational |
long |
getGenerationStamp()
Gets the generation stamp for this filesystem |
DirectoryListing |
getListing(String src,
byte[] startAfter)
Get a partial listing of the indicated directory |
long |
getMissingBlocksCount()
|
static Collection<URI> |
getNamespaceDirs(org.apache.hadoop.conf.Configuration conf)
|
static Collection<URI> |
getNamespaceEditsDirs(org.apache.hadoop.conf.Configuration conf)
|
int |
getNumDeadDataNodes()
Number of dead data nodes |
int |
getNumLiveDataNodes()
Number of live data nodes |
long |
getPendingDeletionBlocks()
|
long |
getPendingReplicationBlocks()
Blocks pending to be replicated |
String |
getRegistrationID()
Get registrationID for datanodes based on the namespaceID. |
long |
getScheduledReplicationBlocks()
Blocks scheduled for replication |
Date |
getStartTime()
|
static Collection<URI> |
getStorageDirs(org.apache.hadoop.conf.Configuration conf,
String propertyName)
|
int |
getTotalLoad()
Total number of connections. |
long |
getUnderReplicatedBlocks()
Blocks under replicated |
protected org.apache.hadoop.fs.permission.PermissionStatus |
getUpgradePermission()
Return the default path permission when upgrading from releases with no permissions (<=0.15) to releases with permissions (>=0.16) |
void |
logUpdateMasterKey(org.apache.hadoop.security.token.delegation.DelegationKey key)
Log the updateMasterKey operation to edit logs |
void |
markBlockAsCorrupt(Block blk,
DatanodeInfo dn)
Mark the block belonging to datanode as corrupt |
boolean |
mkdirs(String src,
org.apache.hadoop.fs.permission.PermissionStatus permissions,
boolean createParent)
Create all the necessary directories |
int |
numCorruptReplicas(Block blk)
|
void |
processReport(DatanodeID nodeID,
BlockListAsLongs newReport)
The given node is reporting all its blocks. |
void |
refreshNodes(org.apache.hadoop.conf.Configuration conf)
Rereads the config to get hosts and exclude list file names. |
void |
registerDatanode(DatanodeRegistration nodeReg)
Register Datanode. |
void |
removeDatanode(DatanodeID nodeID)
remove a datanode descriptor |
long |
renewDelegationToken(org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> token)
|
void |
setGenerationStamp(long stamp)
Sets the generation stamp for this filesystem |
void |
setNodeReplicationLimit(int limit)
|
void |
setOwner(String src,
String username,
String group)
Set owner for an existing file. |
void |
setPermission(String src,
org.apache.hadoop.fs.permission.FsPermission permission)
Set permissions for an existing file. |
boolean |
setReplication(String src,
short replication)
Set replication for an existing file. |
void |
setTimes(String src,
long mtime,
long atime)
stores the modification and access time for this inode. |
void |
shutdown()
shutdown FSNamesystem |
void |
stopDecommission(DatanodeDescriptor node)
Stop decommissioning the specified datanode. |
Methods inherited from class java.lang.Object |
---|
clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait |
Field Detail |
---|
public static final org.apache.commons.logging.Log LOG
public static final org.apache.commons.logging.Log auditLog
key=value
pairs to be written for the following properties:
ugi=<ugi in RPC>
ip=<remote IP>
cmd=<command>
src=<src path>
dst=<dst path (optional)>
perm=<permissions (optional)>
public org.apache.hadoop.hdfs.server.namenode.FSDirectory dir
public LeaseManager leaseManager
public org.apache.hadoop.util.Daemon lmthread
public org.apache.hadoop.util.Daemon replthread
Method Detail |
---|
public static Collection<URI> getNamespaceDirs(org.apache.hadoop.conf.Configuration conf)
public static Collection<URI> getStorageDirs(org.apache.hadoop.conf.Configuration conf, String propertyName)
public static Collection<URI> getNamespaceEditsDirs(org.apache.hadoop.conf.Configuration conf)
protected org.apache.hadoop.fs.permission.PermissionStatus getUpgradePermission()
public void close()
public void setPermission(String src, org.apache.hadoop.fs.permission.FsPermission permission) throws IOException, org.apache.hadoop.fs.UnresolvedLinkException
IOException
org.apache.hadoop.fs.UnresolvedLinkException
public void setOwner(String src, String username, String group) throws IOException, org.apache.hadoop.fs.UnresolvedLinkException
IOException
org.apache.hadoop.fs.UnresolvedLinkException
public void concat(String target, String[] srcs) throws IOException, org.apache.hadoop.fs.UnresolvedLinkException
target
- srcs
-
IOException
org.apache.hadoop.fs.UnresolvedLinkException
public void setTimes(String src, long mtime, long atime) throws IOException, org.apache.hadoop.fs.UnresolvedLinkException
IOException
org.apache.hadoop.fs.UnresolvedLinkException
public void createSymlink(String target, String link, org.apache.hadoop.fs.permission.PermissionStatus dirPerms, boolean createParent) throws IOException, org.apache.hadoop.fs.UnresolvedLinkException
IOException
org.apache.hadoop.fs.UnresolvedLinkException
public boolean setReplication(String src, short replication) throws IOException, org.apache.hadoop.fs.UnresolvedLinkException
src
- file name
replication
- new replication
IOException
org.apache.hadoop.fs.UnresolvedLinkException
ClientProtocol.setReplication(String, short)
public LocatedBlock getAdditionalBlock(String src, String clientName, Block previous, HashMap<org.apache.hadoop.net.Node,org.apache.hadoop.net.Node> excludedNodes) throws IOException, org.apache.hadoop.fs.UnresolvedLinkException
IOException
org.apache.hadoop.fs.UnresolvedLinkException
public boolean abandonBlock(Block b, String src, String holder) throws IOException, org.apache.hadoop.fs.UnresolvedLinkException
IOException
org.apache.hadoop.fs.UnresolvedLinkException
public org.apache.hadoop.hdfs.server.namenode.FSNamesystem.CompleteFileStatus completeFile(String src, String holder, Block last) throws IOException, org.apache.hadoop.fs.UnresolvedLinkException
IOException
org.apache.hadoop.fs.UnresolvedLinkException
public void markBlockAsCorrupt(Block blk, DatanodeInfo dn) throws IOException
blk
- Block to be marked as corrupt
dn
- Datanode which holds the corrupt replica
IOException
public boolean delete(String src, boolean recursive) throws IOException, org.apache.hadoop.fs.UnresolvedLinkException
IOException
org.apache.hadoop.fs.UnresolvedLinkException
public boolean mkdirs(String src, org.apache.hadoop.fs.permission.PermissionStatus permissions, boolean createParent) throws IOException, org.apache.hadoop.fs.UnresolvedLinkException
IOException
org.apache.hadoop.fs.UnresolvedLinkException
public DirectoryListing getListing(String src, byte[] startAfter) throws IOException, org.apache.hadoop.fs.UnresolvedLinkException
src
- the directory name
startAfter
- the name to start after
IOException
org.apache.hadoop.fs.UnresolvedLinkException
public void registerDatanode(DatanodeRegistration nodeReg) throws IOException
The purpose of registration is to identify whether the new datanode serves a new data storage, and will report new data block copies, which the namenode was not aware of; or the datanode is a replacement node for the data storage that was previously served by a different or the same (in terms of host:port) datanode. The data storages are distinguished by their storageIDs. When a new data storage is reported the namenode issues a new unique storageID.
Finally, the namenode returns its namespaceID as the registrationID for the datanodes. namespaceID is a persistent attribute of the name space. The registrationID is checked every time the datanode is communicating with the namenode. Datanodes with an inappropriate registrationID are rejected. If the namenode stops, and then restarts, it can restore its namespaceID and will continue serving the datanodes that have previously registered with the namenode without restarting the whole cluster.
IOException
DataNode.register()
public String getRegistrationID()
registerDatanode(DatanodeRegistration)
,
FSImage.newNamespaceID()
public int computeDatanodeWork() throws IOException
IOException
public void setNodeReplicationLimit(int limit)
public void removeDatanode(DatanodeID nodeID) throws IOException
nodeID
- datanode ID
IOException
public void processReport(DatanodeID nodeID, BlockListAsLongs newReport) throws IOException
IOException
public void blockReceived(DatanodeID nodeID, Block block, String delHint) throws IOException
IOException
public long getMissingBlocksCount()
public long getCapacityTotal()
getCapacityTotal
in interface FSNamesystemMBean
public long getCapacityUsed()
getCapacityUsed
in interface FSNamesystemMBean
public float getCapacityUsedPercent()
public long getCapacityUsedNonDFS()
public long getCapacityRemaining()
getCapacityRemaining
in interface FSNamesystemMBean
public float getCapacityRemainingPercent()
public int getTotalLoad()
getTotalLoad
in interface FSClusterStats
getTotalLoad
in interface FSNamesystemMBean
public DatanodeInfo[] datanodeReport(FSConstants.DatanodeReportType type) throws org.apache.hadoop.security.AccessControlException
org.apache.hadoop.security.AccessControlException
public void DFSNodesStatus(ArrayList<DatanodeDescriptor> live, ArrayList<DatanodeDescriptor> dead)
public void stopDecommission(DatanodeDescriptor node) throws IOException
IOException
public DatanodeInfo getDataNodeInfo(String name)
public Date getStartTime()
public void refreshNodes(org.apache.hadoop.conf.Configuration conf) throws IOException
IOException
public DatanodeDescriptor getDatanode(DatanodeID nodeID) throws IOException
nodeID
-
IOException
public long getBlocksTotal()
getBlocksTotal
in interface FSNamesystemMBean
public long getFilesTotal()
FSNamesystemMBean
getFilesTotal
in interface FSNamesystemMBean
public long getPendingReplicationBlocks()
FSNamesystemMBean
getPendingReplicationBlocks
in interface FSNamesystemMBean
public long getUnderReplicatedBlocks()
FSNamesystemMBean
getUnderReplicatedBlocks
in interface FSNamesystemMBean
public long getCorruptReplicaBlocks()
public long getScheduledReplicationBlocks()
FSNamesystemMBean
getScheduledReplicationBlocks
in interface FSNamesystemMBean
public long getPendingDeletionBlocks()
public long getExcessBlocks()
public int getBlockCapacity()
public String getFSState()
FSNamesystemMBean
getFSState
in interface FSNamesystemMBean
public FSNamesystemMetrics getFSNamesystemMetrics()
public void shutdown()
public int getNumLiveDataNodes()
getNumLiveDataNodes
in interface FSNamesystemMBean
public int getNumDeadDataNodes()
getNumDeadDataNodes
in interface FSNamesystemMBean
public void setGenerationStamp(long stamp)
public long getGenerationStamp()
public int numCorruptReplicas(Block blk)
public ArrayList<DatanodeDescriptor> getDecommissioningNodes()
public DelegationTokenSecretManager getDelegationTokenSecretManager()
public org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> getDelegationToken(org.apache.hadoop.io.Text renewer) throws IOException
renewer
-
IOException
public long renewDelegationToken(org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> token) throws org.apache.hadoop.security.token.SecretManager.InvalidToken, IOException
token
-
org.apache.hadoop.security.token.SecretManager.InvalidToken
IOException
public void cancelDelegationToken(org.apache.hadoop.security.token.Token<DelegationTokenIdentifier> token) throws IOException
token
-
IOException
public void logUpdateMasterKey(org.apache.hadoop.security.token.delegation.DelegationKey key) throws IOException
key
- new delegation key.
IOException
|
||||||||||
PREV CLASS NEXT CLASS | FRAMES NO FRAMES | |||||||||
SUMMARY: NESTED | FIELD | CONSTR | METHOD | DETAIL: FIELD | CONSTR | METHOD |