@InterfaceAudience.Private public class DFSClient extends Object implements Closeable
Modifier and Type | Class and Description
---|---
static class | DFSClient.Conf: DFSClient configuration
static class | DFSClient.DFSDataInputStream: Deprecated. Use HdfsDataInputStream instead.
static class | DFSClient.Renewer
Modifier and Type | Field and Description
---|---
static org.apache.commons.logging.Log | LOG
static long | SERVER_DEFAULTS_VALIDITY_PERIOD
Constructor and Description
---|
DFSClient(Configuration conf): Deprecated at 0.21.
DFSClient(InetSocketAddress address, Configuration conf)
DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode, Configuration conf, FileSystem.Statistics stats): Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
DFSClient(URI nameNodeUri, Configuration conf): Same as this(nameNodeUri, conf, null).
DFSClient(URI nameNodeUri, Configuration conf, FileSystem.Statistics stats): Same as this(nameNodeUri, null, conf, stats).
Modifier and Type | Method and Description
---|---
void | allowSnapshot(String snapshotRoot): Allow snapshot on a directory.
HdfsDataOutputStream | append(String src, int buffersize, Progressable progress, FileSystem.Statistics statistics): Append to an existing HDFS file.
void | cancelDelegationToken(Token<DelegationTokenIdentifier> token): Deprecated. Use Token.cancel instead.
void | clearDataEncryptionKey()
void | close(): Close the file system, abandoning all of the leases and files being created, and close connections to the namenode.
void | concat(String trg, String[] srcs): Move blocks from srcs to trg and delete srcs. See ClientProtocol.concat(String, String[]).
OutputStream | create(String src, boolean overwrite): Call create(String, boolean, short, long, Progressable) with default replication and blockSize.
OutputStream | create(String src, boolean overwrite, Progressable progress)
OutputStream | create(String src, boolean overwrite, short replication, long blockSize): Call create(String, boolean, short, long, Progressable) with null progress.
OutputStream | create(String src, boolean overwrite, short replication, long blockSize, Progressable progress): Call create(String, boolean, short, long, Progressable, int) with default bufferSize.
OutputStream | create(String src, boolean overwrite, short replication, long blockSize, Progressable progress, int buffersize): Call create(String, FsPermission, EnumSet, short, long, Progressable, int, ChecksumOpt) with default permission FsPermission.getFileDefault().
DFSOutputStream | create(String src, FsPermission permission, EnumSet<CreateFlag> flag, boolean createParent, short replication, long blockSize, Progressable progress, int buffersize, Options.ChecksumOpt checksumOpt): Create a new dfs file with the specified block replication, with write-progress reporting, and return an output stream for writing into the file.
DFSOutputStream | create(String src, FsPermission permission, EnumSet<CreateFlag> flag, boolean createParent, short replication, long blockSize, Progressable progress, int buffersize, Options.ChecksumOpt checksumOpt, InetSocketAddress[] favoredNodes): Same as create(String, FsPermission, EnumSet, boolean, short, long, Progressable, int, ChecksumOpt), with the addition of favoredNodes as a hint to where the namenode should place the file blocks.
DFSOutputStream | create(String src, FsPermission permission, EnumSet<CreateFlag> flag, short replication, long blockSize, Progressable progress, int buffersize, Options.ChecksumOpt checksumOpt): Call create(String, FsPermission, EnumSet, boolean, short, long, Progressable, int, ChecksumOpt) with createParent set to true.
String | createSnapshot(String snapshotRoot, String snapshotName): Create one snapshot.
void | createSymlink(String target, String link, boolean createParent): Creates a symbolic link.
DatanodeInfo[] | datanodeReport(HdfsConstants.DatanodeReportType type)
boolean | delete(String src): Deprecated.
boolean | delete(String src, boolean recursive): Delete a file or directory.
void | deleteSnapshot(String snapshotRoot, String snapshotName): Delete a snapshot of a snapshottable directory.
void | disableLegacyBlockReaderLocal()
void | disallowSnapshot(String snapshotRoot): Disallow snapshot on a directory.
boolean | exists(String src): Implemented using getFileInfo(src).
void | finalizeUpgrade()
BlockLocation[] | getBlockLocations(String src, long start, long length): Get block location info about a file; returns a list of hostnames that store data for a specific file region.
long | getBlockSize(String f)
BlockStorageLocation[] | getBlockStorageLocations(List<BlockLocation> blockLocations): Get block location information about a list of HdfsBlockLocation.
String | getCanonicalServiceName(): Get a canonical token service name for this client's tokens.
String | getClientName()
DFSClient.Conf | getConf()
long | getCorruptBlocksCount(): Returns the count of blocks with at least one replica marked corrupt.
DataEncryptionKey | getDataEncryptionKey()
long | getDefaultBlockSize(): Get the default block size for this cluster.
CachingStrategy | getDefaultReadCachingStrategy()
short | getDefaultReplication()
CachingStrategy | getDefaultWriteCachingStrategy()
Token<DelegationTokenIdentifier> | getDelegationToken(Text renewer)
FsStatus | getDiskStatus()
org.apache.hadoop.hdfs.DomainSocketFactory | getDomainSocketFactory()
MD5MD5CRC32FileChecksum | getFileChecksum(String src): Get the checksum of a file.
HdfsFileStatus | getFileInfo(String src): Get the file info for a specific file or directory.
HdfsFileStatus | getFileLinkInfo(String src): Get the file info for a specific file or directory.
org.apache.hadoop.hdfs.LeaseRenewer | getLeaseRenewer(): Return the lease renewer instance.
String | getLinkTarget(String path): Resolve the *first* symlink, if any, in the path.
LocatedBlocks | getLocatedBlocks(String src, long start)
LocatedBlocks | getLocatedBlocks(String src, long start, long length)
long | getMissingBlocksCount(): Returns the count of blocks with no good replicas left.
ClientProtocol | getNamenode(): Get the namenode associated with this DFSClient object.
FsServerDefaults | getServerDefaults(): Get server default values for a number of configuration params.
SnapshotDiffReport | getSnapshotDiffReport(String snapshotDir, String fromSnapshot, String toSnapshot): Get the difference between two snapshots, or between a snapshot and the current tree of a directory.
SnapshottableDirectoryStatus[] | getSnapshottableDirListing(): Get all the current snapshottable directories.
long | getUnderReplicatedBlocksCount(): Returns the count of blocks with one or more replicas missing.
boolean | isFileClosed(String src): Get the close status of a file.
CorruptFileBlocks | listCorruptFileBlocks(String path, String cookie)
DirectoryListing | listPaths(String src, byte[] startAfter): Get a partial listing of the indicated directory; no block locations need to be fetched.
DirectoryListing | listPaths(String src, byte[] startAfter, boolean needLocation): Get a partial listing of the indicated directory; use HdfsFileStatus.EMPTY_NAME as startAfter to fetch a listing starting from the first entry in the directory.
void | metaSave(String pathname): Dumps DFS data structures into the specified file.
boolean | mkdirs(String src): Deprecated.
boolean | mkdirs(String src, FsPermission permission, boolean createParent): Create a directory (or hierarchy of directories) with the given name and permission.
DFSInputStream | open(String src)
DFSInputStream | open(String src, int buffersize, boolean verifyChecksum): Create an input stream that obtains a nodelist from the namenode, and then reads from all the right places.
DFSInputStream | open(String src, int buffersize, boolean verifyChecksum, FileSystem.Statistics stats): Deprecated. Use open(String, int, boolean) instead.
DFSOutputStream | primitiveCreate(String src, FsPermission absPermission, EnumSet<CreateFlag> flag, boolean createParent, short replication, long blockSize, Progressable progress, int buffersize, Options.ChecksumOpt checksumOpt): Same as create(String, FsPermission, EnumSet, short, long, Progressable, int, ChecksumOpt) except that the permission is absolute (i.e. it has already been masked with umask).
boolean | primitiveMkdir(String src, FsPermission absPermission): Same as mkdirs(String, FsPermission, boolean) except that the permission has already been masked against umask.
boolean | primitiveMkdir(String src, FsPermission absPermission, boolean createParent): Same as mkdirs(String, FsPermission, boolean) except that the permission has already been masked against umask.
void | refreshNodes(): Refresh the hosts and exclude files.
boolean | rename(String src, String dst): Deprecated. Use rename(String, String, Options.Rename...) instead.
void | rename(String src, String dst, Options.Rename... options): Rename a file or directory.
void | renameSnapshot(String snapshotDir, String snapshotOldName, String snapshotNewName): Rename a snapshot.
long | renewDelegationToken(Token<DelegationTokenIdentifier> token): Deprecated. Use Token.renew instead.
void | reportBadBlocks(LocatedBlock[] blocks): Report corrupt blocks that were discovered by the client.
void | setBalancerBandwidth(long bandwidth): Requests the namenode to tell all datanodes to use a new, non-persistent bandwidth value for dfs.balance.bandwidthPerSec.
void | setOwner(String src, String username, String groupname): Set file or directory owner.
void | setPermission(String src, FsPermission permission): Set permissions on a file or directory.
boolean | setReplication(String src, short replication): Set replication for an existing file.
boolean | setSafeMode(HdfsConstants.SafeModeAction action): Enter, leave, or get safe mode.
boolean | setSafeMode(HdfsConstants.SafeModeAction action, boolean isChecked): Enter, leave, or get safe mode.
void | setTimes(String src, long mtime, long atime): Set the modification and access time of a file.
String | toString()
boolean | useLegacyBlockReaderLocal()
public static final org.apache.commons.logging.Log LOG
public static final long SERVER_DEFAULTS_VALIDITY_PERIOD
@Deprecated public DFSClient(Configuration conf) throws IOException
Deprecated at 0.21.
Throws: IOException
See Also: DFSClient(InetSocketAddress, Configuration)
public DFSClient(InetSocketAddress address, Configuration conf) throws IOException
Throws: IOException
public DFSClient(URI nameNodeUri, Configuration conf) throws IOException
Same as this(nameNodeUri, conf, null).
Throws: IOException
See Also: DFSClient(URI, Configuration, FileSystem.Statistics)
public DFSClient(URI nameNodeUri, Configuration conf, FileSystem.Statistics stats) throws IOException
Same as this(nameNodeUri, null, conf, stats).
public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode, Configuration conf, FileSystem.Statistics stats) throws IOException
Create a new DFSClient connected to the given nameNodeUri or rpcNamenode. If DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY is set in the configuration, the DFSClient will use LossyRetryInvocationHandler as its RetryInvocationHandler; otherwise one of nameNodeUri or rpcNamenode must be null.
Throws: IOException
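DFSClient is tagged @InterfaceAudience.Private, so applications normally reach HDFS through DistributedFileSystem rather than constructing this class themselves. As an illustration only, a minimal sketch of building a client from a Configuration and a NameNode URI (the URI below is a placeholder) and closing it afterwards:

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;

public class DfsClientConstructionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder address; in practice this usually comes from fs.defaultFS.
    URI nameNodeUri = URI.create("hdfs://namenode.example.com:8020");

    // Equivalent to this(nameNodeUri, null, conf, null): no rpcNamenode, no statistics.
    DFSClient client = new DFSClient(nameNodeUri, conf);
    try {
      System.out.println("client name: " + client.getClientName());
      System.out.println("default block size: " + client.getDefaultBlockSize());
    } finally {
      client.close(); // abandons leases and closes namenode connections
    }
  }
}
```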
public DFSClient.Conf getConf()
public String getClientName()
public org.apache.hadoop.hdfs.LeaseRenewer getLeaseRenewer() throws IOException
Throws: IOException
public void close() throws IOException
Specified by: close in interface Closeable
Specified by: close in interface AutoCloseable
Throws: IOException
public long getDefaultBlockSize()
public long getBlockSize(String f) throws IOException
Throws: IOException
See Also: ClientProtocol.getPreferredBlockSize(String)
public FsServerDefaults getServerDefaults() throws IOException
Get server default values for a number of configuration params.
Throws: IOException
See Also: ClientProtocol.getServerDefaults()
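The sketches below assume a client constructed as shown earlier. For instance, getServerDefaults() lets a caller read cluster-supplied defaults instead of hard-coding them; the helper class name and the FsServerDefaults getters used are assumptions about the standard Hadoop API:

```java
import java.io.IOException;

import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.hdfs.DFSClient;

class ServerDefaultsSketch {
  // Print a few server-supplied defaults; the values come from the namenode.
  static void printDefaults(DFSClient client) throws IOException {
    FsServerDefaults defaults = client.getServerDefaults();
    System.out.println("block size:     " + defaults.getBlockSize());
    System.out.println("replication:    " + defaults.getReplication());
    System.out.println("bytes/checksum: " + defaults.getBytesPerChecksum());
  }
}
```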
@InterfaceAudience.LimitedPrivate(value="HDFS") public String getCanonicalServiceName()
public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer) throws IOException
IOException
ClientProtocol.getDelegationToken(Text)
@Deprecated public long renewDelegationToken(Token<DelegationTokenIdentifier> token) throws SecretManager.InvalidToken, IOException
Deprecated. Use Token.renew instead.
Parameters:
token - the token to renew
Throws: SecretManager.InvalidToken, IOException
@Deprecated public void cancelDelegationToken(Token<DelegationTokenIdentifier> token) throws SecretManager.InvalidToken, IOException
Deprecated. Use Token.cancel instead.
Parameters:
token - the token to cancel
Throws: SecretManager.InvalidToken, IOException
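Both deprecation notes point at the generic Token API. A hedged sketch of the replacement pattern (fetch a token for a renewer, renew it, cancel it when done); the renewer name is illustrative and a secured cluster is assumed:

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;

class DelegationTokenSketch {
  static void demo(DFSClient client, Configuration conf)
      throws IOException, InterruptedException {
    // Ask the namenode for a delegation token that "yarn" may renew.
    Token<DelegationTokenIdentifier> token = client.getDelegationToken(new Text("yarn"));

    // Preferred over the deprecated DFSClient.renewDelegationToken(token).
    long nextExpiry = token.renew(conf);
    System.out.println("token valid until " + nextExpiry);

    // Preferred over the deprecated DFSClient.cancelDelegationToken(token).
    token.cancel(conf);
  }
}
```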
public void reportBadBlocks(LocatedBlock[] blocks) throws IOException
Throws: IOException
See Also: ClientProtocol.reportBadBlocks(LocatedBlock[])
public short getDefaultReplication()
public LocatedBlocks getLocatedBlocks(String src, long start) throws IOException
Throws: IOException
public LocatedBlocks getLocatedBlocks(String src, long start, long length) throws IOException
Throws: IOException
public BlockLocation[] getBlockLocations(String src, long start, long length) throws IOException, UnresolvedLinkException
Get block location info about a file. getBlockLocations() returns a list of hostnames that store data for a specific file region.
Throws: IOException, UnresolvedLinkException
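A small sketch of using the returned BlockLocation array to see which hosts serve a file region (the helper name is illustrative):

```java
import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.hdfs.DFSClient;

class BlockLocationSketch {
  // Print which hosts hold data for the first `length` bytes of a file.
  static void printHosts(DFSClient client, String src, long length) throws IOException {
    BlockLocation[] locations = client.getBlockLocations(src, 0, length);
    for (BlockLocation loc : locations) {
      System.out.println("offset " + loc.getOffset() + ", length " + loc.getLength()
          + " -> hosts " + Arrays.toString(loc.getHosts()));
    }
  }
}
```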
public BlockStorageLocation[] getBlockStorageLocations(List<BlockLocation> blockLocations) throws IOException, UnsupportedOperationException, InvalidBlockTokenException
Get block location information about a list of HdfsBlockLocation. Used by DistributedFileSystem.getFileBlockStorageLocations(List) to get BlockStorageLocations for blocks returned by DistributedFileSystem.getFileBlockLocations(org.apache.hadoop.fs.FileStatus, long, long). This is done by making a round of RPCs to the associated datanodes, asking for the volume of each block replica. The returned array of BlockStorageLocation exposes this information as a VolumeId.
Parameters:
blockLocations - target blocks on which to query volume location information
Throws: IOException, UnsupportedOperationException, InvalidBlockTokenException
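An illustrative follow-on to the previous sketch: feed the BlockLocations back in to learn which datanode volume holds each replica. This assumes the cluster has block-volume metadata support enabled and that BlockStorageLocation.getVolumeIds() is the accessor, as in Hadoop 2.x:

```java
import java.util.Arrays;

import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.BlockStorageLocation;
import org.apache.hadoop.fs.VolumeId;
import org.apache.hadoop.hdfs.DFSClient;

class VolumeIdSketch {
  // For each block of `src`, ask the datanodes which volume stores each replica.
  static void printVolumes(DFSClient client, String src, long length) throws Exception {
    BlockLocation[] blocks = client.getBlockLocations(src, 0, length);
    BlockStorageLocation[] withVolumes =
        client.getBlockStorageLocations(Arrays.asList(blocks));
    for (BlockStorageLocation loc : withVolumes) {
      VolumeId[] volumes = loc.getVolumeIds(); // one entry per replica
      System.out.println(loc + " -> volumes " + Arrays.toString(volumes));
    }
  }
}
```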
public DFSInputStream open(String src) throws IOException, UnresolvedLinkException
Throws: IOException, UnresolvedLinkException
@Deprecated public DFSInputStream open(String src, int buffersize, boolean verifyChecksum, FileSystem.Statistics stats) throws IOException, UnresolvedLinkException
Deprecated. Use open(String, int, boolean) instead.
Throws: IOException, UnresolvedLinkException
public DFSInputStream open(String src, int buffersize, boolean verifyChecksum) throws IOException, UnresolvedLinkException
Create an input stream that obtains a nodelist from the namenode, and then reads from all the right places.
Throws: IOException, UnresolvedLinkException
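A short read sketch using the non-deprecated open overload (the buffer sizes here are arbitrary):

```java
import java.io.IOException;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInputStream;

class ReadSketch {
  // Count the bytes of a file, reading with checksum verification enabled.
  static long countBytes(DFSClient client, String src) throws IOException {
    DFSInputStream in = client.open(src, 4096, true);
    try {
      byte[] buf = new byte[8192];
      long total = 0;
      int n;
      while ((n = in.read(buf)) != -1) {
        total += n;
      }
      return total;
    } finally {
      in.close();
    }
  }
}
```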
public ClientProtocol getNamenode()
public OutputStream create(String src, boolean overwrite) throws IOException
Call create(String, boolean, short, long, Progressable) with default replication and blockSize and null progress.
Throws: IOException
public OutputStream create(String src, boolean overwrite, Progressable progress) throws IOException
Throws: IOException
public OutputStream create(String src, boolean overwrite, short replication, long blockSize) throws IOException
Call create(String, boolean, short, long, Progressable) with null progress.
Throws: IOException
public OutputStream create(String src, boolean overwrite, short replication, long blockSize, Progressable progress) throws IOException
Call create(String, boolean, short, long, Progressable, int) with default bufferSize.
Throws: IOException
public OutputStream create(String src, boolean overwrite, short replication, long blockSize, Progressable progress, int buffersize) throws IOException
Call create(String, FsPermission, EnumSet, short, long, Progressable, int, ChecksumOpt) with default permission FsPermission.getFileDefault().
Parameters:
src - File name
overwrite - overwrite an existing file if true
replication - replication factor for the file
blockSize - maximum block size
progress - interface for reporting client progress
buffersize - underlying buffer size
Throws: IOException
public DFSOutputStream create(String src, FsPermission permission, EnumSet<CreateFlag> flag, short replication, long blockSize, Progressable progress, int buffersize, Options.ChecksumOpt checksumOpt) throws IOException
Call create(String, FsPermission, EnumSet, boolean, short, long, Progressable, int, ChecksumOpt) with createParent set to true.
Throws: IOException
public DFSOutputStream create(String src, FsPermission permission, EnumSet<CreateFlag> flag, boolean createParent, short replication, long blockSize, Progressable progress, int buffersize, Options.ChecksumOpt checksumOpt) throws IOException
Create a new dfs file with the specified block replication, with write-progress reporting, and return an output stream for writing into the file.
Parameters:
src - File name
permission - The permission of the directory being created. If null, use default permission FsPermission.getFileDefault()
flag - indicates whether to create a new file, create/overwrite an existing file, or append to an existing file
createParent - create missing parent directory if true
replication - block replication
blockSize - maximum block size
progress - interface for reporting client progress
buffersize - underlying buffer size
checksumOpt - checksum options
Throws: IOException - for a detailed description of exceptions thrown
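For illustration, one way to call this overload directly. The flag, replication, block size, and buffer size values are arbitrary, and the sketch assumes a null ChecksumOpt falls back to the configured defaults:

```java
import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSOutputStream;

class CreateSketch {
  // Create (or overwrite) a file and write a few bytes.
  static void writeFile(DFSClient client, String src) throws IOException {
    EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE);
    DFSOutputStream out = client.create(
        src,
        FsPermission.getFileDefault(), // default file permission
        flags,
        true,                          // createParent
        (short) 3,                     // replication
        128L * 1024 * 1024,            // block size: 128 MB
        null,                          // no progress reporting
        4096,                          // buffer size
        null);                         // ChecksumOpt: assumed to default when null
    try {
      out.write("hello, hdfs".getBytes("UTF-8"));
    } finally {
      out.close();
    }
  }
}
```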
public DFSOutputStream create(String src, FsPermission permission, EnumSet<CreateFlag> flag, boolean createParent, short replication, long blockSize, Progressable progress, int buffersize, Options.ChecksumOpt checksumOpt, InetSocketAddress[] favoredNodes) throws IOException
Same as create(String, FsPermission, EnumSet, boolean, short, long, Progressable, int, ChecksumOpt), with the addition of favoredNodes as a hint to where the namenode should place the file blocks. The favored nodes hint is not persisted in HDFS, so it may be honored only at creation time; HDFS may later move the blocks away from the favored nodes during balancing or replication. A value of null means no favored nodes for this create.
Throws: IOException
public DFSOutputStream primitiveCreate(String src, FsPermission absPermission, EnumSet<CreateFlag> flag, boolean createParent, short replication, long blockSize, Progressable progress, int buffersize, Options.ChecksumOpt checksumOpt) throws IOException, UnresolvedLinkException
Same as create(String, FsPermission, EnumSet, short, long, Progressable, int, ChecksumOpt) except that the permission is absolute (i.e. it has already been masked with umask).
Throws: IOException, UnresolvedLinkException
public void createSymlink(String target, String link, boolean createParent) throws IOException
public String getLinkTarget(String path) throws IOException
Throws: IOException
See Also: ClientProtocol.getLinkTarget(String)
public HdfsDataOutputStream append(String src, int buffersize, Progressable progress, FileSystem.Statistics statistics) throws IOException
Append to an existing HDFS file.
Parameters:
src - file name
buffersize - buffer size
progress - for reporting write-progress; null is acceptable
statistics - file system statistics; null is acceptable
Throws: IOException
See Also: ClientProtocol.append(String, String)
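A minimal append sketch; per the parameter notes above, both progress and statistics may be null (whether the cluster permits appends depends on its configuration):

```java
import java.io.IOException;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

class AppendSketch {
  // Append one line to an existing file.
  static void appendLine(DFSClient client, String src, String line) throws IOException {
    HdfsDataOutputStream out = client.append(src, 4096, null, null);
    try {
      out.write((line + "\n").getBytes("UTF-8"));
    } finally {
      out.close();
    }
  }
}
```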
public boolean setReplication(String src, short replication) throws IOException
Set replication for an existing file.
Parameters:
src - file name
replication -
Throws: IOException
See Also: ClientProtocol.setReplication(String, short)
@Deprecated public boolean rename(String src, String dst) throws IOException
Deprecated. Use rename(String, String, Options.Rename...) instead.
Throws: IOException
See Also: ClientProtocol.rename(String, String)
public void concat(String trg, String[] srcs) throws IOException
Move blocks from srcs to trg and delete srcs. See ClientProtocol.concat(String, String[]).
Throws: IOException
public void rename(String src, String dst, Options.Rename... options) throws IOException
Throws: IOException
See Also: ClientProtocol.rename2(String, String, Options.Rename...)
@Deprecated public boolean delete(String src) throws IOException
Deprecated. See ClientProtocol.delete(String, boolean).
Throws: IOException
public boolean delete(String src, boolean recursive) throws IOException
Throws: IOException
See Also: ClientProtocol.delete(String, boolean)
public boolean exists(String src) throws IOException
Throws: IOException
public DirectoryListing listPaths(String src, byte[] startAfter) throws IOException
Throws: IOException
public DirectoryListing listPaths(String src, byte[] startAfter, boolean needLocation) throws IOException
Throws: IOException
See Also: ClientProtocol.getListing(String, byte[], boolean)
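A paging sketch built on the recommendation above: start from HdfsFileStatus.EMPTY_NAME and keep asking for the next page until no more entries remain. The DirectoryListing accessors used here (getPartialListing, getLastName, hasMore) are assumed from the Hadoop 2.x protocol classes:

```java
import java.io.IOException;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

class ListingSketch {
  // Print every entry of a directory, one page at a time.
  static void listAll(DFSClient client, String dir) throws IOException {
    byte[] startAfter = HdfsFileStatus.EMPTY_NAME;
    DirectoryListing page;
    do {
      page = client.listPaths(dir, startAfter, false);
      if (page == null) {
        break; // assumed: no listing is returned if the directory does not exist
      }
      for (HdfsFileStatus status : page.getPartialListing()) {
        System.out.println(status.getLocalName());
      }
      startAfter = page.getLastName(); // resume after the last returned entry
    } while (page.hasMore());
  }
}
```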
public HdfsFileStatus getFileInfo(String src) throws IOException
Parameters:
src - The string representation of the path to the file
Throws: IOException - for a description of exceptions thrown
public boolean isFileClosed(String src) throws IOException
Throws: IOException
public HdfsFileStatus getFileLinkInfo(String src) throws IOException
Parameters:
src - path to a file or directory
Throws: IOException - for a description of exceptions thrown
See Also: ClientProtocol.getFileLinkInfo(String)
public MD5MD5CRC32FileChecksum getFileChecksum(String src) throws IOException
Parameters:
src - The file path
Throws: IOException
See Also: DistributedFileSystem.getFileChecksum(Path)
@InterfaceAudience.Private public void clearDataEncryptionKey()
@InterfaceAudience.Private public DataEncryptionKey getDataEncryptionKey() throws IOException
Throws: IOException
public void setPermission(String src, FsPermission permission) throws IOException
Parameters:
src - path name
permission -
Throws: IOException
See Also: ClientProtocol.setPermission(String, FsPermission)
public void setOwner(String src, String username, String groupname) throws IOException
Parameters:
src - path name
username - user id
groupname - user group
Throws: IOException
See Also: ClientProtocol.setOwner(String, String, String)
public FsStatus getDiskStatus() throws IOException
Throws: IOException
See Also: ClientProtocol.getStats()
public long getMissingBlocksCount() throws IOException
Throws: IOException
public long getUnderReplicatedBlocksCount() throws IOException
Throws: IOException
public long getCorruptBlocksCount() throws IOException
Throws: IOException
public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie) throws IOException
Throws: IOException
public DatanodeInfo[] datanodeReport(HdfsConstants.DatanodeReportType type) throws IOException
Throws: IOException
public boolean setSafeMode(HdfsConstants.SafeModeAction action) throws IOException
public boolean setSafeMode(HdfsConstants.SafeModeAction action, boolean isChecked) throws IOException
Enter, leave, or get safe mode.
Parameters:
action - One of SafeModeAction.GET, SafeModeAction.ENTER and SafeModeAction.LEAVE
isChecked - If true, check only the active namenode's safemode status; otherwise check the first namenode's status.
Throws: IOException
See Also: ClientProtocol.setSafeMode(HdfsConstants.SafeModeAction, boolean)
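A small sketch of querying safe mode. Note the prose above names the actions GET/ENTER/LEAVE, while the enum constants in HdfsConstants.SafeModeAction are assumed to use the SAFEMODE_* spellings shown here:

```java
import java.io.IOException;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

class SafeModeSketch {
  // Query safe mode without changing it.
  static boolean inSafeMode(DFSClient client) throws IOException {
    return client.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET);
  }

  // Ask only the active namenode (isChecked = true) whether it is in safe mode.
  static boolean activeNamenodeInSafeMode(DFSClient client) throws IOException {
    return client.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET, true);
  }
}
```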
public String createSnapshot(String snapshotRoot, String snapshotName) throws IOException
Parameters:
snapshotRoot - The directory where the snapshot is to be taken
snapshotName - Name of the snapshot
Throws: IOException
See Also: ClientProtocol.createSnapshot(String, String)
public void deleteSnapshot(String snapshotRoot, String snapshotName) throws IOException
Parameters:
snapshotRoot - The snapshottable directory that the to-be-deleted snapshot belongs to
snapshotName - The name of the to-be-deleted snapshot
Throws: IOException
See Also: ClientProtocol.deleteSnapshot(String, String)
public void renameSnapshot(String snapshotDir, String snapshotOldName, String snapshotNewName) throws IOException
Parameters:
snapshotDir - The directory path where the snapshot was taken
snapshotOldName - Old name of the snapshot
snapshotNewName - New name of the snapshot
Throws: IOException
See Also: ClientProtocol.renameSnapshot(String, String, String)
public SnapshottableDirectoryStatus[] getSnapshottableDirListing() throws IOException
Throws: IOException
See Also: ClientProtocol.getSnapshottableDirListing()
public void allowSnapshot(String snapshotRoot) throws IOException
Throws: IOException
See Also: ClientProtocol.allowSnapshot(String snapshotRoot)
public void disallowSnapshot(String snapshotRoot) throws IOException
Throws: IOException
See Also: ClientProtocol.disallowSnapshot(String snapshotRoot)
public SnapshotDiffReport getSnapshotDiffReport(String snapshotDir, String fromSnapshot, String toSnapshot) throws IOException
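Putting the snapshot methods together, an illustrative workflow: allow snapshots on a directory (typically an administrative operation), take two snapshots, diff them, then clean up. The snapshot names are arbitrary:

```java
import java.io.IOException;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;

class SnapshotSketch {
  static void snapshotAndDiff(DFSClient client, String dir) throws IOException {
    client.allowSnapshot(dir);
    String s0 = client.createSnapshot(dir, "s0");
    // ... writes under dir would normally happen here ...
    String s1 = client.createSnapshot(dir, "s1");
    System.out.println("created " + s0 + " and " + s1);

    // Differences between the two snapshots of dir.
    SnapshotDiffReport diff = client.getSnapshotDiffReport(dir, "s0", "s1");
    System.out.println(diff);

    client.deleteSnapshot(dir, "s0");
    client.renameSnapshot(dir, "s1", "baseline");
  }
}
```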
public void refreshNodes() throws IOException
Refresh the hosts and exclude files. See ClientProtocol.refreshNodes() for more details.
Throws: IOException
See Also: ClientProtocol.refreshNodes()
public void metaSave(String pathname) throws IOException
Throws: IOException
See Also: ClientProtocol.metaSave(String)
public void setBalancerBandwidth(long bandwidth) throws IOException
Requests the namenode to tell all datanodes to use a new, non-persistent bandwidth value for dfs.balance.bandwidthPerSec. See ClientProtocol.setBalancerBandwidth(long) for more details.
Throws: IOException
See Also: ClientProtocol.setBalancerBandwidth(long)
public void finalizeUpgrade() throws IOException
Throws: IOException
See Also: ClientProtocol.finalizeUpgrade()
@Deprecated public boolean mkdirs(String src) throws IOException
Throws: IOException
public boolean mkdirs(String src, FsPermission permission, boolean createParent) throws IOException
Create a directory (or hierarchy of directories) with the given name and permission.
Parameters:
src - The path of the directory being created
permission - The permission of the directory being created. If permission == null, use FsPermission.getDefault().
createParent - create missing parent directory if true
Throws: IOException
See Also: ClientProtocol.mkdirs(String, FsPermission, boolean)
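A one-line directory creation sketch (the octal permission is illustrative; passing null would use FsPermission.getDefault() as noted above):

```java
import java.io.IOException;

import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSClient;

class MkdirSketch {
  // Create a directory tree, creating missing parents along the way.
  static boolean makeDirs(DFSClient client, String path) throws IOException {
    return client.mkdirs(path, new FsPermission((short) 0755), true /* createParent */);
  }
}
```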
public boolean primitiveMkdir(String src, FsPermission absPermission) throws IOException
Same as mkdirs(String, FsPermission, boolean) except that the permission has already been masked against umask.
Throws: IOException
public boolean primitiveMkdir(String src, FsPermission absPermission, boolean createParent) throws IOException
Same as mkdirs(String, FsPermission, boolean) except that the permission has already been masked against umask.
Throws: IOException
public void setTimes(String src, long mtime, long atime) throws IOException
Set the modification and access time of a file.
Throws: IOException
See Also: ClientProtocol.setTimes(String, long, long)
public org.apache.hadoop.hdfs.DomainSocketFactory getDomainSocketFactory()
public void disableLegacyBlockReaderLocal()
public boolean useLegacyBlockReaderLocal()
public CachingStrategy getDefaultReadCachingStrategy()
public CachingStrategy getDefaultWriteCachingStrategy()
Copyright © 2013 Apache Software Foundation. All rights reserved.