The following are example usages of org.apache.hadoop.security.UserGroupInformation drawn from Apache Hadoop and related projects; the original sources can be viewed on GitHub.
public static void main(String[] args) throws IOException {
// Create an HDFS directory via the Java API
String rootPath = "hdfs://nameservice1";
Path p = new Path(rootPath + "/tmp/newDir3");
Configuration conf = new Configuration();
conf.addResource("core-site.xml");
conf.addResource("hdfs-site.xml");
conf.addResource("yarn-site.xml");
// If Kerberos is not enabled, comment out the following two lines
UserGroupInformation.setConfiguration(conf);
UserGroupInformation.loginUserFromKeytab("[email protected]","E:\\星环\\hdfs.keytab");
FileSystem fs = p.getFileSystem(conf);
boolean b = fs.mkdirs(p);
System.out.println(b);
fs.close();
}
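An equivalent approach, sketched here as an assumption rather than taken from the snippet above, is to obtain a UGI from the keytab and run the filesystem call inside doAs (conf, the principal, and the keytab path are placeholders):
// Minimal sketch: log in from a keytab and create the directory as that identity.
// The principal and keytab path below are hypothetical.
UserGroupInformation.setConfiguration(conf);
UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
    "hdfs@EXAMPLE.COM", "/etc/security/keytabs/hdfs.keytab");
boolean created = ugi.doAs((PrivilegedExceptionAction<Boolean>) () -> {
  try (FileSystem fs = FileSystem.get(java.net.URI.create("hdfs://nameservice1"), conf)) {
    return fs.mkdirs(new Path("/tmp/newDir3"));
  }
});
System.out.println(created);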
/**
* Create a new cache pool based on a CachePoolInfo object and the defaults.
* We will fill in information that was not supplied according to the
* defaults.
*/
static CachePool createFromInfoAndDefaults(CachePoolInfo info)
throws IOException {
UserGroupInformation ugi = null;
String ownerName = info.getOwnerName();
if (ownerName == null) {
ugi = NameNode.getRemoteUser();
ownerName = ugi.getShortUserName();
}
String groupName = info.getGroupName();
if (groupName == null) {
if (ugi == null) {
ugi = NameNode.getRemoteUser();
}
groupName = ugi.getPrimaryGroupName();
}
FsPermission mode = (info.getMode() == null) ?
FsPermission.getCachePoolDefault() : info.getMode();
long limit = info.getLimit() == null ?
CachePoolInfo.DEFAULT_LIMIT : info.getLimit();
long maxRelativeExpiry = info.getMaxRelativeExpiryMs() == null ?
CachePoolInfo.DEFAULT_MAX_RELATIVE_EXPIRY :
info.getMaxRelativeExpiryMs();
return new CachePool(info.getPoolName(),
ownerName, groupName, mode, limit, maxRelativeExpiry);
}
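From a client's perspective, the same defaults apply when a pool is created through DistributedFileSystem; a minimal sketch, assuming an already-open dfs handle and a made-up pool name and settings:
// Sketch: create a cache pool and let the NameNode fill in owner/group from the
// caller's UGI, as createFromInfoAndDefaults above does.
CachePoolInfo poolInfo = new CachePoolInfo("examplePool")
    .setMode(new FsPermission((short) 0755))
    .setLimit(1024L * 1024 * 1024);
dfs.addCachePool(poolInfo);   // dfs: an org.apache.hadoop.hdfs.DistributedFileSystem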
/** HELPER FUNCTIONS **/
private RangerPrestoAccessRequest createAccessRequest(RangerPrestoResource resource, SystemSecurityContext context, PrestoAccessType accessType) {
Set<String> userGroups = null;
if (useUgi) {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(context.getIdentity().getUser());
String[] groups = ugi != null ? ugi.getGroupNames() : null;
if (groups != null && groups.length > 0) {
userGroups = new HashSet<>(Arrays.asList(groups));
}
} else {
userGroups = context.getIdentity().getGroups();
}
RangerPrestoAccessRequest request = new RangerPrestoAccessRequest(
resource,
context.getIdentity().getUser(),
userGroups,
accessType
);
return request;
}
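When useUgi is set, the groups come from Hadoop's configured group mapping rather than from the Presto identity; roughly, that lookup looks like the following (the user name is a placeholder):
// Sketch: resolve group names for a remote user via the configured
// hadoop.security.group.mapping provider. "alice" is a hypothetical user.
UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alice");
Set<String> groups = new HashSet<>(Arrays.asList(ugi.getGroupNames()));
System.out.println("Groups for " + ugi.getShortUserName() + ": " + groups);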
@POST
@Path("/delegation-token")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
@Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public Response postDelegationToken(DelegationToken tokenData,
@Context HttpServletRequest hsr) throws AuthorizationException,
IOException, InterruptedException, Exception {
init();
UserGroupInformation callerUGI;
try {
callerUGI = createKerberosUserGroupInformation(hsr);
} catch (YarnException ye) {
return Response.status(Status.FORBIDDEN).entity(ye.getMessage()).build();
}
return createDelegationToken(tokenData, hsr, callerUGI);
}
@Test
public void testSetPermissionOnFile() throws Exception {
Path newFile = new Path("testPermission");
OutputStream output = fs.create(newFile);
output.write(13);
output.close();
FsPermission newPermission = new FsPermission((short) 0700);
fs.setPermission(newFile, newPermission);
FileStatus newStatus = fs.getFileStatus(newFile);
assertNotNull(newStatus);
assertEquals(newPermission, newStatus.getPermission());
assertEquals("supergroup", newStatus.getGroup());
assertEquals(UserGroupInformation.getCurrentUser().getShortUserName(),
newStatus.getOwner());
// Don't check the file length for page blobs. Only block blobs
// provide the actual length of bytes written.
if (!(this instanceof TestNativeAzureFSPageBlobLive)) {
assertEquals(1, newStatus.getLen());
}
}
@Override
public void serviceInit(Configuration conf) throws Exception {
if (rmContext.isHAEnabled()) {
autoFailoverEnabled = HAUtil.isAutomaticFailoverEnabled(conf);
if (autoFailoverEnabled) {
if (HAUtil.isAutomaticFailoverEmbedded(conf)) {
embeddedElector = createEmbeddedElectorService();
addIfService(embeddedElector);
}
}
}
masterServiceBindAddress = conf.getSocketAddr(
YarnConfiguration.RM_BIND_HOST,
YarnConfiguration.RM_ADMIN_ADDRESS,
YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS,
YarnConfiguration.DEFAULT_RM_ADMIN_PORT);
daemonUser = UserGroupInformation.getCurrentUser();
authorizer = YarnAuthorizationProvider.getInstance(conf);
authorizer.setAdmins(getAdminAclList(conf), UserGroupInformation
.getCurrentUser());
rmId = conf.get(YarnConfiguration.RM_HA_ID);
super.serviceInit(conf);
}
public static ByteBuffer getTokens(UserGroupInformation ugi, Token<StramDelegationTokenIdentifier> delegationToken)
{
try {
Collection<Token<? extends TokenIdentifier>> tokens = ugi.getCredentials().getAllTokens();
Credentials credentials = new Credentials();
for (Token<? extends TokenIdentifier> token : tokens) {
if (!token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
credentials.addToken(token.getService(), token);
LOG.debug("Passing container token {}", token);
}
}
credentials.addToken(delegationToken.getService(), delegationToken);
DataOutputBuffer dataOutput = new DataOutputBuffer();
credentials.writeTokenStorageToStream(dataOutput);
byte[] tokenBytes = dataOutput.getData();
ByteBuffer cTokenBuf = ByteBuffer.wrap(tokenBytes);
return cTokenBuf.duplicate();
} catch (IOException e) {
throw new RuntimeException("Error generating delegation token", e);
}
}
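On the consuming side (for example in a launched container), the buffer can be decoded back into Credentials; a minimal sketch, assuming tokenBuffer is the ByteBuffer returned by getTokens:
// Sketch: deserialize the token buffer produced above and attach the tokens
// to the current UGI.
DataInputByteBuffer dib = new DataInputByteBuffer();
dib.reset(tokenBuffer);
Credentials restored = new Credentials();
restored.readTokenStorageStream(dib);
UserGroupInformation.getCurrentUser().addCredentials(restored);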
private void displayQueueAclsInfoForCurrentUser() throws IOException {
QueueAclsInfo[] queueAclsInfoList = jc.getQueueAclsForCurrentUser();
UserGroupInformation ugi = UserGroupInformation.readFrom(getConf());
if (queueAclsInfoList.length > 0) {
System.out.println("Queue acls for user : "
+ ugi.getUserName());
System.out.println("\nQueue Operations");
System.out.println("=====================");
for (QueueAclsInfo queueInfo : queueAclsInfoList) {
System.out.print(queueInfo.getQueueName() + " ");
String[] ops = queueInfo.getOperations();
int max = ops.length - 1;
for (int j = 0; j < ops.length; j++) {
System.out.print(ops[j].replaceFirst("acl-", ""));
if (j < max) {
System.out.print(",");
}
}
System.out.println();
}
} else {
System.out.println("User " +
ugi.getUserName() +
" does not have access to any queue. \n");
}
}
@Test
public void testReadTablesAsGroupIT() throws Exception {
final Configuration conf = HBaseConfiguration.create();
conf.set("hbase.zookeeper.quorum", "localhost");
conf.set("hbase.zookeeper.property.clientPort", "" + port);
conf.set("zookeeper.znode.parent", "/hbase-unsecure");
String user = "IT";
UserGroupInformation ugi = UserGroupInformation.createUserForTesting(user, new String[] {"IT"});
ugi.doAs(new PrivilegedExceptionAction<Void>() {
public Void run() throws Exception {
Connection conn = ConnectionFactory.createConnection(conf);
Admin admin = conn.getAdmin();
HTableDescriptor[] tableDescriptors = admin.listTables();
for (HTableDescriptor desc : tableDescriptors) {
LOG.info("Found table:[" + desc.getTableName().getNameAsString() + "]");
}
Assert.assertEquals(0, tableDescriptors.length);
conn.close();
return null;
}
});
}
@Override
public HadoopInputSplit[] createInputSplits(int minNumSplits)
throws IOException {
configuration.setInt("mapreduce.input.fileinputformat.split.minsize", minNumSplits);
JobContext jobContext = new JobContextImpl(configuration, new JobID());
jobContext.getCredentials().addAll(this.credentials);
Credentials currentUserCreds = getCredentialsFromUGI(UserGroupInformation.getCurrentUser());
if (currentUserCreds != null) {
jobContext.getCredentials().addAll(currentUserCreds);
}
List<org.apache.hadoop.mapreduce.InputSplit> splits;
try {
splits = this.mapreduceInputFormat.getSplits(jobContext);
} catch (InterruptedException e) {
throw new IOException("Could not get Splits.", e);
}
HadoopInputSplit[] hadoopInputSplits = new HadoopInputSplit[splits.size()];
for (int i = 0; i < hadoopInputSplits.length; i++) {
hadoopInputSplits[i] = new HadoopInputSplit(i, splits.get(i), jobContext);
}
return hadoopInputSplits;
}
/**
* Test method 'taskCounters'. Should print message about error or set CountersPage class for rendering
*/
@Test
public void testGetTaskCounters() {
when(job.checkAccess(any(UserGroupInformation.class), any(JobACL.class)))
.thenReturn(false);
appController.taskCounters();
verify(appController.response()).setContentType(MimeType.TEXT);
assertEquals(
"Access denied: User user does not have permission to view job job_01_01",
appController.getData());
when(job.checkAccess(any(UserGroupInformation.class), any(JobACL.class)))
.thenReturn(true);
appController.getProperty().remove(AMParams.TASK_ID);
appController.taskCounters();
assertEquals(
"Access denied: User user does not have permission to view job job_01_01missing task ID",
appController.getData());
appController.getProperty().put(AMParams.TASK_ID, "task_01_01_m01_01");
appController.taskCounters();
assertEquals(CountersPage.class, appController.getClazz());
}
@Override
protected void serviceStop() throws Exception
{
super.serviceStop();
if (UserGroupInformation.isSecurityEnabled()) {
delegationTokenManager.stopThreads();
}
if (nmClient != null) {
nmClient.stop();
}
if (amRmClient != null) {
amRmClient.stop();
}
if (dnmgr != null) {
dnmgr.teardown();
}
}
@Override
public Map<ApplicationId, ApplicationReport> getAllApplications()
throws YarnException, IOException {
TimelineEntities entities = timelineDataManager.getEntities(
ApplicationMetricsConstants.ENTITY_TYPE, null, null, null, null,
null, null, Long.MAX_VALUE, EnumSet.allOf(Field.class),
UserGroupInformation.getLoginUser());
Map<ApplicationId, ApplicationReport> apps =
new LinkedHashMap<ApplicationId, ApplicationReport>();
if (entities != null && entities.getEntities() != null) {
for (TimelineEntity entity : entities.getEntities()) {
try {
ApplicationReportExt app =
generateApplicationReport(entity, ApplicationReportField.ALL);
apps.put(app.appReport.getApplicationId(), app.appReport);
} catch (Exception e) {
LOG.error("Error on generating application report for " +
entity.getEntityId(), e);
}
}
}
return apps;
}
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response
) throws ServletException, IOException {
final ServletContext context = getServletContext();
final Configuration conf = NameNodeHttpServer.getConfFromContext(context);
final UserGroupInformation ugi = getUGI(request, conf);
final NameNode namenode = NameNodeHttpServer.getNameNodeFromContext(
context);
final DatanodeID datanode = NamenodeJspHelper.getRandomDatanode(namenode);
try {
response.sendRedirect(
createRedirectURL(ugi, datanode, request, namenode).toString());
} catch (IOException e) {
response.sendError(400, e.getMessage());
}
}
@Test
public void testAuthorizedUser() throws Exception {
UserGroupInformation admin = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
USER_ADMIN_STR, KEYTAB_FILE.getAbsolutePath());
admin.doAs(new PrivilegedExceptionAction<Void>() {
@Override public Void run() throws Exception {
// Check the expected content is present in the http response
String expectedContent = "Get Log Level";
Pair<Integer,String> pair = getLogLevelPage();
assertEquals(HttpURLConnection.HTTP_OK, pair.getFirst().intValue());
assertTrue("expected=" + expectedContent + ", content=" + pair.getSecond(),
pair.getSecond().contains(expectedContent));
return null;
}
});
}
protected RecordWriter getRecordWriter(HiveEndPoint endPoint, UserGroupInformation ugi, HiveConf hiveConf) throws StreamingException, IOException, InterruptedException {
if (ugi == null) {
return new StrictJsonWriter(endPoint, hiveConf);
} else {
try {
return ugi.doAs((PrivilegedExceptionAction<StrictJsonWriter>) () -> new StrictJsonWriter(endPoint, hiveConf));
} catch (UndeclaredThrowableException e) {
Throwable cause = e.getCause();
if (cause instanceof StreamingException) {
throw (StreamingException) cause;
} else {
throw e;
}
}
}
}
/** Helper method to create DrillFileSystem */
private static DrillFileSystem createFileSystem(UserGroupInformation proxyUserUgi, final Configuration fsConf,
final OperatorStats stats) {
DrillFileSystem fs;
try {
fs = proxyUserUgi.doAs((PrivilegedExceptionAction<DrillFileSystem>) () -> {
logger.trace("Creating DrillFileSystem for proxy user: " + UserGroupInformation.getCurrentUser());
return new DrillFileSystem(fsConf, stats);
});
} catch (InterruptedException | IOException e) {
final String errMsg = "Failed to create DrillFileSystem for proxy user: " + e.getMessage();
logger.error(errMsg, e);
throw new DrillRuntimeException(errMsg, e);
}
return fs;
}
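The proxy UGI handed to createFileSystem is typically built by impersonating the end user on top of the service's login user; a sketch, with the user name and the proxy-user configuration assumed:
// Sketch: impersonate a hypothetical end user ("alice") from the service's login user.
// Requires the hadoop.proxyuser.* settings to permit the impersonation.
UserGroupInformation proxyUgi = UserGroupInformation.createProxyUser(
    "alice", UserGroupInformation.getLoginUser());
DrillFileSystem fs = createFileSystem(proxyUgi, fsConf, null /* no OperatorStats */);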
/**
* Return the trash root for this path.
* @return trash root for this path.
*/
public Path getTrashRoot() {
if (!this.isKey()) {
throw new RuntimeException("Volume or bucket doesn't have trash root.");
}
try {
String username = UserGroupInformation.getCurrentUser().getUserName();
final Path pathRoot = new Path(
OZONE_OFS_URI_SCHEME, authority, OZONE_URI_DELIMITER);
final Path pathToVolume = new Path(pathRoot, volumeName);
final Path pathToBucket = new Path(pathToVolume, bucketName);
final Path pathToTrash = new Path(pathToBucket, TRASH_PREFIX);
return new Path(pathToTrash, username);
} catch (IOException ex) {
throw new RuntimeException("getTrashRoot failed.", ex);
}
}
@Private
@VisibleForTesting
protected ContainerManagementProtocol newProxy(final YarnRPC rpc,
String containerManagerBindAddr, ContainerId containerId, Token token)
throws InvalidToken {
if (token == null) {
throw new InvalidToken("No NMToken sent for "
+ containerManagerBindAddr);
}
final InetSocketAddress cmAddr =
NetUtils.createSocketAddr(containerManagerBindAddr);
LOG.info("Opening proxy : " + containerManagerBindAddr);
// the user passed to createRemoteUser here has to be the ApplicationAttemptId string
UserGroupInformation user =
UserGroupInformation.createRemoteUser(containerId
.getApplicationAttemptId().toString());
org.apache.hadoop.security.token.Token<NMTokenIdentifier> nmToken =
ConverterUtils.convertFromYarn(token, cmAddr);
user.addToken(nmToken);
return NMProxy.createNMProxy(conf, ContainerManagementProtocol.class,
user, rpc, cmAddr);
}
private static void validateConf(Configuration conf) {
String authHandlerName = conf.get(ServerConfig.SENTRY_WEB_SECURITY_TYPE);
Preconditions.checkNotNull(authHandlerName, "Web authHandler should not be null.");
String allowUsers = conf.get(ServerConfig.SENTRY_WEB_SECURITY_ALLOW_CONNECT_USERS);
Preconditions.checkNotNull(allowUsers, "Allow connect user(s) should not be null.");
if (ServerConfig.SENTRY_WEB_SECURITY_TYPE_KERBEROS.equalsIgnoreCase(authHandlerName)) {
String principal = conf.get(ServerConfig.SENTRY_WEB_SECURITY_PRINCIPAL);
Preconditions.checkNotNull(principal, "Kerberos principal should not be null.");
Preconditions.checkArgument(principal.length() != 0, "Kerberos principal is not right.");
String keytabFile = conf.get(ServerConfig.SENTRY_WEB_SECURITY_KEYTAB);
Preconditions.checkNotNull(keytabFile, "Keytab File should not be null.");
Preconditions.checkArgument(keytabFile.length() != 0, "Keytab File is not right.");
try {
UserGroupInformation.setConfiguration(conf);
String hostPrincipal = SecurityUtil.getServerPrincipal(principal, ServerConfig.RPC_ADDRESS_DEFAULT);
UserGroupInformation.loginUserFromKeytab(hostPrincipal, keytabFile);
} catch (IOException ex) {
throw new IllegalArgumentException("Can't use Kerberos authentication, principal ["
+ principal + "] keytab [" + keytabFile + "]", ex);
}
LOGGER.info("Using Kerberos authentication, principal ["
+ principal + "] keytab [" + keytabFile + "]");
}
}
public long renewDelegationToken(final Token<?> token) throws IOException {
try {
return UserGroupInformation.getCurrentUser().doAs(
new PrivilegedExceptionAction<Long>() {
@Override
public Long run() throws Exception {
return authURL.renewDelegationToken(uri.toURL(), authToken);
}
}
);
} catch (Exception ex) {
if (ex instanceof IOException) {
throw (IOException) ex;
} else {
throw new IOException(ex);
}
}
}
protected UserGroupInformation getUserGroupInformation() {
getLogger().trace("getting UGI instance");
if (hdfsResources.get().getKerberosUser() != null) {
// if there's a KerberosUser associated with this UGI, check the TGT and relogin if it is close to expiring
KerberosUser kerberosUser = hdfsResources.get().getKerberosUser();
getLogger().debug("kerberosUser is " + kerberosUser);
try {
getLogger().debug("checking TGT on kerberosUser " + kerberosUser);
kerberosUser.checkTGTAndRelogin();
} catch (LoginException e) {
throw new ProcessException("Unable to relogin with kerberos credentials for " + kerberosUser.getPrincipal(), e);
}
} else {
getLogger().debug("kerberosUser was null, will not refresh TGT with KerberosUser");
}
return hdfsResources.get().getUserGroupInformation();
}
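If the UGI was logged in directly from a keytab (without a KerberosUser wrapper), UserGroupInformation offers the same relogin check itself; a minimal sketch:
// Sketch: refresh the TGT directly on a keytab-based UGI.
UserGroupInformation ugi = hdfsResources.get().getUserGroupInformation();
if (ugi != null && UserGroupInformation.isSecurityEnabled()) {
  ugi.checkTGTAndReloginFromKeytab();   // relogins only when the ticket is near expiry
}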
private <T> T doAsUser(UserGroupInformation ugi, Callable<T> task) throws Exception {
return ugi.doAs(new PrivilegedExceptionAction<T>() {
@Override
public T run() throws Exception {
return task.call();
}
});
}
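A typical call site for the doAsUser helper above might look like this (conf, ugi, and the path are placeholders):
// Sketch: list a directory as the given user via the helper above.
FileStatus[] listing = doAsUser(ugi,
    () -> FileSystem.get(conf).listStatus(new Path("/user/alice")));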
/** Handle HTTP DELETE request. */
@DELETE
@Path("{" + UriFsPathParam.NAME + ":.*}")
@Produces(MediaType.APPLICATION_JSON)
public Response delete(
@Context final UserGroupInformation ugi,
@QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
final DelegationParam delegation,
@QueryParam(UserParam.NAME) @DefaultValue(UserParam.DEFAULT)
final UserParam username,
@QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT)
final DoAsParam doAsUser,
@PathParam(UriFsPathParam.NAME) final UriFsPathParam path,
@QueryParam(DeleteOpParam.NAME) @DefaultValue(DeleteOpParam.DEFAULT)
final DeleteOpParam op,
@QueryParam(RecursiveParam.NAME) @DefaultValue(RecursiveParam.DEFAULT)
final RecursiveParam recursive,
@QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT)
final SnapshotNameParam snapshotName
) throws IOException, InterruptedException {
init(ugi, delegation, username, doAsUser, path, op, recursive, snapshotName);
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException {
try {
return delete(ugi, delegation, username, doAsUser,
path.getAbsolutePath(), op, recursive, snapshotName);
} finally {
reset();
}
}
});
}
/** Ensure the authentication method is kerberos */
private void checkKerberosAuthMethod(String msg) throws IOException {
// User invoking the call must be same as the datanode user
if (!UserGroupInformation.isSecurityEnabled()) {
return;
}
if (UserGroupInformation.getCurrentUser().getAuthenticationMethod() !=
AuthenticationMethod.KERBEROS) {
throw new AccessControlException("Error in " + msg
+ "Only kerberos based authentication is allowed.");
}
}
@Test
public void testExistingInterleavedWithNonExistentUsers() throws Exception {
String actualOutput = runTool(conf,
new String[]{"does-not-exist1", testUser1.getUserName(),
"does-not-exist2", testUser2.getUserName()}, true);
assertEquals("Show the output for only the user given, with no groups",
getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist1")) +
getExpectedOutput(testUser1) +
getExpectedOutput(UserGroupInformation.createRemoteUser("does-not-exist2")) +
getExpectedOutput(testUser2),
actualOutput);
}
@Test
public void testGetUserNullOwner() {
TestDelegationTokenIdentifier ident =
new TestDelegationTokenIdentifier(null, null, null);
UserGroupInformation ugi = ident.getUser();
assertNull(ugi);
}
protected void authorizeUser(UserGroupInformation remoteUgi,
NMTokenIdentifier nmTokenIdentifier) throws YarnException {
if (!remoteUgi.getUserName().equals(
nmTokenIdentifier.getApplicationAttemptId().toString())) {
throw RPCUtil.getRemoteException("Expected applicationAttemptId: "
+ remoteUgi.getUserName() + " Found: "
+ nmTokenIdentifier.getApplicationAttemptId());
}
}
private int refreshLoadedJobCache() throws IOException {
// Refresh the loaded job cache
Configuration conf = getConf();
InetSocketAddress address = conf.getSocketAddr(
JHAdminConfig.JHS_ADMIN_ADDRESS,
JHAdminConfig.DEFAULT_JHS_ADMIN_ADDRESS,
JHAdminConfig.DEFAULT_JHS_ADMIN_PORT);
HSAdminRefreshProtocol refreshProtocol = HSProxies.createProxy(conf,
address, HSAdminRefreshProtocol.class,
UserGroupInformation.getCurrentUser());
refreshProtocol.refreshLoadedJobCache();
return 0;
}