Usage examples of the org.apache.hadoop.fs.CommonConfigurationKeys class

The following examples show how to use the org.apache.hadoop.fs.CommonConfigurationKeys API; follow each project link to view the full source code on GitHub.
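As a quick orientation, every example below follows the same pattern: constants from CommonConfigurationKeys are used as Configuration keys instead of hard-coded strings. Here is a minimal, self-contained sketch of that pattern; the class name and the values set are illustrative, but both keys are real constants that appear in the examples that follow.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;

public class CommonConfigurationKeysSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Write a setting under a well-known key rather than a raw string.
    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);

    // Read settings back, supplying defaults for unset keys.
    boolean authz = conf.getBoolean(
        CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false);
    int bufferSize = conf.getInt(
        CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);

    System.out.println("authorization=" + authz
        + ", io.file.buffer.size=" + bufferSize);
  }
}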

Example 1  Project: big-c  File: DFSAdmin.java
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
    throws IOException {
  InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  // Get the current configuration
  Configuration conf = getConf();

  // For a datanode proxy, the server principal should be the datanode's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));

  // Create the client
  ClientDatanodeProtocol dnProtocol =
      DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
          NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
  return dnProtocol;
}
 
Example 2  Project: hadoop  File: TestSaslRPC.java
@Test
public void testPingInterval() throws Exception {
  Configuration newConf = new Configuration(conf);
  newConf.set(SERVER_PRINCIPAL_KEY, SERVER_PRINCIPAL_1);
  // The ping interval must be set on newConf: remoteId is built from
  // newConf, and changes to conf are not visible to its copy.
  newConf.setInt(CommonConfigurationKeys.IPC_PING_INTERVAL_KEY,
      CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT);

  // set doPing to true
  newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
  ConnectionId remoteId = ConnectionId.getConnectionId(
      new InetSocketAddress(0), TestSaslProtocol.class, null, 0, newConf);
  assertEquals(CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT,
      remoteId.getPingInterval());
  // set doPing to false
  newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, false);
  remoteId = ConnectionId.getConnectionId(
      new InetSocketAddress(0), TestSaslProtocol.class, null, 0, newConf);
  assertEquals(0, remoteId.getPingInterval());
}
 
Example 3  Project: big-c  File: TestServiceAuthorization.java
@Test
public void testDefaultAcl() {
  ServiceAuthorizationManager serviceAuthorizationManager = 
      new ServiceAuthorizationManager();
  Configuration conf = new Configuration();
  // test without setting a default acl
  conf.set(ACL_CONFIG, "user1 group1");
  serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
  AccessControlList acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol.class);
  assertEquals("user1 group1", acl.getAclString());
  acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol1.class);
  assertEquals(AccessControlList.WILDCARD_ACL_VALUE, acl.getAclString());

  // test with a default acl
  conf.set(
      CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_ACL, 
      "user2 group2");
  serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
  acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol.class);
  assertEquals("user1 group1", acl.getAclString());
  acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol1.class);
  assertEquals("user2 group2", acl.getAclString());
}
 
Example 4  Project: big-c  File: TestCodec.java
@Test
public void testCodecPoolGzipReuse() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (!ZlibFactory.isNativeZlibLoaded(conf)) {
    LOG.warn("testCodecPoolGzipReuse skipped: native libs not loaded");
    return;
  }
  GzipCodec gzc = ReflectionUtils.newInstance(GzipCodec.class, conf);
  DefaultCodec dfc = ReflectionUtils.newInstance(DefaultCodec.class, conf);
  Compressor c1 = CodecPool.getCompressor(gzc);
  Compressor c2 = CodecPool.getCompressor(dfc);
  CodecPool.returnCompressor(c1);
  CodecPool.returnCompressor(c2);
  assertTrue("Got mismatched ZlibCompressor", c2 != CodecPool.getCompressor(gzc));
}
 
Example 5  Project: big-c  File: TestRPC.java
@Test
public void testAuthorization() throws IOException {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
      true);

  // Expect to succeed
  conf.set(ACL_CONFIG, "*");
  doRPCs(conf, false);

  // Reset authorization to expect failure
  conf.set(ACL_CONFIG, "invalid invalid");
  doRPCs(conf, true);

  conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY, 2);
  // Expect to succeed
  conf.set(ACL_CONFIG, "*");
  doRPCs(conf, false);

  // Reset authorization to expect failure
  conf.set(ACL_CONFIG, "invalid invalid");
  doRPCs(conf, true);
}
 
Example 6  Project: big-c  File: TestGroupsCaching.java
@Test
public void testCacheEntriesExpire() throws Exception {
  conf.setLong(
    CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 1);
  FakeTimer timer = new FakeTimer();
  final Groups groups = new Groups(conf, timer);
  groups.cacheGroupsAdd(Arrays.asList(myGroups));
  groups.refresh();
  FakeGroupMapping.clearBlackList();

  // We make an entry
  groups.getGroups("me");
  int startingRequestCount = FakeGroupMapping.getRequestCount();

  timer.advance(20 * 1000);

  // Cache entry has expired so it results in a new fetch
  groups.getGroups("me");
  assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
}
 
Example 7  Project: big-c  File: TestGroupFallback.java
@Test
public void testGroupWithFallback() throws Exception {
  LOG.info("running 'mvn -Pnative -DTestGroupFallback clear test' will " +
      "test the normal path and 'mvn -DTestGroupFallback clear test' will" +
      " test the fall back functionality");
  Logger.getRootLogger().setLevel(Level.DEBUG);
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
      "org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback");

  Groups groups = new Groups(conf);

  String username = System.getProperty("user.name");
  List<String> groupList = groups.getGroups(username);

  LOG.info(username + " has GROUPS: " + groupList.toString());
  assertTrue(groupList.size() > 0);
}
 
Example 8  Project: hadoop  File: TestZlibCompressorDecompressor.java
@Test
public void testZlibCompressorDecompressorSetDictionary() {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (ZlibFactory.isNativeZlibLoaded(conf)) {
    Compressor zlibCompressor = ZlibFactory.getZlibCompressor(conf);
    Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);

    checkSetDictionaryNullPointerException(zlibCompressor);
    checkSetDictionaryNullPointerException(zlibDecompressor);

    checkSetDictionaryArrayIndexOutOfBoundsException(zlibDecompressor);
    checkSetDictionaryArrayIndexOutOfBoundsException(zlibCompressor);
  } else {
    assertTrue("ZlibFactory is using native libs against request",
        ZlibFactory.isNativeZlibLoaded(conf));
  }
}
 
Example 9  Project: tez  File: TestPipelinedSorter.java
public static Configuration getConf() {
  Configuration conf = new Configuration();
  conf.set("fs.defaultFS", "file:///");
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
  // To enable PipelinedSorter
  conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_SORTER_CLASS, SorterImpl.PIPELINED.name());

  conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_KEY_CLASS, Text.class.getName());
  conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_VALUE_CLASS, Text.class.getName());
  conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_PARTITIONER_CLASS, HashPartitioner.class.getName());

  conf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_ENABLE_FINAL_MERGE_IN_OUTPUT, true);

  // Set up local dirs
  if (workDir != null) {
    String localDirs = workDir.toString();
    conf.setStrings(TezRuntimeFrameworkConfigs.LOCAL_DIRS, localDirs);
  }
  return conf;
}
 
Example 10  Project: big-c  File: TestZlibCompressorDecompressor.java
(identical to Example 8 above)
 
Example 11  Project: hadoop  File: NfsExports.java
public static synchronized NfsExports getInstance(Configuration conf) {
  if (exports == null) {
    String matchHosts = conf.get(
        CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY,
        CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT);
    int cacheSize = conf.getInt(Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_KEY,
        Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_DEFAULT);
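    // Convert the configured expiry from milliseconds to nanoseconds.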
    long expirationPeriodNano = conf.getLong(
        Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY,
        Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT) * 1000 * 1000;
    try {
      exports = new NfsExports(cacheSize, expirationPeriodNano, matchHosts);
    } catch (IllegalArgumentException e) {
      LOG.error("Invalid NFS Exports provided: ", e);
      return exports;
    }
  }
  return exports;
}
 
Example 12  Project: big-c  File: TestHttpServer.java
@Test
public void testRequiresAuthorizationAccess() throws Exception {
  Configuration conf = new Configuration();
  ServletContext context = Mockito.mock(ServletContext.class);
  Mockito.when(context.getAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
  HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
  HttpServletResponse response = Mockito.mock(HttpServletResponse.class);

  //requires admin access to instrumentation, FALSE by default
  Assert.assertTrue(HttpServer2.isInstrumentationAccessAllowed(context, request, response));

  //requires admin access to instrumentation, TRUE
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true);
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
  AccessControlList acls = Mockito.mock(AccessControlList.class);
  Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
  Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
  Assert.assertFalse(HttpServer2.isInstrumentationAccessAllowed(context, request, response));
}
 
Example 13  Project: hadoop  File: TestFileCreation.java
static FSDataOutputStream createNonRecursive(FileSystem fs, Path name,
    int repl, EnumSet<CreateFlag> flag) throws IOException {
  System.out.println("createNonRecursive: Created " + name + " with " + repl
      + " replica.");
  FSDataOutputStream stm = ((DistributedFileSystem) fs).createNonRecursive(
      name, FsPermission.getDefault(), flag,
      fs.getConf().getInt(
          CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
      (short) repl, blockSize, null);
  return stm;
}
 
Example 14  Project: big-c  File: RMHAUtils.java
private static HAServiceState getHAState(YarnConfiguration yarnConf)
    throws Exception {
  HAServiceTarget haServiceTarget;
  int rpcTimeoutForChecks =
      yarnConf.getInt(CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_KEY,
          CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_DEFAULT);

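  // The HA proxy authenticates against the ResourceManager, so expect the
  // RM's Kerberos principal as the service user.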
  yarnConf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      yarnConf.get(YarnConfiguration.RM_PRINCIPAL, ""));
  haServiceTarget = new RMHAServiceTarget(yarnConf);
  HAServiceProtocol proto =
      haServiceTarget.getProxy(yarnConf, rpcTimeoutForChecks);
  HAServiceState haState = proto.getServiceStatus().getState();
  return haState;
}
 
Example 15  Project: hadoop-ozone  File: OMBucketCreateRequest.java
private BucketEncryptionInfoProto getBeinfo(
    KeyProviderCryptoExtension kmsProvider, BucketInfo bucketInfo)
    throws IOException {
  BucketEncryptionInfoProto bek = bucketInfo.getBeinfo();
  BucketEncryptionInfoProto.Builder bekb = null;
  if (kmsProvider == null) {
    throw new OMException("Invalid KMS provider, check configuration " +
        CommonConfigurationKeys.HADOOP_SECURITY_KEY_PROVIDER_PATH,
        OMException.ResultCodes.INVALID_KMS_PROVIDER);
  }
  if (bek.getKeyName() == null) {
    throw new OMException("Bucket encryption key needed.", OMException
        .ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND);
  }
  // Talk to KMS to retrieve the bucket encryption key info.
  KeyProvider.Metadata metadata = kmsProvider.getMetadata(
      bek.getKeyName());
  if (metadata == null) {
    throw new OMException("Bucket encryption key " + bek.getKeyName()
        + " doesn't exist.",
        OMException.ResultCodes.BUCKET_ENCRYPTION_KEY_NOT_FOUND);
  }
  // If the provider supports pool for EDEKs, this will fill in the pool
  kmsProvider.warmUpEncryptedKeys(bek.getKeyName());
  bekb = BucketEncryptionInfoProto.newBuilder()
      .setKeyName(bek.getKeyName())
      .setCryptoProtocolVersion(ENCRYPTION_ZONES)
      .setSuite(OMPBHelper.convert(
          CipherSuite.convert(metadata.getCipher())));
  return bekb.build();
}
 
Example 16  Project: big-c  File: HttpServer2.java
/**
 * Checks whether the user sending the HttpServletRequest has administrator
 * ACLs. If not, the response is modified to send an error to the user.
 *
 * @param response used to send the error response if the user does not have admin access
 * @return true if admin-authorized, false otherwise
 * @throws IOException if sending the error response fails
 */
public static boolean hasAdministratorAccess(
    ServletContext servletContext, HttpServletRequest request,
    HttpServletResponse response) throws IOException {
  Configuration conf =
      (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
  // If there is no authorization, anybody has administrator access.
  if (!conf.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    return true;
  }

  String remoteUser = request.getRemoteUser();
  if (remoteUser == null) {
    response.sendError(HttpServletResponse.SC_FORBIDDEN,
                       "Unauthenticated users are not " +
                       "authorized to access this page.");
    return false;
  }

  if (servletContext.getAttribute(ADMINS_ACL) != null &&
      !userHasAdministratorAccess(servletContext, remoteUser)) {
    response.sendError(HttpServletResponse.SC_FORBIDDEN, "User "
        + remoteUser + " is unauthorized to access this page.");
    return false;
  }

  return true;
}
 
Example 17  Project: big-c  File: HttpServer2.java
/**
 * Add default apps.
 * @param parent the collection of context handlers to add the apps to
 * @param appDir the application directory
 * @param conf the configuration to read servlet settings from
 * @throws IOException if a context cannot be set up
 */
protected void addDefaultApps(ContextHandlerCollection parent,
    final String appDir, Configuration conf) throws IOException {
  // set up the context for "/logs/" if "hadoop.log.dir" property is defined.
  String logDir = System.getProperty("hadoop.log.dir");
  if (logDir != null) {
    Context logContext = new Context(parent, "/logs");
    logContext.setResourceBase(logDir);
    logContext.addServlet(AdminAuthorizedServlet.class, "/*");
    if (conf.getBoolean(
        CommonConfigurationKeys.HADOOP_JETTY_LOGS_SERVE_ALIASES,
        CommonConfigurationKeys.DEFAULT_HADOOP_JETTY_LOGS_SERVE_ALIASES)) {
      @SuppressWarnings("unchecked")
      Map<String, String> params = logContext.getInitParams();
      params.put(
          "org.mortbay.jetty.servlet.Default.aliases", "true");
    }
    logContext.setDisplayName("logs");
    setContextAttributes(logContext, conf);
    addNoCacheFilter(webAppContext);
    defaultContexts.put(logContext, true);
  }
  // set up the context for "/static/*"
  Context staticContext = new Context(parent, "/static");
  staticContext.setResourceBase(appDir + "/static");
  staticContext.addServlet(DefaultServlet.class, "/*");
  staticContext.setDisplayName("static");
  setContextAttributes(staticContext, conf);
  defaultContexts.put(staticContext, true);
}
 
Example 18  Project: Bats  File: SpnegoConfig.java
private UserGroupInformation loginAndReturnUgi() throws DrillException {

    validateSpnegoConfig();

    UserGroupInformation ugi;
    try {
      // Check if security is not enabled and try to set the security parameter to login the principal.
      // After the login is performed reset the static UGI state.
      if (!UserGroupInformation.isSecurityEnabled()) {
        final Configuration newConfig = new Configuration();
        newConfig.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
            UserGroupInformation.AuthenticationMethod.KERBEROS.toString());

        if (clientNameMapping != null) {
          newConfig.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTH_TO_LOCAL, clientNameMapping);
        }

        UserGroupInformation.setConfiguration(newConfig);
        ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab);

        // Reset the original configuration for static UGI
        UserGroupInformation.setConfiguration(new Configuration());
      } else {
        // Let's not overwrite the rules here since it might be possible that CUSTOM security is configured for
        // JDBC/ODBC with default rules. If Kerberos was enabled then the correct rules must already be set
        ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab);
      }
    } catch (Exception e) {
      throw new DrillException(String.format("Login failed for %s with given keytab", principal), e);
    }
    return ugi;
  }
 
Example 19  Project: hadoop  File: Lz4Codec.java
/**
 * Create a {@link CompressionInputStream} that will read from the given
 * {@link InputStream} with the given {@link Decompressor}.
 *
 * @param in           the stream to read compressed bytes from
 * @param decompressor decompressor to use
 * @return a stream to read uncompressed bytes from
 * @throws IOException
 */
@Override
public CompressionInputStream createInputStream(InputStream in,
                                                Decompressor decompressor)
    throws IOException {
  if (!isNativeCodeLoaded()) {
    throw new RuntimeException("native lz4 library not available");
  }

  return new BlockDecompressorStream(in, decompressor, conf.getInt(
      CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY,
      CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT));
}
 
Example 20  Project: hadoop  File: TestGroupsCaching.java
@Before
public void setup() {
  FakeGroupMapping.resetRequestCount();
  ExceptionalGroupMapping.resetRequestCount();

  conf = new Configuration();
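  // Swap in the in-test FakeGroupMapping (in place of the shell-based
  // default) so group lookups can be counted and controlled.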
  conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
    FakeGroupMapping.class,
    ShellBasedUnixGroupsMapping.class);
}
 
Example 21  Project: hadoop  File: TestFSInputChecker.java
private void testSeekAndRead(FileSystem fileSys)
    throws IOException {
  Path file = new Path("try.dat");
  writeFile(fileSys, file);
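  // Open the file with the configured I/O buffer size (4096 bytes if unset).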
  stm = fileSys.open(
      file,
      fileSys.getConf().getInt(
          CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096));
  checkSeekAndRead();
  stm.close();
  cleanupFile(fileSys, file);
}
 
Example 22  Project: big-c  File: HAAdmin.java
@Override
public void setConf(Configuration conf) {
  super.setConf(conf);
  if (conf != null) {
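    // Cache the failover-controller CLI health-check timeout for later RPCs.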
    rpcTimeoutForChecks = conf.getInt(
        CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_KEY,
        CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_DEFAULT);
  }
}
 
Example 23  Project: big-c  File: TestUGILoginFromKeytab.java
@Before
public void startMiniKdc() throws Exception {
  // This setting below is required. If not enabled, UGI will abort
  // any attempt to loginUserFromKeytab.
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
      "kerberos");
  UserGroupInformation.setConfiguration(conf);
  workDir = folder.getRoot();
  kdc = new MiniKdc(MiniKdc.createConf(), workDir);
  kdc.start();
}
 
Example 24
@Override
public MiniRaftClusterWithHadoopRpc newCluster(int numPeers) {
  final Configuration conf = new Configuration();
  HadoopConfigKeys.Ipc.setHandlers(conf, 20);
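  // Test-friendly IPC settings: no connect retries, a deep handler queue,
  // and a 1-second RPC timeout.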
  conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
  conf.setInt(CommonConfigurationKeys.IPC_SERVER_HANDLER_QUEUE_SIZE_KEY, 1000);
  conf.setInt(CommonConfigurationKeys.IPC_CLIENT_RPC_TIMEOUT_KEY, 1000);
  return MiniRaftClusterWithHadoopRpc.FACTORY.newCluster(
      numPeers, getProperties(), conf);
}
 
Example 25  Project: big-c  File: TestActiveStandbyElector.java
ActiveStandbyElectorTester(String hostPort, int timeout, String parent,
    List<ACL> acl, ActiveStandbyElectorCallback app) throws IOException,
    KeeperException {
  super(hostPort, timeout, parent, acl, Collections
      .<ZKAuthInfo> emptyList(), app,
      CommonConfigurationKeys.HA_FC_ELECTOR_ZK_OP_RETRIES_DEFAULT);
}
 
Example 26  Project: hadoop  File: Lz4Codec.java
/**
 * Create a new {@link Compressor} for use by this {@link CompressionCodec}.
 *
 * @return a new compressor for use by this codec
 */
@Override
public Compressor createCompressor() {
  if (!isNativeCodeLoaded()) {
    throw new RuntimeException("native lz4 library not available");
  }
  int bufferSize = conf.getInt(
      CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY,
      CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_DEFAULT);
  boolean useLz4HC = conf.getBoolean(
      CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_KEY,
      CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZ4_USELZ4HC_DEFAULT);
  return new Lz4Compressor(bufferSize, useLz4HC);
}
 
Example 27  Project: big-c  File: JournalNodeRpcServer.java
JournalNodeRpcServer(Configuration conf, JournalNode jn) throws IOException {
  this.jn = jn;
  
  Configuration confCopy = new Configuration(conf);
  
  // Ensure that nagling doesn't kick in, which could cause latency issues.
  confCopy.setBoolean(
      CommonConfigurationKeysPublic.IPC_SERVER_TCPNODELAY_KEY,
      true);
  
  InetSocketAddress addr = getAddress(confCopy);
  RPC.setProtocolEngine(confCopy, QJournalProtocolPB.class,
      ProtobufRpcEngine.class);
  QJournalProtocolServerSideTranslatorPB translator =
      new QJournalProtocolServerSideTranslatorPB(this);
  BlockingService service = QJournalProtocolService
      .newReflectiveBlockingService(translator);
  
  this.server = new RPC.Builder(confCopy)
    .setProtocol(QJournalProtocolPB.class)
    .setInstance(service)
    .setBindAddress(addr.getHostName())
    .setPort(addr.getPort())
    .setNumHandlers(HANDLER_COUNT)
    .setVerbose(false)
    .build();

  // set service-level authorization security policy
  if (confCopy.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    server.refreshServiceAcl(confCopy, new HDFSPolicyProvider());
  }
}
 
Example 28  Project: hadoop  File: TestActiveStandbyElectorRealZK.java
@Override
public void setUp() throws Exception {
  super.setUp();
  
  zkServer = getServer(serverFactory);

  for (int i = 0; i < NUM_ELECTORS; i++) {
    cbs[i] =  Mockito.mock(ActiveStandbyElectorCallback.class);
    appDatas[i] = Ints.toByteArray(i);
    electors[i] = new ActiveStandbyElector(hostPort, 5000, PARENT_DIR,
        Ids.OPEN_ACL_UNSAFE, Collections.<ZKAuthInfo> emptyList(), cbs[i],
        CommonConfigurationKeys.HA_FC_ELECTOR_ZK_OP_RETRIES_DEFAULT);
  }
}
 
Example 29  Project: big-c  File: TestCodec.java
@Test
public void testCodecPoolCompressorReinit() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (ZlibFactory.isNativeZlibLoaded(conf)) {
    GzipCodec gzc = ReflectionUtils.newInstance(GzipCodec.class, conf);
    gzipReinitTest(conf, gzc);
  } else {
    LOG.warn("testCodecPoolCompressorReinit skipped: native libs not loaded");
  }
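  // Disable native libs and repeat the reinit test with the pure-Java DefaultCodec.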
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, false);
  DefaultCodec dfc = ReflectionUtils.newInstance(DefaultCodec.class, conf);
  gzipReinitTest(conf, dfc);
}
 
Example 30  Project: big-c  File: HttpServer.java
/**
 * Checks whether the user sending the HttpServletRequest has administrator
 * ACLs. If not, the response is modified to send an error to the user.
 *
 * @param servletContext the context holding the configuration and admin ACLs
 * @param request the request whose remote user is checked
 * @param response used to send the error response if the user does not have admin access
 * @return true if admin-authorized, false otherwise
 * @throws IOException if sending the error response fails
 */
public static boolean hasAdministratorAccess(
    ServletContext servletContext, HttpServletRequest request,
    HttpServletResponse response) throws IOException {
  Configuration conf =
      (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
  // If there is no authorization, anybody has administrator access.
  if (!conf.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    return true;
  }

  String remoteUser = request.getRemoteUser();
  if (remoteUser == null) {
    response.sendError(HttpServletResponse.SC_UNAUTHORIZED,
                       "Unauthenticated users are not " +
                       "authorized to access this page.");
    return false;
  }
  
  if (servletContext.getAttribute(ADMINS_ACL) != null &&
      !userHasAdministratorAccess(servletContext, remoteUser)) {
    response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "User "
        + remoteUser + " is unauthorized to access this page.");
    return false;
  }

  return true;
}
 