com.google.protobuf.BlockingRpcChannel#org.jboss.netty.channel.socket.ClientSocketChannelFactory source code examples

Listed below is example code that uses com.google.protobuf.BlockingRpcChannel together with org.jboss.netty.channel.socket.ClientSocketChannelFactory; each example can also be followed back to its full source on GitHub.
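
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the Netty 3.x pattern they all build on: a ClientSocketChannelFactory, here the NIO implementation, is handed to a ClientBootstrap, which then opens outbound connections. The host name and port are placeholders.

import java.net.InetSocketAddress;
import java.util.concurrent.Executors;

import org.jboss.netty.bootstrap.ClientBootstrap;
import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.socket.ClientSocketChannelFactory;
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;

public class ClientChannelFactoryDemo {
  public static void main(String[] args) {
    // The factory owns the boss and worker thread pools behind every outbound channel.
    ClientSocketChannelFactory factory = new NioClientSocketChannelFactory(
        Executors.newCachedThreadPool(),   // boss threads
        Executors.newCachedThreadPool());  // worker threads

    ClientBootstrap bootstrap = new ClientBootstrap(factory);
    bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
      public ChannelPipeline getPipeline() throws Exception {
        return Channels.pipeline(); // application handlers would be added here
      }
    });
    bootstrap.setOption("tcpNoDelay", true);

    // "localhost" and 8080 are placeholder values.
    ChannelFuture future = bootstrap.connect(new InetSocketAddress("localhost", 8080));
    future.awaitUninterruptibly();
    if (future.isSuccess()) {
      future.getChannel().close().awaitUninterruptibly();
    }
    // Shuts down the factory's thread pools once the client is finished.
    bootstrap.releaseExternalResources();
  }
}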

Example 1 - Project: distributedlog - File: BookKeeperClient.java
BookKeeperClient(DistributedLogConfiguration conf,
                 String name,
                 String zkServers,
                 ZooKeeperClient zkc,
                 String ledgersPath,
                 ClientSocketChannelFactory channelFactory,
                 HashedWheelTimer requestTimer,
                 StatsLogger statsLogger,
                 Optional<FeatureProvider> featureProvider) {
    this.conf = conf;
    this.name = name;
    this.zkServers = zkServers;
    this.ledgersPath = ledgersPath;
    this.passwd = conf.getBKDigestPW().getBytes(UTF_8);
    this.channelFactory = channelFactory;
    this.requestTimer = requestTimer;
    this.statsLogger = statsLogger;
    this.featureProvider = featureProvider;
    this.ownZK = null == zkc;
    if (null != zkc) {
        // reference the passed-in zookeeper client
        this.zkc = zkc;
    }
}
 
Example 2 - Project: incubator-tajo - File: Fetcher.java
public Fetcher(URI uri, File file, ClientSocketChannelFactory factory) {
  this.uri = uri;
  this.file = file;

  String scheme = uri.getScheme() == null ? "http" : uri.getScheme();
  this.host = uri.getHost() == null ? "localhost" : uri.getHost();
  this.port = uri.getPort();
  if (port == -1) {
    if (scheme.equalsIgnoreCase("http")) {
      this.port = 80;
    } else if (scheme.equalsIgnoreCase("https")) {
      this.port = 443;
    }
  }

  bootstrap = new ClientBootstrap(factory);
  bootstrap.setOption("connectTimeoutMillis", 5000L); // set 5 sec
  bootstrap.setOption("receiveBufferSize", 1048576); // set 1M
  bootstrap.setOption("tcpNoDelay", true);

  ChannelPipelineFactory pipelineFactory = new HttpClientPipelineFactory(file);
  bootstrap.setPipelineFactory(pipelineFactory);
}
 
Example 3 - Project: incubator-tajo - File: BlockingRpcClient.java
BlockingRpcClient(final Class<?> protocol,
                         final InetSocketAddress addr, ClientSocketChannelFactory factory)
    throws Exception {

  this.protocol = protocol;
  String serviceClassName = protocol.getName() + "$"
      + protocol.getSimpleName() + "Service";
  Class<?> serviceClass = Class.forName(serviceClassName);
  stubMethod = serviceClass.getMethod("newBlockingStub",
      BlockingRpcChannel.class);

  this.handler = new ClientChannelUpstreamHandler();
  pipeFactory = new ProtoPipelineFactory(handler,
      RpcResponse.getDefaultInstance());
  super.init(addr, pipeFactory, factory);
  rpcChannel = new ProxyRpcChannel();

  this.key = new RpcConnectionKey(addr, protocol, false);
}
 
Example 4 - Project: incubator-tajo - File: AsyncRpcClient.java
AsyncRpcClient(final Class<?> protocol,
                      final InetSocketAddress addr, ClientSocketChannelFactory factory)
    throws Exception {

  this.protocol = protocol;
  String serviceClassName = protocol.getName() + "$"
      + protocol.getSimpleName() + "Service";
  Class<?> serviceClass = Class.forName(serviceClassName);
  stubMethod = serviceClass.getMethod("newStub", RpcChannel.class);

  this.handler = new ClientChannelUpstreamHandler();
  pipeFactory = new ProtoPipelineFactory(handler,
      RpcResponse.getDefaultInstance());
  super.init(addr, pipeFactory, factory);
  rpcChannel = new ProxyRpcChannel();
  this.key = new RpcConnectionKey(addr, protocol, true);
}
 
Example 5 - Project: incubator-tajo - File: RpcChannelFactory.java
public static synchronized ClientSocketChannelFactory createClientChannelFactory(String name, int workerNum) {
  name = name + "-" + clientCount.incrementAndGet();
  if(LOG.isDebugEnabled()){
    LOG.debug("Create " + name + " ClientSocketChannelFactory. Worker:" + workerNum);
  }

  ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
  ThreadFactory bossFactory = builder.setNameFormat(name + " Boss #%d").build();
  ThreadFactory workerFactory = builder.setNameFormat(name + " Worker #%d").build();

  NioClientBossPool bossPool = new NioClientBossPool(Executors.newCachedThreadPool(bossFactory), 1,
      new HashedWheelTimer(), ThreadNameDeterminer.CURRENT);
  NioWorkerPool workerPool = new NioWorkerPool(Executors.newCachedThreadPool(workerFactory), workerNum,
      ThreadNameDeterminer.CURRENT);

  return new NioClientSocketChannelFactory(bossPool, workerPool);
}
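
A hedged usage sketch for the factory above: callers such as Tajo's NettyClientBase (Example 6) pass it to a ClientBootstrap, and its thread pools must eventually be released. The releaseExternalResources() call is standard Netty 3 API rather than part of the Tajo snippet, and the client name and worker count are illustrative.

// Hypothetical caller: a dedicated factory with 4 worker threads.
ClientSocketChannelFactory factory =
    RpcChannelFactory.createClientChannelFactory("MyClient", 4);
ClientBootstrap bootstrap = new ClientBootstrap(factory);
// ... connect and exchange messages ...
bootstrap.releaseExternalResources(); // also releases the factory's boss/worker pools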
 
Example 6 - Project: incubator-tajo - File: NettyClientBase.java
public void init(InetSocketAddress addr, ChannelPipelineFactory pipeFactory, ClientSocketChannelFactory factory)
    throws IOException {
  try {
    this.bootstrap = new ClientBootstrap(factory);
    this.bootstrap.setPipelineFactory(pipeFactory);
    // TODO - should be configurable
    this.bootstrap.setOption("connectTimeoutMillis", 10000);
    this.bootstrap.setOption("connectResponseTimeoutMillis", 10000);
    this.bootstrap.setOption("receiveBufferSize", 1048576 * 10);
    this.bootstrap.setOption("tcpNoDelay", true);
    this.bootstrap.setOption("keepAlive", true);

    connect(addr);
  } catch (Throwable t) {
    close();
    throw new IOException(t.getCause());
  }
}
 
Example 7 - Project: Flink-CEPplus - File: NetworkFailureHandler.java
public NetworkFailureHandler(
		AtomicBoolean blocked,
		Consumer<NetworkFailureHandler> onClose,
		ClientSocketChannelFactory channelFactory,
		String remoteHost,
		int remotePort) {
	this.blocked = blocked;
	this.onClose = onClose;
	this.channelFactory = channelFactory;
	this.remoteHost = remoteHost;
	this.remotePort = remotePort;
}
 
Example 8 - Project: Flink-CEPplus - File: NetworkFailuresProxy.java
public NetworkFailuresProxy(int localPort, String remoteHost, int remotePort) {
	// Configure the bootstrap.
	serverBootstrap = new ServerBootstrap(
		new NioServerSocketChannelFactory(executor, executor));

	// Set up the event pipeline factory.
	ClientSocketChannelFactory channelFactory = new NioClientSocketChannelFactory(executor, executor);
	serverBootstrap.setOption("child.tcpNoDelay", true);
	serverBootstrap.setOption("child.keepAlive", true);
	serverBootstrap.setPipelineFactory(new ChannelPipelineFactory() {
		public ChannelPipeline getPipeline() throws Exception {
			ChannelPipeline pipeline = Channels.pipeline();

			// synchronized to avoid a race between blocking and creating new handlers
			synchronized (networkFailureHandlers) {
				NetworkFailureHandler failureHandler = new NetworkFailureHandler(
					blocked,
					networkFailureHandler -> networkFailureHandlers.remove(networkFailureHandler),
					channelFactory,
					remoteHost,
					remotePort);
				networkFailureHandlers.add(failureHandler);
				pipeline.addLast(NETWORK_FAILURE_HANDLER_NAME, failureHandler);
			}
			return pipeline;
		}
	});
	channel = serverBootstrap.bind(new InetSocketAddress(localPort));

	LOG.info("Proxying [*:{}] to [{}:{}]", getLocalPort(), remoteHost, remotePort);
}
 
Example 9 - Project: flink - File: NetworkFailureHandler.java
public NetworkFailureHandler(
		AtomicBoolean blocked,
		Consumer<NetworkFailureHandler> onClose,
		ClientSocketChannelFactory channelFactory,
		String remoteHost,
		int remotePort) {
	this.blocked = blocked;
	this.onClose = onClose;
	this.channelFactory = channelFactory;
	this.remoteHost = remoteHost;
	this.remotePort = remotePort;
}
 
Example 10 - Project: flink - File: NetworkFailuresProxy.java
public NetworkFailuresProxy(int localPort, String remoteHost, int remotePort) {
	// Configure the bootstrap.
	serverBootstrap = new ServerBootstrap(
		new NioServerSocketChannelFactory(executor, executor));

	// Set up the event pipeline factory.
	ClientSocketChannelFactory channelFactory = new NioClientSocketChannelFactory(executor, executor);
	serverBootstrap.setOption("child.tcpNoDelay", true);
	serverBootstrap.setOption("child.keepAlive", true);
	serverBootstrap.setPipelineFactory(new ChannelPipelineFactory() {
		public ChannelPipeline getPipeline() throws Exception {
			ChannelPipeline pipeline = Channels.pipeline();

			// synchronized to avoid a race between blocking and creating new handlers
			synchronized (networkFailureHandlers) {
				NetworkFailureHandler failureHandler = new NetworkFailureHandler(
					blocked,
					networkFailureHandler -> networkFailureHandlers.remove(networkFailureHandler),
					channelFactory,
					remoteHost,
					remotePort);
				networkFailureHandlers.add(failureHandler);
				pipeline.addLast(NETWORK_FAILURE_HANDLER_NAME, failureHandler);
			}
			return pipeline;
		}
	});
	channel = serverBootstrap.bind(new InetSocketAddress(localPort));

	LOG.info("Proxying [*:{}] to [{}:{}]", getLocalPort(), remoteHost, remotePort);
}
 
Example 11 - Project: incubator-tajo - File: TestFetcher.java
@Test
public void testGet() throws IOException {
  Random rnd = new Random();
  FileWriter writer = new FileWriter(INPUT_DIR + "data");
  String data;
  for (int i = 0; i < 100; i++) {
    data = ""+rnd.nextInt();
    writer.write(data);
  }
  writer.flush();
  writer.close();

  DataRetriever ret = new DirectoryRetriever(INPUT_DIR);
  HttpDataServer server = new HttpDataServer(
      NetUtils.createSocketAddr("127.0.0.1:0"), ret);
  server.start();
  InetSocketAddress addr = server.getBindAddress();
  
  URI uri = URI.create("http://127.0.0.1:"+addr.getPort() + "/data");
  ClientSocketChannelFactory channelFactory = RpcChannelFactory.createClientChannelFactory("Fetcher", 1);
  Fetcher fetcher = new Fetcher(uri, new File(OUTPUT_DIR + "data"), channelFactory);
  fetcher.get();
  server.stop();
  
  FileSystem fs = FileSystem.getLocal(new TajoConf());
  FileStatus inStatus = fs.getFileStatus(new Path(INPUT_DIR, "data"));
  FileStatus outStatus = fs.getFileStatus(new Path(OUTPUT_DIR, "data"));
  assertEquals(inStatus.getLen(), outStatus.getLen());
}
 
Example 12 - Project: incubator-tajo - File: RpcChannelFactory.java
/**
 * Make this factory static so that all clients can share its thread pool.
 * NioClientSocketChannelFactory exposes only one user-visible method, newChannel(), which is thread-safe.
 */
public static synchronized ClientSocketChannelFactory getSharedClientChannelFactory(){
  // shared worker and boss pool
  if(factory == null){
    TajoConf conf = new TajoConf();
    int workerNum = conf.getIntVar(TajoConf.ConfVars.INTERNAL_RPC_CLIENT_WORKER_THREAD_NUM);
    factory = createClientChannelFactory("Internal-Client", workerNum);
  }
  return factory;
}
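
To illustrate the sharing described in the comment above (the two client bootstraps are hypothetical), several clients can reuse the one factory instead of each spawning its own thread pools:

ClientSocketChannelFactory shared = RpcChannelFactory.getSharedClientChannelFactory();
// Both bootstraps draw on the same boss/worker thread pools.
ClientBootstrap clientA = new ClientBootstrap(shared);
ClientBootstrap clientB = new ClientBootstrap(shared);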
 
Example 13 - Project: distributedlog - File: BookKeeperClient.java
@SuppressWarnings("deprecation")
private synchronized void commonInitialization(
        DistributedLogConfiguration conf, String ledgersPath,
        ClientSocketChannelFactory channelFactory, StatsLogger statsLogger, HashedWheelTimer requestTimer,
        boolean registerExpirationHandler)
    throws IOException, InterruptedException, KeeperException {
    ClientConfiguration bkConfig = new ClientConfiguration();
    bkConfig.setAddEntryTimeout(conf.getBKClientWriteTimeout());
    bkConfig.setReadTimeout(conf.getBKClientReadTimeout());
    bkConfig.setZkLedgersRootPath(ledgersPath);
    bkConfig.setZkTimeout(conf.getBKClientZKSessionTimeoutMilliSeconds());
    bkConfig.setNumWorkerThreads(conf.getBKClientNumberWorkerThreads());
    bkConfig.setEnsemblePlacementPolicy(RegionAwareEnsemblePlacementPolicy.class);
    bkConfig.setZkRequestRateLimit(conf.getBKClientZKRequestRateLimit());
    bkConfig.setProperty(RegionAwareEnsemblePlacementPolicy.REPP_DISALLOW_BOOKIE_PLACEMENT_IN_REGION_FEATURE_NAME,
            DistributedLogConstants.DISALLOW_PLACEMENT_IN_REGION_FEATURE_NAME);
    // reload configuration from dl configuration with settings prefixed with 'bkc.'
    ConfUtils.loadConfiguration(bkConfig, conf, "bkc.");

    Class<? extends DNSToSwitchMapping> dnsResolverCls;
    try {
        dnsResolverCls = conf.getEnsemblePlacementDnsResolverClass();
    } catch (ConfigurationException e) {
        LOG.error("Failed to load bk dns resolver : ", e);
        throw new IOException("Failed to load bk dns resolver : ", e);
    }
    final DNSToSwitchMapping dnsResolver =
            NetUtils.getDNSResolver(dnsResolverCls, conf.getBkDNSResolverOverrides());

    this.bkc = BookKeeper.newBuilder()
        .config(bkConfig)
        .zk(zkc.get())
        .channelFactory(channelFactory)
        .statsLogger(statsLogger)
        .dnsResolver(dnsResolver)
        .requestTimer(requestTimer)
        .featureProvider(featureProvider.orNull())
        .build();

    if (registerExpirationHandler) {
        sessionExpireWatcher = this.zkc.registerExpirationHandler(this);
    }
}
 
Example 14 - Project: incubator-tajo - File: RpcConnectionPool.java
private RpcConnectionPool(TajoConf conf, ClientSocketChannelFactory channelFactory) {
  this.conf = conf;
  this.channelFactory =  channelFactory;
}
 
Example 15 - Project: usergrid - File: MongoProxyServer.java
public static void main( String[] args ) throws Exception {
    logger.info( "Starting Usergrid Mongo Proxy Server" );

    // Configure the server.
    Executor executor = Executors.newCachedThreadPool();
    ServerBootstrap bootstrap = new ServerBootstrap( new NioServerSocketChannelFactory( executor, executor ) );

    bootstrap.setOption( "child.bufferFactory", HeapChannelBufferFactory.getInstance( ByteOrder.LITTLE_ENDIAN ) );

    ClientSocketChannelFactory cf = new NioClientSocketChannelFactory( executor, executor );

    bootstrap.setPipelineFactory( new MongoProxyPipelineFactory( cf, "localhost", 12345 ) );

    bootstrap.bind( new InetSocketAddress( 27017 ) );

    logger.info( "Usergrid Mongo Proxy Server accepting connections..." );
}
 
Example 16 - Project: usergrid - File: MongoProxyPipelineFactory.java
public MongoProxyPipelineFactory( ClientSocketChannelFactory cf, String remoteHost, int remotePort ) {
    this.cf = cf;
    this.remoteHost = remoteHost;
    this.remotePort = remotePort;
}
 
Example 17 - Project: usergrid - File: MongoProxyInboundHandler.java
public MongoProxyInboundHandler( ClientSocketChannelFactory cf, String remoteHost, int remotePort ) {
    this.cf = cf;
    this.remoteHost = remoteHost;
    this.remotePort = remotePort;
}
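
Finally, a simplified sketch of the proxy pattern that handlers such as NetworkFailureHandler (Examples 7 and 9) and MongoProxyInboundHandler (Example 17) implement: when an inbound connection opens, a ClientBootstrap built from the injected ClientSocketChannelFactory dials the remote peer, and received messages are forwarded to that outbound channel. This is a minimal illustration rather than the Usergrid or Flink code; a production proxy must also buffer or suspend reads until the outbound connect completes.

import java.net.InetSocketAddress;

import org.jboss.netty.bootstrap.ClientBootstrap;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.ChannelStateEvent;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
import org.jboss.netty.channel.socket.ClientSocketChannelFactory;

public class SimpleProxyHandler extends SimpleChannelUpstreamHandler {

    private final ClientSocketChannelFactory cf;
    private final String remoteHost;
    private final int remotePort;
    private volatile Channel outboundChannel;

    public SimpleProxyHandler(ClientSocketChannelFactory cf, String remoteHost, int remotePort) {
        this.cf = cf;
        this.remoteHost = remoteHost;
        this.remotePort = remotePort;
    }

    @Override
    public void channelOpen(ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception {
        // Dial the remote peer with the injected factory; its pipeline would
        // normally carry a handler that relays responses back to the client.
        ClientBootstrap cb = new ClientBootstrap(cf);
        outboundChannel = cb.connect(new InetSocketAddress(remoteHost, remotePort)).getChannel();
    }

    @Override
    public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
        // Relay every inbound message to the remote peer. A real proxy checks
        // that the outbound connect has completed before writing.
        if (outboundChannel != null) {
            outboundChannel.write(e.getMessage());
        }
    }
}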