io.netty.channel.nio.NioEventLoopGroup#shutdownGracefully() code examples

Listed below are example usages of io.netty.channel.nio.NioEventLoopGroup#shutdownGracefully(), collected from open-source projects; follow each example's link to view the full source on GitHub.
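
Before the project examples, here is a minimal illustrative sketch of the usual pattern (the class name GracefulShutdownSketch is just a placeholder, not taken from any project below): shutdownGracefully() starts an orderly shutdown in which no new tasks are accepted and the group terminates once a quiet period passes without any task being submitted; the no-argument overload defaults to a 2-second quiet period and a 15-second hard timeout, and the returned future can be awaited.

import io.netty.channel.nio.NioEventLoopGroup;

import java.util.concurrent.TimeUnit;

public class GracefulShutdownSketch {
    public static void main(String[] args) {
        NioEventLoopGroup group = new NioEventLoopGroup();
        try {
            // ... register channels or submit tasks via a Bootstrap here ...
        } finally {
            // Explicit quiet period (2s) and hard timeout (15s); these are the
            // same values the no-argument overload uses by default.
            group.shutdownGracefully(2, 15, TimeUnit.SECONDS)
                 .syncUninterruptibly(); // block until termination completes
        }
    }
}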

Example 1  Project: etcd4j  File: EtcdNettyClientTest.java
@Test
public void testManagedEventLoopGroup() throws Exception {
  NioEventLoopGroup evl = new NioEventLoopGroup();
  EtcdNettyConfig config = new EtcdNettyConfig()
      .setConnectTimeout(100)
      .setSocketChannelClass(NioSocketChannel.class)
      .setMaxFrameSize(1024 * 1024)
      .setEventLoopGroup(evl, false)
      .setHostName("localhost");

  URI[] endpoints = CLUSTER.endpoints();
  EtcdNettyClient client = new EtcdNettyClient(config, endpoints);
  client.close();

  assertFalse(evl.isShuttingDown() || evl.isShutdown() || evl.isTerminated());
  
  evl.shutdownGracefully();
  assertTrue(evl.isShuttingDown() || evl.isShutdown() || evl.isTerminated());
}
 
Example 2  Project: jdk-source-analysis  File: TestServer.java
@Test
public void test() throws InterruptedException {
  NioEventLoopGroup bossGroup = new NioEventLoopGroup(1);
  NioEventLoopGroup workerGroup = new NioEventLoopGroup();
  try {
    ServerBootstrap bootstrap = new ServerBootstrap();
    bootstrap.group(bossGroup, workerGroup)
      .channel(NioServerSocketChannel.class)
      .childHandler(new TestServerInitializer());

    ChannelFuture future = bootstrap.bind(11911).sync();

    future.channel().closeFuture().sync();
  } finally {
    bossGroup.shutdownGracefully();
    workerGroup.shutdownGracefully();
  }
}
 
Example 3  Project: SynapseAPI  File: SynapseClient.java
public boolean connect() {
    clientGroup = new NioEventLoopGroup();
    try {
        Bootstrap b = new Bootstrap();  // Bootstrap: quick-start helper for the client side
        b.group(clientGroup)
                .channel(NioSocketChannel.class)
                .option(ChannelOption.SO_KEEPALIVE, true)
                .handler(new SynapseClientInitializer(this));

        b.connect(this.interfaz, this.port).get();
        // To wait until the link to the server is closed before returning,
        // one could sync on the close future:
        //future.channel().closeFuture().sync();
        return true;
    } catch (Exception e) {
        clientGroup.shutdownGracefully();
        Server.getInstance().getLogger().alert("Synapse Client can't connect to server: " + this.interfaz + ":" + this.port);
        Server.getInstance().getLogger().alert("Reason: " + e.getLocalizedMessage());
        Server.getInstance().getLogger().warning("We will reconnect in 3 seconds");
        this.reconnect();
        return false;
    }
}
 
Example 4  Project: smartacus-mqtt-broker  File: MqttServer.java
public void run() throws Exception{
    EventLoopGroup bossGroup=new NioEventLoopGroup(1);
    NioEventLoopGroup  workerGroup=new NioEventLoopGroup(128);
    try{
        // Instantiate the session manager and the connection factory
        SessionManager sessionManager=new SessionManager();
        ConnectionFactory connectionFactory=new ConnectionFactory();
        ServerBootstrap sboot=new ServerBootstrap();
        sboot.group(bossGroup,workerGroup)
                // Set the channel type
                .channel(NioServerSocketChannel.class)
                // Add the handler initializer to each child channel
                .childHandler(new MqttChannelChannelInitializer(sessionManager,connectionFactory))
                .option(ChannelOption.SO_BACKLOG,1024)
                // Enable keepalive on child sockets
                .childOption(ChannelOption.SO_KEEPALIVE,true);
        // Bind the port
        Integer port=Integer.valueOf(SystemConfiguration.INSTANCE.getPort());
        ChannelFuture cf = sboot.bind(port).sync();
        System.out.println("Broker initiated...");
        // Attach the listener to the existing bind future instead of binding
        // the same port a second time (a second bind would fail).
        cf.addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                System.out.println("========= bind completed ==============");
            }
        });
        cf.channel().closeFuture().sync();
    }finally {
        workerGroup.shutdownGracefully();
        bossGroup.shutdownGracefully();
    }
}
 
Example 5  Project: netty4.0.27Learn  File: ByteEchoClient.java
public static void main(String[] args) throws Exception {
    // Configure the client.
    final ThreadFactory connectFactory = new DefaultThreadFactory("connect");
    final NioEventLoopGroup connectGroup = new NioEventLoopGroup(1,
            connectFactory, NioUdtProvider.BYTE_PROVIDER);
    try {
        final Bootstrap boot = new Bootstrap();
        boot.group(connectGroup)
                .channelFactory(NioUdtProvider.BYTE_CONNECTOR)
                .handler(new ChannelInitializer<UdtChannel>() {
                    @Override
                    public void initChannel(final UdtChannel ch)
                            throws Exception {
                        ch.pipeline().addLast(
                                new LoggingHandler(LogLevel.INFO),
                                new ByteEchoClientHandler());
                    }
                });
        // Start the client.
        final ChannelFuture f = boot.connect(HOST, PORT).sync();
        // Wait until the connection is closed.
        f.channel().closeFuture().sync();
    } finally {
        // Shut down the event loop to terminate all threads.
        connectGroup.shutdownGracefully();
    }
}
 
Example 6  Project: netty-4.1.22  File: ByteEchoClient.java
public static void main(String[] args) throws Exception {
    // Configure the client.
    final ThreadFactory connectFactory = new DefaultThreadFactory("connect");
    final NioEventLoopGroup connectGroup = new NioEventLoopGroup(1,
            connectFactory, NioUdtProvider.BYTE_PROVIDER);
    try {
        final Bootstrap boot = new Bootstrap();
        boot.group(connectGroup)
                .channelFactory(NioUdtProvider.BYTE_CONNECTOR)
                .handler(new ChannelInitializer<UdtChannel>() {
                    @Override
                    public void initChannel(final UdtChannel ch)
                            throws Exception {
                        ch.pipeline().addLast(
                                new LoggingHandler(LogLevel.INFO),
                                new ByteEchoClientHandler());
                    }
                });
        // Start the client.
        final ChannelFuture f = boot.connect(HOST, PORT).sync();
        // Wait until the connection is closed.
        f.channel().closeFuture().sync();
    } finally {
        // Shut down the event loop to terminate all threads.
        connectGroup.shutdownGracefully();
    }
}
 
Example 7  Project: JTelegramBot  File: WebhookServer.java
/**
 * Starts receiving requests from the Telegram server (WEBHOOK mode). This is a blocking method.
 */
public void start() throws InterruptedException
{
	bossGroup = new NioEventLoopGroup();
	workerGroup = new NioEventLoopGroup();
	
	try
	{
		ServerBootstrap server = new ServerBootstrap();
		
		server.group(bossGroup, workerGroup)
		      .channel(NioServerSocketChannel.class)
		      .childHandler(new ServerInitializer(sslCtx, path))
		      .option(ChannelOption.SO_BACKLOG, 128)
		      .childOption(ChannelOption.SO_KEEPALIVE, true);
		
		// Bind and start to accept incoming connections.
		ChannelFuture f = server.bind(port.getPortNumber()).sync();
		
		// Wait until the server socket is closed.
		f.channel().closeFuture().sync();
	}
	finally
	{
		workerGroup.shutdownGracefully();
		bossGroup.shutdownGracefully();
	}
}
 
Example 8  Project: netty-4.1.22  File: SslHandlerTest.java
@Test(timeout = 30000)
public void testRemoval() throws Exception {
    NioEventLoopGroup group = new NioEventLoopGroup();
    Channel sc = null;
    Channel cc = null;
    try {
        final Promise<Void> clientPromise = group.next().newPromise();
        Bootstrap bootstrap = new Bootstrap()
                .group(group)
                .channel(NioSocketChannel.class)
                .handler(newHandler(SslContextBuilder.forClient().trustManager(
                        InsecureTrustManagerFactory.INSTANCE).build(), clientPromise));

        SelfSignedCertificate ssc = new SelfSignedCertificate();
        final Promise<Void> serverPromise = group.next().newPromise();
        ServerBootstrap serverBootstrap = new ServerBootstrap()
                .group(group, group)
                .channel(NioServerSocketChannel.class)
                .childHandler(newHandler(SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey()).build(),
                        serverPromise));
        sc = serverBootstrap.bind(new InetSocketAddress(0)).syncUninterruptibly().channel();
        cc = bootstrap.connect(sc.localAddress()).syncUninterruptibly().channel();

        serverPromise.syncUninterruptibly();
        clientPromise.syncUninterruptibly();
    } finally {
        if (cc != null) {
            cc.close().syncUninterruptibly();
        }
        if (sc != null) {
            sc.close().syncUninterruptibly();
        }
        group.shutdownGracefully();
    }
}
 
Example 9  Project: netty-rest-server  File: WebServer.java
/**
 * Starts the server.
 * @throws InterruptedException 
 */
public void start() throws InterruptedException {
    // Register all REST controllers
    new ControllerFactory().registerController(this.controllerBasePackage);
    
    // The boss group handles NIO accept events (new TCP connections)
    NioEventLoopGroup bossGroup = new NioEventLoopGroup(this.bossThreads);
    // The worker group handles NIO read and write events (channel I/O)
    NioEventLoopGroup workerGroup = new NioEventLoopGroup(this.workerThreads);
    
    try {
        // handler() runs when the server channel is initialized; childHandler() runs only after a client has successfully connected.
        ServerBootstrap bootstrap = new ServerBootstrap();
        bootstrap.group(bossGroup, workerGroup)
        .channel(NioServerSocketChannel.class)
        .option(ChannelOption.SO_BACKLOG, 128)
        .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)       
        .childHandler(new HandlerInitializer(this.maxContentLength));
        
        ChannelFuture f = bootstrap.bind(port).sync();
        logger.info("The netty rest server is now ready to accept requests on port {}", this.port); 
        f.channel().closeFuture().sync();
    } finally {
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}
 
Example 10  Project: netty-cookbook  File: HttpServerSpringMVC.java
public void run() throws Exception {
	ServerBootstrap server = new ServerBootstrap();
	NioEventLoopGroup pGroup = new NioEventLoopGroup();
	NioEventLoopGroup cGroup = new NioEventLoopGroup();
	try {
		server.group(pGroup, cGroup)
				.channel(NioServerSocketChannel.class)					
				.childHandler(new DispatcherServletChannelInitializer(WebConfig.class));
		server.bind(host, port).sync().channel().closeFuture().sync();
	}
	finally {
		cGroup.shutdownGracefully();
		pGroup.shutdownGracefully();
	}
}
 
Example 11  Project: gruffalo  File: GraphiteMetricsPublisherTest.java
public static void main(String[] args) throws InterruptedException {

    final MetricFactory metricFactoryMock = Mockito.mock(MetricFactory.class);
    final Counter counterMock = Mockito.mock(Counter.class);
    Mockito.when(metricFactoryMock.createCounter(Mockito.anyString(), Mockito.anyString())).thenReturn(counterMock);

    NioEventLoopGroup eventLoopGroup = new NioEventLoopGroup(2);
    final Throttler throttler = Mockito.mock(Throttler.class);
    NettyGraphiteClient client = new NettyGraphiteClient(throttler, 1000, metricFactoryMock, "localhost:666");
    String host = "localhost";
    int port = 3003;
    GraphiteClientChannelInitializer channelInitializer = new GraphiteClientChannelInitializer(host, port, eventLoopGroup, new StringDecoder(), new StringEncoder(), new GraphiteChannelInboundHandler(client, host + ":" + port, throttler));
    client.setChannelInitializer(channelInitializer);
    client.connect();

//    Thread.sleep(20000);
    System.out.println("Begin bombardment...");
    StopWatch time = new StopWatch();
    time.start();
    for (int i = 0; i < 10000000; i++) {
      // Payload: ten 100-character lines of repeating digits (identical to the
      // original inline literal, built programmatically for readability).
      StringBuilder payload = new StringBuilder(i + " - ");
      for (int line = 0; line < 10; line++) {
        for (int rep = 0; rep < 10; rep++) {
          payload.append("0123456789");
        }
        payload.append('\n');
      }
      client.publishMetrics(payload.toString());
      if(i % 10000 == 0) {
        Thread.sleep(100);
      }
      if (i % 100000 == 0) {
        System.out.println(i);
        Thread.sleep(300);
      }
    }
    time.stop();

    System.out.println("sent all data: " + time +"; shutting down...");

    Thread.sleep(1000000);
    eventLoopGroup.shutdownGracefully(10, 20, TimeUnit.SECONDS);
  }
 
Example 12  Project: AgentX  File: XClient.java
public void start() {
    Configuration config = Configuration.INSTANCE;
    InternalLoggerFactory.setDefaultFactory(Slf4JLoggerFactory.INSTANCE);
    bossGroup = new NioEventLoopGroup(1);
    workerGroup = new NioEventLoopGroup();
    try {
        ServerBootstrap bootstrap = new ServerBootstrap();
        bootstrap.group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .childHandler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    protected void initChannel(SocketChannel socketChannel) throws Exception {
                        socketChannel.pipeline()
                                .addLast("logging", new LoggingHandler(LogLevel.DEBUG))
                                .addLast(new SocksInitRequestDecoder())
                                .addLast(new SocksMessageEncoder())
                                .addLast(new Socks5Handler())
                                .addLast(Status.TRAFFIC_HANDLER);
                    }
                });
        log.info("\tStartup {}-{}-client [{}{}]", Constants.APP_NAME, Constants.APP_VERSION, config.getMode(), config.getMode().equals("socks5") ? "" : ":" + config.getProtocol());
        new Thread(() -> new UdpServer().start()).start();
        ChannelFuture future = bootstrap.bind(config.getLocalHost(), config.getLocalPort()).sync();
        future.addListener(future1 -> log.info("\tTCP listening at {}:{}...", config.getLocalHost(), config.getLocalPort()));
        future.channel().closeFuture().sync();
    } catch (Exception e) {
        log.error("\tSocket bind failure ({})", e.getMessage());
    } finally {
        log.info("\tShutting down");
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}
 
Example 13  Project: openAGV  File: TcpServerChannelManager.java
@Override
    public void initialize() {
        if (initialized) {
            LOG.warn("已经初始化,请勿重复初始化");
            return;
        }

        serverBootstrap = new ServerBootstrap();
        NioEventLoopGroup workerGroup = new NioEventLoopGroup();
        serverBootstrap.group(workerGroup, workerGroup);
        serverBootstrap.channel(NioServerSocketChannel.class);
        serverBootstrap.option(ChannelOption.SO_BACKLOG, 1);
        serverBootstrap.childOption(ChannelOption.SO_KEEPALIVE, true);
        serverBootstrap.childOption(ChannelOption.TCP_NODELAY, true);
//        TcpServerHandler tcpServerHandler = new TcpServerHandler(clientEntries);
        serverBootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
            @Override
            protected void initChannel(SocketChannel ch) {
                if (loggingInitially) {
                    ch.pipeline().addFirst(LOGGING_HANDLER_NAME,
                            new LoggingHandler(TcpServerChannelManager.this.getClass()));
                }
                if (readTimeout > 0) {
                    ch.pipeline().addLast(new IdleStateHandler(readTimeout, 0, 0, TimeUnit.MILLISECONDS));
                }
                for (ChannelHandler handler : channelSupplier.get()) {
                    ch.pipeline().addLast(handler);
                }
                ch.pipeline().addLast(new TcpServerHandler(clientEntries));
                ch.pipeline().addLast(new ServerConnectionStateNotifier(clientEntries));
            }

        });
        try {
            serverChannelFuture = serverBootstrap.bind(host, port).sync();
            serverChannelFuture.addListener((ChannelFuture future) -> {
                if (future.isSuccess()) {
                    initialized = true;
                    LOG.warn("TcpServerChannelManager绑定[" + host + ":" + port + "]成功");
                } else {
                    throw new RuntimeException("udp绑定[" + host + ":" + port + "]不成功");
                }
            });
        } catch (Exception e) {
            e.printStackTrace();
            workerGroup.shutdownGracefully();
            throw new RuntimeException("TcpServerChannelManager initialized is " + isInitialized() + ", exception message:  " + e.getMessage());
        }
    }
 
Example 14  Project: wildfly-camel  File: LumberjackComponentTest.java
private List<Integer> sendMessages(int port, SSLContextParameters sslContextParameters) throws InterruptedException {
    NioEventLoopGroup eventLoopGroup = new NioEventLoopGroup();
    try {
        // This list will hold the acknowledgment response sequence numbers
        List<Integer> responses = new ArrayList<>();

        // This initializer configures the SSL and an acknowledgment recorder
        ChannelInitializer<Channel> initializer = new ChannelInitializer<Channel>() {
            @Override
            protected void initChannel(Channel ch) throws Exception {
                ChannelPipeline pipeline = ch.pipeline();
                if (sslContextParameters != null) {
                    SSLEngine sslEngine = sslContextParameters.createSSLContext(null).createSSLEngine();
                    sslEngine.setUseClientMode(true);
                    pipeline.addLast(new SslHandler(sslEngine));
                }

                // Add the response recorder
                pipeline.addLast(new SimpleChannelInboundHandler<ByteBuf>() {
                    @Override
                    protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Exception {
                        Assert.assertEquals((short) '2', msg.readUnsignedByte());
                        Assert.assertEquals((short) 'A', msg.readUnsignedByte());
                        synchronized (responses) {
                            responses.add(msg.readInt());
                        }
                    }
                });
            }
        };

        // Connect to the server
        Channel channel = new Bootstrap()
                .group(eventLoopGroup)
                .channel(NioSocketChannel.class)
                .handler(initializer)
                .connect("127.0.0.1", port).sync().channel();

        // Send the 2 window frames
        TimeUnit.MILLISECONDS.sleep(100);
        channel.writeAndFlush(readSample("lumberjack/window10"));
        TimeUnit.MILLISECONDS.sleep(100);
        channel.writeAndFlush(readSample("lumberjack/window15"));
        TimeUnit.MILLISECONDS.sleep(100);

        channel.close();

        synchronized (responses) {
            return responses;
        }
    } finally {
        eventLoopGroup.shutdownGracefully();
    }
}
 
/**
 * verify basic echo byte rendezvous
 */
@Test(timeout = 10 * 1000)
public void basicEcho() throws Exception {

    final int messageSize = 64 * 1024;
    final int transferLimit = messageSize * 16;

    final Meter rate1 = Metrics.newMeter(
            NioUdtMessageRendezvousChannelTest.class, "send rate", "bytes",
            TimeUnit.SECONDS);

    final Meter rate2 = Metrics.newMeter(
            NioUdtMessageRendezvousChannelTest.class, "send rate", "bytes",
            TimeUnit.SECONDS);

    final InetSocketAddress addr1 = UnitHelp.localSocketAddress();
    final InetSocketAddress addr2 = UnitHelp.localSocketAddress();

    final EchoByteHandler handler1 = new EchoByteHandler(rate1, messageSize);
    final EchoByteHandler handler2 = new EchoByteHandler(rate2, messageSize);

    final NioEventLoopGroup group1 = new NioEventLoopGroup(
            1, Executors.defaultThreadFactory(), NioUdtProvider.BYTE_PROVIDER);
    final NioEventLoopGroup group2 = new NioEventLoopGroup(
            1, Executors.defaultThreadFactory(), NioUdtProvider.BYTE_PROVIDER);

    final Bootstrap boot1 = new Bootstrap();
    boot1.group(group1)
         .channelFactory(NioUdtProvider.BYTE_RENDEZVOUS)
         .localAddress(addr1)
         .remoteAddress(addr2)
         .handler(handler1);

    final Bootstrap boot2 = new Bootstrap();
    boot2.group(group2)
         .channelFactory(NioUdtProvider.BYTE_RENDEZVOUS)
         .localAddress(addr2)
         .remoteAddress(addr1)
         .handler(handler2);

    final ChannelFuture connectFuture1 = boot1.connect();
    final ChannelFuture connectFuture2 = boot2.connect();

    while (handler1.meter().count() < transferLimit
            && handler2.meter().count() < transferLimit) {

        log.info("progress : {} {}", handler1.meter().count(), handler2
                .meter().count());

        Thread.sleep(1000);
    }

    connectFuture1.channel().close().sync();
    connectFuture2.channel().close().sync();

    log.info("handler1 : {}", handler1.meter().count());
    log.info("handler2 : {}", handler2.meter().count());

    assertTrue(handler1.meter().count() >= transferLimit);
    assertTrue(handler2.meter().count() >= transferLimit);

    assertEquals(handler1.meter().count(), handler2.meter().count());

    group1.shutdownGracefully();
    group2.shutdownGracefully();

    group1.terminationFuture().sync();
    group2.terminationFuture().sync();
}
 
/**
 * verify basic echo message rendezvous
 *
 * FIXME: Re-enable after making it pass on Windows without unnecessary tight loop.
 *        https://github.com/netty/netty/issues/2853
 */
@Test(timeout = 10 * 1000)
@Ignore
public void basicEcho() throws Exception {

    final int messageSize = 64 * 1024;
    final int transferLimit = messageSize * 16;

    final Meter rate1 = Metrics.newMeter(
            NioUdtMessageRendezvousChannelTest.class, "send rate", "bytes", TimeUnit.SECONDS);

    final Meter rate2 = Metrics.newMeter(
            NioUdtMessageRendezvousChannelTest.class, "send rate", "bytes", TimeUnit.SECONDS);

    final InetSocketAddress addr1 = UnitHelp.localSocketAddress();
    final InetSocketAddress addr2 = UnitHelp.localSocketAddress();

    final EchoMessageHandler handler1 = new EchoMessageHandler(rate1, messageSize);
    final EchoMessageHandler handler2 = new EchoMessageHandler(rate2, messageSize);

    final NioEventLoopGroup group1 = new NioEventLoopGroup(
            1, Executors.defaultThreadFactory(), NioUdtProvider.MESSAGE_PROVIDER);
    final NioEventLoopGroup group2 = new NioEventLoopGroup(
            1, Executors.defaultThreadFactory(), NioUdtProvider.MESSAGE_PROVIDER);

    final Bootstrap boot1 = new Bootstrap();
    boot1.group(group1)
         .channelFactory(NioUdtProvider.MESSAGE_RENDEZVOUS)
         .localAddress(addr1).remoteAddress(addr2).handler(handler1);

    final Bootstrap boot2 = new Bootstrap();
    boot2.group(group2)
         .channelFactory(NioUdtProvider.MESSAGE_RENDEZVOUS)
         .localAddress(addr2).remoteAddress(addr1).handler(handler2);

    final ChannelFuture connectFuture1 = boot1.connect();
    final ChannelFuture connectFuture2 = boot2.connect();

    while (handler1.meter().count() < transferLimit
            && handler2.meter().count() < transferLimit) {

        log.info("progress : {} {}", handler1.meter().count(), handler2
                .meter().count());

        Thread.sleep(1000);
    }

    connectFuture1.channel().close().sync();
    connectFuture2.channel().close().sync();

    log.info("handler1 : {}", handler1.meter().count());
    log.info("handler2 : {}", handler2.meter().count());

    assertTrue(handler1.meter().count() >= transferLimit);
    assertTrue(handler2.meter().count() >= transferLimit);

    assertEquals(handler1.meter().count(), handler2.meter().count());

    group1.shutdownGracefully();
    group2.shutdownGracefully();

    group1.terminationFuture().sync();
    group2.terminationFuture().sync();
}
 
Example 18  Project: Lealone-Plugins  File: NettyTest.java
public static void main2(String[] args) throws Exception {
    NettyNetServerStart.optimizeNetty();

    long t0 = System.currentTimeMillis();
    long t1 = System.currentTimeMillis();
    // This line is very slow to execute:
    // String[] a = ((SSLSocketFactory) SSLSocketFactory.getDefault()).getDefaultCipherSuites();
    // System.out.println(a.length);
    long t2 = System.currentTimeMillis();
    // System.out.println("getDefaultCipherSuites: " + (t2 - t1) + "ms");

    t1 = System.currentTimeMillis();
    NetworkInterface.getNetworkInterfaces();
    t2 = System.currentTimeMillis();
    System.out.println("1 NetworkInterface.getNetworkInterfaces(): " + (t2 - t1) + "ms");

    t1 = System.currentTimeMillis();
    NetworkInterface.getNetworkInterfaces();
    t2 = System.currentTimeMillis();
    System.out.println("2 NetworkInterface.getNetworkInterfaces(): " + (t2 - t1) + "ms");

    t1 = System.currentTimeMillis();
    PlatformDependent.isWindows();
    t2 = System.currentTimeMillis();
    System.out.println("PlatformDependent init: " + (t2 - t1) + "ms");

    t1 = System.currentTimeMillis();
    // Slow (nearly 400ms) because it calls NetworkInterface.getNetworkInterfaces()
    System.out.println(io.netty.util.NetUtil.SOMAXCONN);
    // test();
    t2 = System.currentTimeMillis();
    System.out.println("NetUtil init: " + (t2 - t1) + "ms");

    t1 = System.currentTimeMillis();
    SelectorProvider.provider().openSelector();
    t2 = System.currentTimeMillis();
    System.out.println("1 openSelector: " + (t2 - t1) + "ms");

    t1 = System.currentTimeMillis();
    SelectorProvider.provider().openSelector();
    t2 = System.currentTimeMillis();
    System.out.println("2 openSelector: " + (t2 - t1) + "ms");

    t1 = System.currentTimeMillis();
    NioEventLoopGroup bossGroup = new NioEventLoopGroup(1);
    t2 = System.currentTimeMillis();
    System.out.println("1 new NioEventLoopGroup: " + (t2 - t1) + "ms");

    t1 = System.currentTimeMillis();
    NioEventLoopGroup workerGroup = new NioEventLoopGroup();
    t2 = System.currentTimeMillis();
    System.out.println("2 new NioEventLoopGroup: " + (t2 - t1) + "ms");
    System.out.println("1 total: " + (t2 - t0) + "ms");

    try {
        t1 = System.currentTimeMillis();
        ServerBootstrap b = new ServerBootstrap();
        b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class).option(ChannelOption.SO_BACKLOG, 100)
                .handler(new LoggingHandler(LogLevel.INFO)).childHandler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    public void initChannel(SocketChannel ch) throws Exception {
                        // ChannelPipeline p = ch.pipeline();
                        // p.addLast(new LoggingHandler(LogLevel.INFO));
                        // p.addLast(new EchoServerHandler());
                    }
                });
        t2 = System.currentTimeMillis();
        System.out.println("init ServerBootstrap: " + (t2 - t1) + "ms");

        t1 = System.currentTimeMillis();
        // Start the server.
        ChannelFuture f = b.bind(PORT).sync();
        t2 = System.currentTimeMillis();
        System.out.println("bind ServerBootstrap: " + (t2 - t1) + "ms");
        System.out.println("2 total: " + (t2 - t0) + "ms");
        // Wait until the server socket is closed.
        f.channel().closeFuture().sync();
    } finally {
        // Shut down all event loops to terminate all threads.
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}
 
Example 19  Project: netty4.0.27Learn  File: UdtNetty.java
public static void main(final String[] args) throws Exception {

        log.info("init");
        TrafficControl.delay(0);

        final AtomicBoolean isOn = new AtomicBoolean(true);

        final InetSocketAddress addr1 = UnitHelp.localSocketAddress();
        final InetSocketAddress addr2 = UnitHelp.localSocketAddress();

        final ChannelHandler handler1 = new EchoMessageHandler(rate, size);
        final ChannelHandler handler2 = new EchoMessageHandler(null, size);

        final NioEventLoopGroup group1 = new NioEventLoopGroup(
                1, Executors.defaultThreadFactory(), NioUdtProvider.MESSAGE_PROVIDER);
        final NioEventLoopGroup group2 = new NioEventLoopGroup(
                1, Executors.defaultThreadFactory(), NioUdtProvider.MESSAGE_PROVIDER);

        final Bootstrap peerBoot1 = new Bootstrap();
        peerBoot1.group(group1)
                 .channelFactory(NioUdtProvider.MESSAGE_RENDEZVOUS)
                 .localAddress(addr1).remoteAddress(addr2).handler(handler1);

        final Bootstrap peerBoot2 = new Bootstrap();
        peerBoot2.group(group2)
                 .channelFactory(NioUdtProvider.MESSAGE_RENDEZVOUS)
                 .localAddress(addr2).remoteAddress(addr1).handler(handler2);

        final ChannelFuture peerFuture1 = peerBoot1.connect();
        final ChannelFuture peerFuture2 = peerBoot2.connect();

        CustomReporter.enable(3, TimeUnit.SECONDS);

        Thread.sleep(time);

        isOn.set(false);

        Thread.sleep(1000);

        peerFuture1.channel().close().sync();
        peerFuture2.channel().close().sync();

        Thread.sleep(1000);

        group1.shutdownGracefully();
        group2.shutdownGracefully();

        Metrics.defaultRegistry().shutdown();

        TrafficControl.delay(0);
        log.info("done");
    }
 
Example 20  Project: netty-4.1.22  File: UdtNetty.java
public static void main(final String[] args) throws Exception {

        log.info("init");
        TrafficControl.delay(0);

        final AtomicBoolean isOn = new AtomicBoolean(true);

        final InetSocketAddress addr1 = UnitHelp.localSocketAddress();
        final InetSocketAddress addr2 = UnitHelp.localSocketAddress();

        final ChannelHandler handler1 = new EchoMessageHandler(rate, size);
        final ChannelHandler handler2 = new EchoMessageHandler(null, size);

        final NioEventLoopGroup group1 = new NioEventLoopGroup(
                1, Executors.defaultThreadFactory(), NioUdtProvider.MESSAGE_PROVIDER);
        final NioEventLoopGroup group2 = new NioEventLoopGroup(
                1, Executors.defaultThreadFactory(), NioUdtProvider.MESSAGE_PROVIDER);

        final Bootstrap peerBoot1 = new Bootstrap();
        peerBoot1.group(group1)
                 .channelFactory(NioUdtProvider.MESSAGE_RENDEZVOUS)
                 .localAddress(addr1).remoteAddress(addr2).handler(handler1);

        final Bootstrap peerBoot2 = new Bootstrap();
        peerBoot2.group(group2)
                 .channelFactory(NioUdtProvider.MESSAGE_RENDEZVOUS)
                 .localAddress(addr2).remoteAddress(addr1).handler(handler2);

        final ChannelFuture peerFuture1 = peerBoot1.connect();
        final ChannelFuture peerFuture2 = peerBoot2.connect();

        CustomReporter.enable(3, TimeUnit.SECONDS);

        Thread.sleep(time);

        isOn.set(false);

        Thread.sleep(1000);

        peerFuture1.channel().close().sync();
        peerFuture2.channel().close().sync();

        Thread.sleep(1000);

        group1.shutdownGracefully();
        group2.shutdownGracefully();

        Metrics.defaultRegistry().shutdown();

        TrafficControl.delay(0);
        log.info("done");
    }