io.netty.channel.FixedRecvByteBufAllocator Source Code Examples

Listed below are code examples that use io.netty.channel.FixedRecvByteBufAllocator, drawn from open-source projects; follow each project to GitHub to read the full source.

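As background for the examples: a FixedRecvByteBufAllocator always allocates receive buffers of one fixed capacity, while io.netty.channel.AdaptiveRecvByteBufAllocator starts from an initial size and grows or shrinks its prediction according to how full previous reads were. The following minimal, self-contained sketch shows the option being set on a server bootstrap; the port, the 64 KiB size, and the LoggingHandler are illustrative choices only, not taken from any project below.

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.FixedRecvByteBufAllocator;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.logging.LoggingHandler;

public final class FixedRecvByteBufAllocatorDemo {

    public static void main(String[] args) throws InterruptedException {
        NioEventLoopGroup boss = new NioEventLoopGroup(1);
        NioEventLoopGroup worker = new NioEventLoopGroup();
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.group(boss, worker)
                    .channel(NioServerSocketChannel.class)
                    // Every read on an accepted channel gets a 64 KiB buffer; no adaptation.
                    // For adaptive sizing use: new AdaptiveRecvByteBufAllocator(64, 1024, 65536)
                    .childOption(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(64 * 1024))
                    // Hint the kernel receive buffer toward the same size.
                    .childOption(ChannelOption.SO_RCVBUF, 64 * 1024)
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            ch.pipeline().addLast(new LoggingHandler());
                        }
                    });
            b.bind(8080).sync().channel().closeFuture().sync();
        } finally {
            boss.shutdownGracefully();
            worker.shutdownGracefully();
        }
    }
}
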
Example 1   Project: gsc-core   File: GSCChannelInitializer.java
@Override
public void initChannel(NioSocketChannel ch) {
    try {
        final Channel channel = ctx.getBean(PeerConnection.class);

        channel.init(ch.pipeline(), remoteId, peerDiscoveryMode, channelManager);

        // limit the size of the receive buffer to 256 KiB
        ch.config().setRecvByteBufAllocator(new FixedRecvByteBufAllocator(256 * 1024));
        ch.config().setOption(ChannelOption.SO_RCVBUF, 256 * 1024);
        ch.config().setOption(ChannelOption.SO_BACKLOG, 1024);

        // be aware of channel closing
        ch.closeFuture().addListener((ChannelFutureListener) future -> {
            logger.info("Close channel:" + channel);
            if (!peerDiscoveryMode) {
                channelManager.notifyDisconnect(channel);
            }
        });

    } catch (Exception e) {
        logger.error("Unexpected error: ", e);
    }
}
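Two details of Example 1 are worth flagging. Setting SO_RCVBUF to the same 256 KiB as the allocator keeps the kernel buffer and Netty's per-read buffer consistent. SO_BACKLOG, on the other hand, only applies to listening sockets, so setting it on an already-accepted NioSocketChannel, as here and in Example 2 below, has no practical effect.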
 
Example 2   Project: gsc-core   File: BaseNet.java
public static Channel connect(ByteToMessageDecoder decoder) throws InterruptedException {
  NioEventLoopGroup group = new NioEventLoopGroup(1);
  Bootstrap b = new Bootstrap();
  b.group(group).channel(NioSocketChannel.class)
      .handler(new ChannelInitializer<Channel>() {
        @Override
        protected void initChannel(Channel ch) throws Exception {
          ch.config().setRecvByteBufAllocator(new FixedRecvByteBufAllocator(256 * 1024));
          ch.config().setOption(ChannelOption.SO_RCVBUF, 256 * 1024);
          ch.config().setOption(ChannelOption.SO_BACKLOG, 1024);
          ch.pipeline()
              .addLast("readTimeoutHandler", new ReadTimeoutHandler(600, TimeUnit.SECONDS))
              .addLast("writeTimeoutHandler", new WriteTimeoutHandler(600, TimeUnit.SECONDS));
          ch.pipeline().addLast("protoPender", new ProtobufVarint32LengthFieldPrepender());
          ch.pipeline().addLast("lengthDecode", new ProtobufVarint32FrameDecoder());
          ch.pipeline().addLast("handshakeHandler", decoder);
          ch.closeFuture();
        }
      }).option(ChannelOption.SO_KEEPALIVE, true)
      .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 60000)
      .option(ChannelOption.MESSAGE_SIZE_ESTIMATOR, DefaultMessageSizeEstimator.DEFAULT);
  return b.connect("127.0.0.1", port).sync().channel();
}
 
Example 3   Project: Kepler   File: MusServer.java
/**
 * Create the Netty sockets.
 */
public void createSocket() {
    int threads = Runtime.getRuntime().availableProcessors();
    this.bossGroup = (Epoll.isAvailable()) ? new EpollEventLoopGroup(threads) : new NioEventLoopGroup(threads);
    this.workerGroup = (Epoll.isAvailable()) ? new EpollEventLoopGroup(threads) : new NioEventLoopGroup(threads);

    this.bootstrap.group(bossGroup, workerGroup)
            .channel((Epoll.isAvailable()) ? EpollServerSocketChannel.class : NioServerSocketChannel.class)
            .childHandler(new MusChannelInitializer(this))
            .option(ChannelOption.SO_BACKLOG, BACK_LOG)
            .childOption(ChannelOption.TCP_NODELAY, true)
            .childOption(ChannelOption.SO_KEEPALIVE, true)
            .childOption(ChannelOption.SO_RCVBUF, BUFFER_SIZE)
            .childOption(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(BUFFER_SIZE))
            .childOption(ChannelOption.ALLOCATOR, new PooledByteBufAllocator(true));
}
 
Example 4   Project: Kepler   File: NettyServer.java
/**
 * Create the Netty sockets.
 */
public void createSocket() {
    int threads = Runtime.getRuntime().availableProcessors();
    this.bossGroup = (Epoll.isAvailable()) ? new EpollEventLoopGroup(threads) : new NioEventLoopGroup(threads);
    this.workerGroup = (Epoll.isAvailable()) ? new EpollEventLoopGroup(threads) : new NioEventLoopGroup(threads);

    this.bootstrap.group(bossGroup, workerGroup)
            .channel((Epoll.isAvailable()) ? EpollServerSocketChannel.class : NioServerSocketChannel.class)
            .childHandler(new NettyChannelInitializer(this))
            .option(ChannelOption.SO_BACKLOG, BACK_LOG)
            .childOption(ChannelOption.TCP_NODELAY, true)
            .childOption(ChannelOption.SO_KEEPALIVE, true)
            .childOption(ChannelOption.SO_RCVBUF, BUFFER_SIZE)
            .childOption(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(BUFFER_SIZE))
            .childOption(ChannelOption.ALLOCATOR, new PooledByteBufAllocator(true));
}
 
Example 5   Project: activemq-artemis   File: NettyTcpTransport.java
private void configureNetty(Bootstrap bootstrap, NettyTransportOptions options) {
   bootstrap.option(ChannelOption.TCP_NODELAY, options.isTcpNoDelay());
   bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, options.getConnectTimeout());
   bootstrap.option(ChannelOption.SO_KEEPALIVE, options.isTcpKeepAlive());
   bootstrap.option(ChannelOption.SO_LINGER, options.getSoLinger());

   if (options.getSendBufferSize() != -1) {
      bootstrap.option(ChannelOption.SO_SNDBUF, options.getSendBufferSize());
   }

   if (options.getReceiveBufferSize() != -1) {
      bootstrap.option(ChannelOption.SO_RCVBUF, options.getReceiveBufferSize());
      bootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(options.getReceiveBufferSize()));
   }

   if (options.getTrafficClass() != -1) {
      bootstrap.option(ChannelOption.IP_TOS, options.getTrafficClass());
   }
}
 
Example 6   File: DefaultDatagramChannelConfig.java
/**
 * Creates a new instance.
 */
public DefaultDatagramChannelConfig(DatagramChannel channel, DatagramSocket javaSocket) {
    super(channel, new FixedRecvByteBufAllocator(2048));
    if (javaSocket == null) {
        throw new NullPointerException("javaSocket");
    }
    this.javaSocket = javaSocket;
}
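This constructor (Netty's own default datagram channel configuration) hard-codes a fixed 2048-byte allocator rather than an adaptive one, presumably because a UDP read needs room for a whole datagram up front: bytes that do not fit in the receive buffer are silently truncated, so a stable fixed size is safer than a prediction that may have shrunk.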
 
Example 7
@Override
protected Transport<SourceServerRequest> createTransportService() {
    NettyPooledUdpTransport<SourceServerRequest> transport = new NettyPooledUdpTransport<>(ChannelType.NIO_UDP);
    transport.setChannelInitializer(new SourceQueryChannelInitializer(this));
    transport.addChannelOption(ChannelOption.SO_SNDBUF, 1048576);
    transport.addChannelOption(ChannelOption.SO_RCVBUF, 1048576);
    transport.addChannelOption(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(1400));
    return transport;
}
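Note the asymmetry in this UDP transport: the socket buffers are 1 MiB so the kernel can queue a burst of datagrams, while each read allocates only 1400 bytes, presumably sized so that one buffer holds a single MTU-sized datagram without over-allocating.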
 
Example 8   Project: SI   File: HttpClient.java
private HttpClient() {
	// Configure the client.
	group = new NioEventLoopGroup();
	bootstrap = new Bootstrap();
	bootstrap.group(group)
	.channel(NioSocketChannel.class)
	.handler(new HttpClientInitializer(mHttpClientListener));

	bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, CONNECTION_TIMEOUT_MILLIS);
	bootstrap.option(ChannelOption.TCP_NODELAY, true);
	bootstrap.option(ChannelOption.SO_RCVBUF, 65536 * 3);			// added in 2017-07-14
	bootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(65536 * 3));   // added in 2017-07-14
	
}
 
Example 9   Project: reactive-ipc-jvm   File: CodecSample.java
private static void runLineBasedFrameDecoder() {

        TcpServer<String, String> transport = Netty4TcpServer.<String, String>create(
                0,
                new ChannelInitializer<Channel>() {
                    @Override
                    protected void initChannel(Channel channel) throws Exception {
                        int bufferSize = 1;
                        ChannelConfig config = channel.config();
                        config.setOption(ChannelOption.SO_RCVBUF, bufferSize);
                        config.setOption(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(bufferSize));
                        channel.pipeline().addFirst(
                                new LineBasedFrameDecoder(256),
                                new StringDecoder(CharsetUtil.UTF_8),
                                new StringEncoder(CharsetUtil.UTF_8));
                    }
                });

        ReactorTcpServer.create(transport).start(connection -> {
            connection.log("input")
                    .observeComplete(v -> LOG.info("Connection input complete"))
                    .capacity(1)
                    .consume(line -> {
                        String response = "Hello " + line + "\n";
                        Streams.wrap(connection.writeWith(Streams.just(response))).consume();
                    });
            return Streams.never();
        });
    }
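The one-byte bufferSize here appears deliberate: shrinking both the kernel buffer and Netty's receive buffer to a single byte forces the LineBasedFrameDecoder to reassemble every line from many one-byte reads, demonstrating that the framing logic is independent of how the input happens to be fragmented.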
 
Example 11   Project: ethereumj   File: EthereumChannelInitializer.java
public void initChannel(NioSocketChannel ch) throws Exception {

        MessageQueue msgQueue;
        P2pHandler p2pHandler;
        EthHandler ethHandler;
        ShhHandler shhHandler;

        msgQueue = new MessageQueue(null);

        logger.info("Incoming connection from: {}", ch.toString());

        ch.remoteAddress();

        p2pHandler = new P2pHandler(msgQueue, null, false);
        p2pHandler.activate();

        ethHandler = new EthHandler(msgQueue, null, false);
        shhHandler = new ShhHandler(msgQueue, null);


        ch.pipeline().addLast("readTimeoutHandler",
                new ReadTimeoutHandler(CONFIG.peerChannelReadTimeout(), TimeUnit.SECONDS));
        ch.pipeline().addLast("out encoder", new MessageEncoder());
        ch.pipeline().addLast("in  encoder", new MessageDecoder());
        ch.pipeline().addLast(Capability.P2P, p2pHandler);
        ch.pipeline().addLast(Capability.ETH, ethHandler);
        ch.pipeline().addLast(Capability.SHH, shhHandler);

        // limit the size of the receive buffer to 32368 bytes
        ch.config().setRecvByteBufAllocator(new FixedRecvByteBufAllocator(32368));
        ch.config().setOption(ChannelOption.SO_RCVBUF, 32368);

        peerServer.addChannel(new Channel(msgQueue, p2pHandler, ethHandler, shhHandler));

        // TODO: check whether there is an active peer; if not, set this one
    }
 
Example 12   Project: bgpcep   File: PCEPDispatcherImpl.java
synchronized ServerBootstrap createServerBootstrap(final ChannelPipelineInitializer initializer) {
    final ServerBootstrap b = new ServerBootstrap();
    b.childHandler(new ChannelInitializer<SocketChannel>() {
        @Override
        protected void initChannel(final SocketChannel ch) {
            initializer.initializeChannel(ch, new DefaultPromise<>(PCEPDispatcherImpl.this.executor));
        }
    });
    b.option(ChannelOption.SO_BACKLOG, SOCKET_BACKLOG_SIZE);

    b.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);

    if (Epoll.isAvailable()) {
        b.channel(EpollServerSocketChannel.class);
        b.childOption(EpollChannelOption.EPOLL_MODE, EpollMode.LEVEL_TRIGGERED);
    } else {
        b.channel(NioServerSocketChannel.class);
    }
    if (!this.keys.isEmpty()) {
        if (Epoll.isAvailable()) {
            b.option(EpollChannelOption.TCP_MD5SIG, this.keys);
        } else {
            throw new UnsupportedOperationException(Epoll.unavailabilityCause().getCause());
        }
    }

    // Make sure we are doing round-robin processing
    b.childOption(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(1));

    if (b.config().group() == null) {
        b.group(this.bossGroup, this.workerGroup);
    }

    return b;
}
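The "round-robin" comment describes a scheduling trick: with a one-byte fixed allocator, every read from a channel consumes at most one byte, and because the event loop caps the number of reads it performs on a channel per pass, no single busy peer can monopolize the loop; peers are serviced a few bytes at a time in rotation, at the cost of many more read calls.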
 
Example 13   Project: qpid-jms   File: NettyTcpTransport.java
private void configureNetty(Bootstrap bootstrap, TransportOptions options) {
    bootstrap.option(ChannelOption.TCP_NODELAY, options.isTcpNoDelay());
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, options.getConnectTimeout());
    bootstrap.option(ChannelOption.SO_KEEPALIVE, options.isTcpKeepAlive());
    bootstrap.option(ChannelOption.SO_LINGER, options.getSoLinger());

    if (options.getSendBufferSize() != -1) {
        bootstrap.option(ChannelOption.SO_SNDBUF, options.getSendBufferSize());
    }

    if (options.getReceiveBufferSize() != -1) {
        bootstrap.option(ChannelOption.SO_RCVBUF, options.getReceiveBufferSize());
        bootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(options.getReceiveBufferSize()));
    }

    if (options.getTrafficClass() != -1) {
        bootstrap.option(ChannelOption.IP_TOS, options.getTrafficClass());
    }

    if (options.getLocalAddress() != null || options.getLocalPort() != 0) {
        if(options.getLocalAddress() != null) {
            bootstrap.localAddress(options.getLocalAddress(), options.getLocalPort());
        } else {
            bootstrap.localAddress(options.getLocalPort());
        }
    }
    if (options.getProxyHandlerSupplier() != null) {
        // in case we have a proxy we do not want to resolve the address by ourselves but leave this to the proxy
        bootstrap.resolver(NoopAddressResolverGroup.INSTANCE);
    }
}
 
Example 14   Project: kcp-netty   File: DefaultUkcpServerChannelConfig.java
public DefaultUkcpServerChannelConfig(UkcpServerChannel channel, DatagramSocket javaSocket) {
    super(channel, new FixedRecvByteBufAllocator(Consts.FIXED_RECV_BYTEBUF_ALLOCATE_SIZE));
    this.javaSocket = Objects.requireNonNull(javaSocket, "javaSocket");
}
 
Example 15   Project: kcp-netty   File: DefaultUkcpClientChannelConfig.java
public DefaultUkcpClientChannelConfig(UkcpClientChannel channel, Ukcp ukcp, DatagramSocket javaSocket) {
    super(channel, new FixedRecvByteBufAllocator(Consts.FIXED_RECV_BYTEBUF_ALLOCATE_SIZE));
    this.ukcp = Objects.requireNonNull(ukcp, "ukcp");
    this.javaSocket = Objects.requireNonNull(javaSocket, "javaSocket");
}
 
Example 16   Project: kcp-netty   File: DefaultUkcpServerChildChannelConfig.java
public DefaultUkcpServerChildChannelConfig(UkcpServerChildChannel channel, Ukcp ukcp) {
    super(channel, new FixedRecvByteBufAllocator(Consts.FIXED_RECV_BYTEBUF_ALLOCATE_SIZE));
    this.ukcp = Objects.requireNonNull(ukcp, "ukcp");
}
 
Example 17   Project: JRakNet   File: PeerFactory.java
/**
 * Further assembles the peer creation by handling the specified packet.
 * 
 * @param packet
 *            the packet to handle.
 * @return the created peer, <code>null</code> if the peer is not yet
 *         finished assembling.
 * @throws NullPointerException
 *             if the <code>packet</code> is <code>null</code>.
 * @throws IllegalStateException
 *             if the peer is not currently being assembled or if the peer
 *             has already been assembled.
 */
public RakNetServerPeer assemble(RakNetPacket packet) throws NullPointerException, IllegalStateException {
	if (packet == null) {
		throw new NullPointerException("Packet cannot be null");
	} else if (factoryState <= STATE_IDLE) {
		throw new IllegalStateException("Peer is not currently being assembled");
	} else if (factoryState >= STATE_PEER_ASSEMBLED) {
		throw new IllegalStateException("Peer has already been assembled");
	} else {
		try {
			if (packet.getId() == ID_OPEN_CONNECTION_REPLY_1 && factoryState == STATE_FIRST_CONNECTION_REQUEST) {
				OpenConnectionResponseOne connectionResponseOne = new OpenConnectionResponseOne(packet);
				connectionResponseOne.decode();
				if (connectionResponseOne.magic == false) {
					throw new InvalidMagicException(client);
				} else if (connectionResponseOne.maximumTransferUnit < RakNet.MINIMUM_MTU_SIZE) {
					throw new InvalidMaximumTransferUnitException(client,
							connectionResponseOne.maximumTransferUnit);
				}

				/*
				 * If the maximum transfer unit of the server is smaller
				 * than that of the client, then use that one. Otherwise,
				 * use the highest valid maximum transfer unit of the
				 * client.
				 */
				this.maximumTransferUnit = Math.min(connectionResponseOne.maximumTransferUnit,
						maximumMaximumTransferUnit);
				this.serverGuid = connectionResponseOne.serverGuid;
				this.factoryState = STATE_SECOND_CONNECTION_REQUEST;
				logger.debug("Applied maximum transfer unit " + maximumTransferUnit + " and globally unique ID "
						+ serverGuid + " from " + getName(packet.getId()) + " packet");
			} else if (packet.getId() == ID_OPEN_CONNECTION_REPLY_2
					&& factoryState == STATE_SECOND_CONNECTION_REQUEST) {
				OpenConnectionResponseTwo connectionResponseTwo = new OpenConnectionResponseTwo(packet);
				connectionResponseTwo.decode();
				if (connectionResponseTwo.failed()) {
					throw new PacketBufferException(connectionResponseTwo);
				} else if (connectionResponseTwo.magic == false) {
					throw new InvalidMagicException(client);
				} else if (connectionResponseTwo.serverGuid != serverGuid) {
					throw new InconsistentGuidException(client);
				} else if (connectionResponseTwo.maximumTransferUnit > maximumMaximumTransferUnit
						|| connectionResponseTwo.maximumTransferUnit < RakNet.MINIMUM_MTU_SIZE) {
					throw new InvalidMaximumTransferUnitException(client, maximumTransferUnit);
				} else if (connectionResponseTwo.maximumTransferUnit > maximumTransferUnit) {
					logger.warn("Server responded with higher maximum transfer unit than agreed upon earlier");
				}
				bootstrap.option(ChannelOption.SO_SNDBUF, maximumTransferUnit)
						.option(ChannelOption.SO_RCVBUF, maximumTransferUnit)
						.option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(maximumTransferUnit));

				// Create peer
				this.maximumTransferUnit = connectionResponseTwo.maximumTransferUnit;
				this.connectionType = connectionResponseTwo.connectionType;
				this.factoryState = STATE_PEER_ASSEMBLED;
				client.callEvent(listener -> listener.onConnect(client, address, connectionType));
				logger.debug(
						"Created server peer using globally unique ID " + Long.toHexString(serverGuid).toUpperCase()
								+ " and maximum transfer unit with size of " + maximumTransferUnit + " bytes ("
								+ (maximumTransferUnit * 8) + " bits) for server address " + address);
				return new RakNetServerPeer(client, address, serverGuid, maximumTransferUnit, connectionType,
						channel);
			} else if (packet.getId() == ID_ALREADY_CONNECTED) {
				throw new AlreadyConnectedException(client, address);
			} else if (packet.getId() == ID_NO_FREE_INCOMING_CONNECTIONS) {
				throw new NoFreeIncomingConnectionsException(client, address);
			} else if (packet.getId() == ID_CONNECTION_BANNED) {
				ConnectionBanned connectionBanned = new ConnectionBanned(packet);
				connectionBanned.decode();
				if (connectionBanned.magic != true) {
					throw new InvalidMagicException(client);
				} else if (connectionBanned.serverGuid == serverGuid) {
					throw new ConnectionBannedException(client, address);
				}
			} else if (packet.getId() == ID_INCOMPATIBLE_PROTOCOL_VERSION) {
				IncompatibleProtocolVersion incompatibleProtocol = new IncompatibleProtocolVersion(packet);
				incompatibleProtocol.decode();
				if (incompatibleProtocol.serverGuid == serverGuid) {
					throw new IncompatibleProtocolException(client, address, client.getProtocolVersion(),
							incompatibleProtocol.networkProtocol);
				}
			}
		} catch (PeerFactoryException | PacketBufferException e) {
			this.exceptionCaught(e);
		}
	}
	return null;
}
 
Example 18   Project: JRakNet   File: RakNetServer.java
/**
 * Starts the server.
 * 
 * @throws IllegalStateException
 *             if the server is already running.
 * @throws RakNetException
 *             if an error occurs during startup.
 */
public void start() throws IllegalStateException, RakNetException {
	if (running == true) {
		throw new IllegalStateException("Server is already running");
	} else if (listeners.isEmpty()) {
		logger.warn("Server has no listeners");
	}
	try {
		this.bootstrap = new Bootstrap();
		this.group = new NioEventLoopGroup();
		this.handler = new RakNetServerHandler(this);
		bootstrap.handler(handler);

		// Create bootstrap and bind channel
		bootstrap.channel(NioDatagramChannel.class).group(group);
		bootstrap.option(ChannelOption.SO_BROADCAST, true).option(ChannelOption.SO_REUSEADDR, false)
				.option(ChannelOption.SO_SNDBUF, maximumTransferUnit)
				.option(ChannelOption.SO_RCVBUF, maximumTransferUnit)
				.option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(maximumTransferUnit));
		this.channel = (bindingAddress != null ? bootstrap.bind(bindingAddress) : bootstrap.bind(0)).sync()
				.channel();
		this.bindAddress = (InetSocketAddress) channel.localAddress();
		this.running = true;
		logger.debug("Created and bound bootstrap");

		// Create and start peer update thread
		RakNetServer server = this;
		this.peerThread = new Thread(
				RakNetServer.class.getSimpleName() + "-Peer-Thread-" + Long.toHexString(guid).toUpperCase()) {

			@Override
			public void run() {
				HashMap<RakNetClientPeer, Throwable> disconnected = new HashMap<RakNetClientPeer, Throwable>();
				while (server.running == true && !this.isInterrupted()) {
					try {
						Thread.sleep(0, 1); // Lower CPU usage
					} catch (InterruptedException e) {
						this.interrupt(); // Interrupted during sleep
						continue;
					}
					for (RakNetClientPeer peer : clients.values()) {
						if (!peer.isDisconnected()) {
							try {
								peer.update();
								if (peer.getPacketsReceivedThisSecond() >= RakNet.getMaxPacketsPerSecond()) {
									server.blockAddress(peer.getInetAddress(), "Too many packets",
											RakNet.MAX_PACKETS_PER_SECOND_BLOCK);
								}
							} catch (Throwable throwable) {
								server.callEvent(listener -> listener.onPeerException(server, peer, throwable));
								disconnected.put(peer, throwable);
							}
						}
					}

					/*
					 * Disconnect peers.
					 * 
					 * This must be done here as simply removing a client
					 * from the clients map would be an incorrect way of
					 * disconnecting a client. This means that calling the
					 * disconnect() method is required. However, calling it
					 * while in the loop would cause a
					 * ConcurrentModificationException. To get around this,
					 * the clients that need to be disconnected are properly
					 * disconnected after the loop is finished. This is done
					 * simply by having them and their disconnect reason be
					 * put in a disconnection map.
					 */
					if (disconnected.size() > 0) {
						for (RakNetClientPeer peer : disconnected.keySet()) {
							server.disconnect(peer, disconnected.get(peer));
						}
						disconnected.clear();
					}
				}
			}

		};
		peerThread.start();
		logger.debug("Created and started peer update thread");
		this.callEvent(listener -> listener.onStart(this));
	} catch (InterruptedException e) {
		this.running = false;
		throw new RakNetException(e);
	}
	logger.info("Started server");
}
 
Example 19   Project: crate   File: Netty4HttpServerTransport.java
public Netty4HttpServerTransport(Settings settings,
                                 NetworkService networkService,
                                 BigArrays bigArrays,
                                 ThreadPool threadPool,
                                 NamedXContentRegistry xContentRegistry,
                                 PipelineRegistry pipelineRegistry,
                                 NodeClient nodeClient) {
    Netty4Utils.setAvailableProcessors(EsExecutors.PROCESSORS_SETTING.get(settings));
    this.settings = settings;
    this.networkService = networkService;
    this.bigArrays = bigArrays;
    this.threadPool = threadPool;
    this.xContentRegistry = xContentRegistry;
    this.pipelineRegistry = pipelineRegistry;
    this.nodeClient = nodeClient;

    this.maxContentLength = SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings);
    this.maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings);
    this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings);
    this.maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings);
    this.resetCookies = SETTING_HTTP_RESET_COOKIES.get(settings);
    this.maxCompositeBufferComponents = SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS.get(settings);
    this.workerCount = SETTING_HTTP_WORKER_COUNT.get(settings);
    this.port = SETTING_HTTP_PORT.get(settings);
    // we can't make the network.bind_host a fallback since we already fall back to http.host hence the extra conditional here
    List<String> httpBindHost = SETTING_HTTP_BIND_HOST.get(settings);
    this.bindHosts = (httpBindHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_BIND_HOST_SETTING.get(settings) : httpBindHost)
        .toArray(Strings.EMPTY_ARRAY);
    // we can't make the network.publish_host a fallback since we already fall back to http.host hence the extra conditional here
    List<String> httpPublishHost = SETTING_HTTP_PUBLISH_HOST.get(settings);
    this.publishHosts = (httpPublishHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings) : httpPublishHost)
        .toArray(Strings.EMPTY_ARRAY);
    this.detailedErrorsEnabled = SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings);
    this.readTimeoutMillis = Math.toIntExact(SETTING_HTTP_READ_TIMEOUT.get(settings).getMillis());

    ByteSizeValue receivePredictor = SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE.get(settings);
    recvByteBufAllocator = new FixedRecvByteBufAllocator(receivePredictor.bytesAsInt());

    this.compression = SETTING_HTTP_COMPRESSION.get(settings);
    this.compressionLevel = SETTING_HTTP_COMPRESSION_LEVEL.get(settings);
    this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings);
    this.corsConfig = buildCorsConfig(settings);

    logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}], " +
            "receive_predictor[{}], max_composite_buffer_components[{}], pipelining_max_events[{}]",
        maxChunkSize, maxHeaderSize, maxInitialLineLength, maxContentLength,
        receivePredictor, maxCompositeBufferComponents, pipeliningMaxEvents);
}
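The FixedRecvByteBufAllocator built from the receive-predictor setting is only stored here; it would be applied to accepted channels elsewhere (not shown in this excerpt) when the server bootstrap is assembled. Because the allocator is fixed, the "receive predictor" never actually adapts to observed request sizes; the setting simply pins the per-read buffer capacity.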