javax.websocket.SendHandler#onResult() Source Code Examples

Listed below are example usages of javax.websocket.SendHandler#onResult(); you can also follow the links to view the full source on GitHub.
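
Before looking at the container internals, here is a minimal usage sketch: SendHandler is the callback passed to the asynchronous send methods of javax.websocket.RemoteEndpoint.Async, and onResult(SendResult) is invoked once the message has been transmitted or has failed. The AsyncSender class below is hypothetical; the API calls are standard javax.websocket.

import javax.websocket.SendHandler;
import javax.websocket.SendResult;
import javax.websocket.Session;

// Hypothetical helper: send a text frame asynchronously and report the outcome.
public class AsyncSender {

    public void send(Session session, String message) {
        session.getAsyncRemote().sendText(message, new SendHandler() {
            @Override
            public void onResult(SendResult result) {
                if (result.isOK()) {
                    System.out.println("Message sent");
                } else {
                    // getException() carries the cause of the failure
                    result.getException().printStackTrace();
                }
            }
        });
    }
}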

private void sendObjectImpl(final Object o, final SendHandler callback) {
    try {
        if (o instanceof String) {
            sendText((String) o, callback);
        } else if (o instanceof byte[]) {
            sendBinary(ByteBuffer.wrap((byte[]) o), callback);
        } else if (o instanceof ByteBuffer) {
            sendBinary((ByteBuffer) o, callback);
        } else if (encoding.canEncodeText(o.getClass())) {
            sendText(encoding.encodeText(o), callback);
        } else if (encoding.canEncodeBinary(o.getClass())) {
            sendBinary(encoding.encodeBinary(o), callback);
        } else {
            // TODO: Replace once the bug is fixed
            // https://issues.jboss.org/browse/LOGTOOL-64
            throw new EncodeException(o, "No suitable encoder found");
        }
    } catch (Exception e) {
        callback.onResult(new SendResult(e));
    }
}
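
The sendObjectImpl example above falls back to any encoder registered for the object's class. As a hedged sketch of what such an encoder can look like, the following implements javax.websocket.Encoder.Text for a made-up Message type; only the Encoder.Text interface and its methods are standard, the payload type is an assumption for illustration.

import javax.websocket.EncodeException;
import javax.websocket.Encoder;
import javax.websocket.EndpointConfig;

// Hypothetical payload type, defined only for this sketch.
class Message {
    final String text;
    Message(String text) { this.text = text; }
}

// Hypothetical encoder: once registered on the endpoint, it lets sendObject(...)
// route a Message through the text path taken by sendObjectImpl above.
public class MessageEncoder implements Encoder.Text<Message> {

    @Override
    public void init(EndpointConfig config) {
        // no state to initialise
    }

    @Override
    public String encode(Message message) throws EncodeException {
        return message.text;
    }

    @Override
    public void destroy() {
        // no resources to release
    }
}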
 
void endMessage(SendHandler handler, SendResult result) {
    boolean doWrite = false;
    MessagePart mpNext = null;
    synchronized (messagePartLock) {

        fragmented = nextFragmented;
        text = nextText;

        mpNext = messagePartQueue.poll();
        if (mpNext == null) {
            messagePartInProgress.release();
        } else if (!closed){
            // Session may have been closed unexpectedly in the middle of
            // sending a fragmented message, closing the endpoint. If this
            // happens there is clearly no point in trying to send the rest
            // of the message.
            doWrite = true;
        }
    }
    if (doWrite) {
        // Actual write has to be outside sync block to avoid possible
        // deadlock between messagePartLock and writeLock in
        // o.a.coyote.http11.upgrade.AbstractServletOutputStream
        writeMessagePart(mpNext);
    }

    wsSession.updateLastActive();

    // Some handlers, such as the IntermediateMessageHandler, do not have a
    // nested handler so handler may be null.
    if (handler != null) {
        handler.onResult(result);
    }
}
 
@Override
protected void doWrite(SendHandler handler, long blockingWriteTimeoutExpiry,
        ByteBuffer... data) {
    long timeout;
    for (ByteBuffer byteBuffer : data) {
        if (blockingWriteTimeoutExpiry == -1) {
            timeout = getSendTimeout();
            if (timeout < 1) {
                timeout = Long.MAX_VALUE;
            }
        } else {
            timeout = blockingWriteTimeoutExpiry - System.currentTimeMillis();
            if (timeout < 0) {
                SendResult sr = new SendResult(new IOException(sm.getString("wsRemoteEndpoint.writeTimeout")));
                handler.onResult(sr);
                // The deadline has already passed; return so the handler is
                // not invoked a second time below.
                return;
            }
        }

        try {
            channel.write(byteBuffer).get(timeout, TimeUnit.MILLISECONDS);
        } catch (InterruptedException | ExecutionException | TimeoutException e) {
            handler.onResult(new SendResult(e));
            return;
        }
    }
    handler.onResult(SENDRESULT_OK);
}
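
When blockingWriteTimeoutExpiry is -1, the timeout used by doWrite comes from getSendTimeout(), which reflects whatever the application sets on the async remote endpoint. A minimal sketch, assuming a hypothetical helper class (setSendTimeout is standard javax.websocket API; a non-positive value means no timeout, matching the timeout < 1 check above):

import javax.websocket.RemoteEndpoint;
import javax.websocket.Session;

// Hypothetical helper showing where the value read by getSendTimeout() comes from.
public class TimeoutConfig {
    public static void apply(Session session) {
        RemoteEndpoint.Async remote = session.getAsyncRemote();
        remote.setSendTimeout(5000); // milliseconds; <= 0 disables the timeout
    }
}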
 
/**
 *
 * @param t             The throwable associated with any error that
 *                      occurred
 * @param useDispatch   Should {@link SendHandler#onResult(SendResult)} be
 *                      called from a new thread, keeping in mind the
 *                      requirements of
 *                      {@link javax.websocket.RemoteEndpoint.Async}
 */
private void clearHandler(Throwable t, boolean useDispatch) {
    // Setting the result marks this (partial) message as
    // complete which means the next one may be sent which
    // could update the value of the handler. Therefore, keep a
    // local copy before signalling the end of the (partial)
    // message.
    SendHandler sh = handler;
    handler = null;
    buffers = null;
    if (sh != null) {
        if (useDispatch) {
            OnResultRunnable r = new OnResultRunnable(sh, t);
            AbstractEndpoint<?> endpoint = socketWrapper.getEndpoint();
            Executor containerExecutor = endpoint.getExecutor();
            if (endpoint.isRunning() && containerExecutor != null) {
                containerExecutor.execute(r);
            } else {
                // Can't use the executor so call the runnable directly.
                // This may not be strictly specification compliant in all
                // cases but during shutdown only close messages are going
                // to be sent so there should not be the issue of nested
                // calls leading to stack overflow as described in bug
                // 55715. The issues with nested calls was the reason for
                // the separate thread requirement in the specification.
                r.run();
            }
        } else {
            if (t == null) {
                sh.onResult(new SendResult());
            } else {
                sh.onResult(new SendResult(t));
            }
        }
    }
}
 
Example 5 (Project: Tomcat7.0.67, File: WsRemoteEndpointImplBase.java)
void endMessage(SendHandler handler, SendResult result) {
    boolean doWrite = false;
    MessagePart mpNext = null;
    synchronized (messagePartLock) {

        fragmented = nextFragmented;
        text = nextText;

        mpNext = messagePartQueue.poll();
        if (mpNext == null) {
            messagePartInProgress = false;
        } else if (!closed){
            // Session may have been closed unexpectedly in the middle of
            // sending a fragmented message, closing the endpoint. If this
            // happens there is clearly no point in trying to send the rest
            // of the message.
            doWrite = true;
        }
    }
    if (doWrite) {
        // Actual write has to be outside sync block to avoid possible
        // deadlock between messagePartLock and writeLock in
        // o.a.coyote.http11.upgrade.AbstractServletOutputStream
        writeMessagePart(mpNext);
    }

    wsSession.updateLastActive();

    // Some handlers, such as the IntermediateMessageHandler, do not have a
    // nested handler so handler may be null.
    if (handler != null) {
        handler.onResult(result);
    }
}
 
Example 6 (Project: Tomcat7.0.67, File: WsRemoteEndpointImplServer.java)
/**
 *
 * @param t             The throwable associated with any error that
 *                      occurred
 * @param useDispatch   Should {@link SendHandler#onResult(SendResult)} be
 *                      called from a new thread, keeping in mind the
 *                      requirements of
 *                      {@link javax.websocket.RemoteEndpoint.Async}
 */
private void clearHandler(Throwable t, boolean useDispatch) {
    // Setting the result marks this (partial) message as
    // complete which means the next one may be sent which
    // could update the value of the handler. Therefore, keep a
    // local copy before signalling the end of the (partial)
    // message.
    SendHandler sh = handler;
    handler = null;
    buffers = null;
    if (sh != null) {
        if (useDispatch) {
            OnResultRunnable r = onResultRunnables.poll();
            if (r == null) {
                r = new OnResultRunnable(onResultRunnables);
            }
            r.init(sh, t);
            if (executorService == null || executorService.isShutdown()) {
                // Can't use the executor so call the runnable directly.
                // This may not be strictly specification compliant in all
                // cases but during shutdown only close messages are going
                // to be sent so there should not be the issue of nested
                // calls leading to stack overflow as described in bug
                // 55715. The issues with nested calls was the reason for
                // the separate thread requirement in the specification.
                r.run();
            } else {
                executorService.execute(r);
            }
        } else {
            if (t == null) {
                sh.onResult(new SendResult());
            } else {
                sh.onResult(new SendResult(t));
            }
        }
    }
}
 
Example 7 (Project: tomcatsrc, File: WsRemoteEndpointImplBase.java)
void endMessage(SendHandler handler, SendResult result) {
    boolean doWrite = false;
    MessagePart mpNext = null;
    synchronized (messagePartLock) {

        fragmented = nextFragmented;
        text = nextText;

        mpNext = messagePartQueue.poll();
        if (mpNext == null) {
            messagePartInProgress = false;
        } else if (!closed){
            // Session may have been closed unexpectedly in the middle of
            // sending a fragmented message, closing the endpoint. If this
            // happens there is clearly no point in trying to send the rest
            // of the message.
            doWrite = true;
        }
    }
    if (doWrite) {
        // Actual write has to be outside sync block to avoid possible
        // deadlock between messagePartLock and writeLock in
        // o.a.coyote.http11.upgrade.AbstractServletOutputStream
        writeMessagePart(mpNext);
    }

    wsSession.updateLastActive();

    // Some handlers, such as the IntermediateMessageHandler, do not have a
    // nested handler so handler may be null.
    if (handler != null) {
        handler.onResult(result);
    }
}
 
Example 8 (Project: tomcatsrc, File: WsRemoteEndpointImplServer.java)
/**
 *
 * @param t             The throwable associated with any error that
 *                      occurred
 * @param useDispatch   Should {@link SendHandler#onResult(SendResult)} be
 *                      called from a new thread, keeping in mind the
 *                      requirements of
 *                      {@link javax.websocket.RemoteEndpoint.Async}
 */
private void clearHandler(Throwable t, boolean useDispatch) {
    // Setting the result marks this (partial) message as
    // complete which means the next one may be sent which
    // could update the value of the handler. Therefore, keep a
    // local copy before signalling the end of the (partial)
    // message.
    SendHandler sh = handler;
    handler = null;
    buffers = null;
    if (sh != null) {
        if (useDispatch) {
            OnResultRunnable r = onResultRunnables.poll();
            if (r == null) {
                r = new OnResultRunnable(onResultRunnables);
            }
            r.init(sh, t);
            if (executorService == null || executorService.isShutdown()) {
                // Can't use the executor so call the runnable directly.
                // This may not be strictly specification compliant in all
                // cases but during shutdown only close messages are going
                // to be sent so there should not be the issue of nested
                // calls leading to stack overflow as described in bug
                // 55715. The issues with nested calls was the reason for
                // the separate thread requirement in the specification.
                r.run();
            } else {
                executorService.execute(r);
            }
        } else {
            if (t == null) {
                sh.onResult(new SendResult());
            } else {
                sh.onResult(new SendResult(t));
            }
        }
    }
}
 
void startMessage(byte opCode, ByteBuffer payload, boolean last,
        SendHandler handler) {

    wsSession.updateLastActive();

    List<MessagePart> messageParts = new ArrayList<>();
    messageParts.add(new MessagePart(last, 0, opCode, payload,
            intermediateMessageHandler,
            new EndMessageHandler(this, handler), -1));

    try {
        messageParts = transformation.sendMessagePart(messageParts);
    } catch (IOException ioe) {
        handler.onResult(new SendResult(ioe));
        return;
    }

    // Some extensions/transformations may buffer messages so it is possible
    // that no message parts will be returned. If this is the case, trigger
    // the supplied SendHandler.
    if (messageParts.size() == 0) {
        handler.onResult(new SendResult());
        return;
    }

    MessagePart mp = messageParts.remove(0);

    boolean doWrite = false;
    synchronized (messagePartLock) {
        if (Constants.OPCODE_CLOSE == mp.getOpCode() && getBatchingAllowed()) {
            // Should not happen. Too late to send batched messages now since
            // the session has been closed. Complain loudly.
            log.warn(sm.getString("wsRemoteEndpoint.flushOnCloseFailed"));
        }
        if (messagePartInProgress.tryAcquire()) {
            doWrite = true;
        } else {
            // When a control message is sent while another message is being
            // sent, the control message is queued. Chances are the
            // subsequent data message part will end up queued while the
            // control message is sent. The logic in this class (state
            // machine, EndMessageHandler, TextMessageSendHandler) ensures
            // that there will only ever be one data message part in the
            // queue. There could be multiple control messages in the queue.

            // Add it to the queue
            messagePartQueue.add(mp);
        }
        // Add any remaining messages to the queue
        messagePartQueue.addAll(messageParts);
    }
    if (doWrite) {
        // Actual write has to be outside sync block to avoid possible
        // deadlock between messagePartLock and writeLock in
        // o.a.coyote.http11.upgrade.AbstractServletOutputStream
        writeMessagePart(mp);
    }
}
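
The queueing logic above exists because control frames (ping, pong, close) may be submitted while a data message is still being written; such parts are parked in messagePartQueue until the in-progress write completes. A hedged sketch of the kind of application call that reaches this path (the KeepAlive class is hypothetical; sendPing is standard javax.websocket.RemoteEndpoint API):

import java.io.IOException;
import java.nio.ByteBuffer;
import javax.websocket.Session;

// Hypothetical keep-alive: sending a ping while another message may still be
// in flight exercises the messagePartQueue handling shown in startMessage().
public class KeepAlive {
    public static void ping(Session session) throws IOException {
        session.getBasicRemote().sendPing(ByteBuffer.wrap(new byte[] { 1 }));
    }
}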
 
Example 10 (Project: Tomcat7.0.67, File: WsRemoteEndpointImplBase.java)
void startMessage(byte opCode, ByteBuffer payload, boolean last,
        SendHandler handler) {

    wsSession.updateLastActive();

    List<MessagePart> messageParts = new ArrayList<MessagePart>();
    messageParts.add(new MessagePart(last, 0, opCode, payload,
            intermediateMessageHandler,
            new EndMessageHandler(this, handler)));

    messageParts = transformation.sendMessagePart(messageParts);

    // Some extensions/transformations may buffer messages so it is possible
    // that no message parts will be returned. If this is the case, trigger
    // the supplied SendHandler.
    if (messageParts.size() == 0) {
        handler.onResult(new SendResult());
        return;
    }

    MessagePart mp = messageParts.remove(0);

    boolean doWrite = false;
    synchronized (messagePartLock) {
        if (Constants.OPCODE_CLOSE == mp.getOpCode() && getBatchingAllowed()) {
            // Should not happen. Too late to send batched messages now since
            // the session has been closed. Complain loudly.
            log.warn(sm.getString("wsRemoteEndpoint.flushOnCloseFailed"));
        }
        if (messagePartInProgress) {
            // When a control message is sent while another message is being
            // sent, the control message is queued. Chances are the
            // subsequent data message part will end up queued while the
            // control message is sent. The logic in this class (state
            // machine, EndMessageHandler, TextMessageSendHandler) ensures
            // that there will only ever be one data message part in the
            // queue. There could be multiple control messages in the queue.

            // Add it to the queue
            messagePartQueue.add(mp);
        } else {
            messagePartInProgress = true;
            doWrite = true;
        }
        // Add any remaining messages to the queue
        messagePartQueue.addAll(messageParts);
    }
    if (doWrite) {
        // Actual write has to be outside sync block to avoid possible
        // deadlock between messagePartLock and writeLock in
        // o.a.coyote.http11.upgrade.AbstractServletOutputStream
        writeMessagePart(mp);
    }
}
 
Example 11 (Project: tomcatsrc, File: WsRemoteEndpointImplBase.java)
void startMessage(byte opCode, ByteBuffer payload, boolean last,
        SendHandler handler) {

    wsSession.updateLastActive();

    List<MessagePart> messageParts = new ArrayList<MessagePart>();
    messageParts.add(new MessagePart(last, 0, opCode, payload,
            intermediateMessageHandler,
            new EndMessageHandler(this, handler)));

    messageParts = transformation.sendMessagePart(messageParts);

    // Some extensions/transformations may buffer messages so it is possible
    // that no message parts will be returned. If this is the case, trigger
    // the supplied SendHandler.
    if (messageParts.size() == 0) {
        handler.onResult(new SendResult());
        return;
    }

    MessagePart mp = messageParts.remove(0);

    boolean doWrite = false;
    synchronized (messagePartLock) {
        if (Constants.OPCODE_CLOSE == mp.getOpCode() && getBatchingAllowed()) {
            // Should not happen. Too late to send batched messages now since
            // the session has been closed. Complain loudly.
            log.warn(sm.getString("wsRemoteEndpoint.flushOnCloseFailed"));
        }
        if (messagePartInProgress) {
            // When a control message is sent while another message is being
            // sent, the control message is queued. Chances are the
            // subsequent data message part will end up queued while the
            // control message is sent. The logic in this class (state
            // machine, EndMessageHandler, TextMessageSendHandler) ensures
            // that there will only ever be one data message part in the
            // queue. There could be multiple control messages in the queue.

            // Add it to the queue
            messagePartQueue.add(mp);
        } else {
            messagePartInProgress = true;
            doWrite = true;
        }
        // Add any remaining messages to the queue
        messagePartQueue.addAll(messageParts);
    }
    if (doWrite) {
        // Actual write has to be outside sync block to avoid possible
        // deadlock between messagePartLock and writeLock in
        // o.a.coyote.http11.upgrade.AbstractServletOutputStream
        writeMessagePart(mp);
    }
}
 