javax.sound.sampled.TargetDataLine#stop() Code Examples

Listed below is example code for javax.sound.sampled.TargetDataLine#stop(); follow each project link to view the full source on GitHub.
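
All of the examples below follow the same capture lifecycle: open a TargetDataLine for a given AudioFormat, start() it, read captured bytes, then stop() and close() it. Here is a minimal, self-contained sketch of that lifecycle; the format, buffer size, and class name are illustrative and not taken from any of the examples:

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.LineUnavailableException;
import javax.sound.sampled.TargetDataLine;

public class TargetDataLineStopSketch {
  public static void main(String[] args) throws LineUnavailableException {
    // Illustrative format: 16 kHz, 16-bit, mono, signed, little-endian PCM
    AudioFormat format = new AudioFormat(16000, 16, 1, true, false);
    DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);

    TargetDataLine line = (TargetDataLine) AudioSystem.getLine(info);
    line.open(format);
    line.start(); // begin capturing from the default input device

    byte[] buffer = new byte[4096];
    int n = line.read(buffer, 0, buffer.length); // blocks until the buffer is filled
    System.out.println("captured " + n + " bytes");

    line.stop();  // halt audio I/O; a later start() would resume capture
    line.close(); // release the line's system resources
  }
}

Note that stop() only pauses I/O on the line; it is close() that actually frees the underlying device, which is why the examples below always call the two together.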

Example 1  Project: openjdk-jdk9   File: bug6372428.java
void testRecord() throws LineUnavailableException {
    // prepare audio data
    AudioFormat format = new AudioFormat(22050, 8, 1, false, false);

    // create & open target data line
    //TargetDataLine line = AudioSystem.getTargetDataLine(format);
    DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);
    TargetDataLine line = (TargetDataLine)AudioSystem.getLine(info);

    line.open(format);

    // start read data thread
    byte[] data = new byte[(int) (format.getFrameRate() * format.getFrameSize() * DATA_LENGTH)];
    ReadThread p1 = new ReadThread(line, data);
    p1.start();

    // start line
    //new RecordThread(line).start();
    RecordThread p2 = new RecordThread(line);
    p2.start();

    // monitor line: its microsecond position must never decrease while capturing
    long endTime = currentTimeMillis() + DATA_LENGTH * 1000;

    long realTime1 = currentTimeMillis();
    long lineTime1 = line.getMicrosecondPosition() / 1000;

    while (realTime1 < endTime && !p1.isCompleted()) {
        delay(100);
        long lineTime2 = line.getMicrosecondPosition() / 1000;
        long realTime2 = currentTimeMillis();
        long dLineTime = lineTime2 - lineTime1;
        long dRealTime = realTime2 - realTime1;
        log("line pos: " + lineTime2 + "ms" + ", thread is " + (p2.isAlive() ? "alive" : "DIED"));
        if (dLineTime < 0) {
            line.stop();
            line.close();
            throw new RuntimeException("ERROR: line position has decreased from " + lineTime1 + " to " + lineTime2);
        }
        if (dRealTime < 450) {
            // delay() has been interrupted?
            continue;
        }
        lineTime1 = lineTime2;
        realTime1 = realTime2;
    }
    log("stopping line...");
    line.stop();
    line.close();

    /*
    log("");
    log("");
    log("");
    log("recording completed, delaying 5 sec");
    log("recorded " + p1.getCount() + " bytes, " + DATA_LENGTH + " seconds: " + (p1.getCount() * 8 / DATA_LENGTH) + " bit/sec");
    log("");
    log("");
    log("");
    delay(5000);
    log("starting playing...");
    playRecorded(format, data);
    */
}
 
Example 2

/**
 * The main method.
 *
 * @param args the arguments
 * @throws Exception the exception
 */
public static void main(final String[] args) throws Exception {
  Authenticator authenticator = new IamAuthenticator("<iam_api_key>");
  SpeechToText service = new SpeechToText(authenticator);

  // Signed PCM AudioFormat with 16kHz, 16 bit sample size, mono
  int sampleRate = 16000;
  AudioFormat format = new AudioFormat(sampleRate, 16, 1, true, false);
  DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);

  if (!AudioSystem.isLineSupported(info)) {
    System.out.println("Line not supported");
    System.exit(0);
  }

  TargetDataLine line = (TargetDataLine) AudioSystem.getLine(info);
  line.open(format);
  line.start();

  // wrap the line so the captured audio can be streamed to the recognizer
  AudioInputStream audio = new AudioInputStream(line);

  RecognizeOptions options =
      new RecognizeOptions.Builder()
          .audio(audio)
          .interimResults(true)
          .timestamps(true)
          .wordConfidence(true)
          // .inactivityTimeout(5) // use this to stop listening when the speaker pauses for 5s
          .contentType(HttpMediaType.AUDIO_RAW + ";rate=" + sampleRate)
          .build();

  service.recognizeUsingWebSocket(
      options,
      new BaseRecognizeCallback() {
        @Override
        public void onTranscription(SpeechRecognitionResults speechResults) {
          System.out.println(speechResults);
        }
      });

  System.out.println("Listening to your voice for the next 30s...");
  Thread.sleep(30 * 1000);

  // closing the WebSocket's underlying InputStream will close the WebSocket itself.
  line.stop();
  line.close();

  System.out.println("Fin.");
}
 
Example 3  Project: java-docs-samples   File: Recognize.java
/** Performs microphone streaming speech recognition with a duration of 1 minute. */
public static void streamingMicRecognize() throws Exception {

  ResponseObserver<StreamingRecognizeResponse> responseObserver = null;
  try (SpeechClient client = SpeechClient.create()) {

    responseObserver =
        new ResponseObserver<StreamingRecognizeResponse>() {
          ArrayList<StreamingRecognizeResponse> responses = new ArrayList<>();

          public void onStart(StreamController controller) {}

          public void onResponse(StreamingRecognizeResponse response) {
            responses.add(response);
          }

          public void onComplete() {
            for (StreamingRecognizeResponse response : responses) {
              StreamingRecognitionResult result = response.getResultsList().get(0);
              SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
              System.out.printf("Transcript : %s\n", alternative.getTranscript());
            }
          }

          public void onError(Throwable t) {
            System.out.println(t);
          }
        };

    ClientStream<StreamingRecognizeRequest> clientStream =
        client.streamingRecognizeCallable().splitCall(responseObserver);

    RecognitionConfig recognitionConfig =
        RecognitionConfig.newBuilder()
            .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
            .setLanguageCode("en-US")
            .setSampleRateHertz(16000)
            .build();
    StreamingRecognitionConfig streamingRecognitionConfig =
        StreamingRecognitionConfig.newBuilder().setConfig(recognitionConfig).build();

    StreamingRecognizeRequest request =
        StreamingRecognizeRequest.newBuilder()
            .setStreamingConfig(streamingRecognitionConfig)
            .build(); // The first request in a streaming call has to be a config

    clientStream.send(request);
    // SampleRate:16000Hz, SampleSizeInBits: 16, Number of channels: 1, Signed: true,
    // bigEndian: false
    AudioFormat audioFormat = new AudioFormat(16000, 16, 1, true, false);
    DataLine.Info targetInfo =
        new DataLine.Info(
            TargetDataLine.class,
            audioFormat); // Set the system information to read from the microphone audio stream

    if (!AudioSystem.isLineSupported(targetInfo)) {
      System.out.println("Microphone not supported");
      System.exit(0);
    }
    // Target data line captures the audio stream the microphone produces.
    TargetDataLine targetDataLine = (TargetDataLine) AudioSystem.getLine(targetInfo);
    targetDataLine.open(audioFormat);
    targetDataLine.start();
    System.out.println("Start speaking");
    long startTime = System.currentTimeMillis();
    // Audio Input Stream
    AudioInputStream audio = new AudioInputStream(targetDataLine);
    while (true) {
      long estimatedTime = System.currentTimeMillis() - startTime;
      byte[] data = new byte[6400]; // 200 ms of 16 kHz, 16-bit mono audio
      audio.read(data); // blocking read; the returned byte count is ignored here
      if (estimatedTime > 60000) { // 60 seconds
        System.out.println("Stop speaking.");
        targetDataLine.stop();
        targetDataLine.close();
        break;
      }
      request =
          StreamingRecognizeRequest.newBuilder()
              .setAudioContent(ByteString.copyFrom(data))
              .build();
      clientStream.send(request);
    }
  } catch (Exception e) {
    System.out.println(e);
  }
  if (responseObserver != null) {
    responseObserver.onComplete();
  }
}
 