javax.sound.sampled.TargetDataLine#start() Code Examples

Listed below are code examples of javax.sound.sampled.TargetDataLine#start(), taken from open-source projects; follow each project's repository on GitHub to view the full source file.
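Before the project examples, here is a minimal, self-contained sketch of the TargetDataLine lifecycle that every snippet below follows (open, then start(), then read in a loop, then stop and close). The format and the roughly three-second capture window are arbitrary choices for illustration:

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.TargetDataLine;

public class MicCaptureSketch {
    public static void main(String[] args) throws Exception {
        // 16 kHz, 16-bit, mono, signed, little-endian PCM.
        AudioFormat format = new AudioFormat(16000f, 16, 1, true, false);
        DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);
        if (!AudioSystem.isLineSupported(info)) {
            System.err.println("No input line supports " + format);
            return;
        }
        TargetDataLine line = (TargetDataLine) AudioSystem.getLine(info);
        line.open(format);
        line.start();                               // begin capturing
        byte[] buffer = new byte[line.getBufferSize() / 5];
        int total = 0;
        while (total < 16000 * 2 * 3) {             // ~3 seconds at 2 bytes/frame
            int n = line.read(buffer, 0, buffer.length);
            total += n;                             // process buffer[0..n) here
        }
        line.stop();                                // stop capturing
        line.close();                               // release the line
    }
}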

Example 1 - Project: jsyn, File: JavaSoundAudioDevice.java
@Override
public void start() {
    DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);
    if (!AudioSystem.isLineSupported(info)) {
        // Handle the error.
        logger.severe("JavaSoundInputStream - format not supported: " + format);
    } else {
        try {
            line = (TargetDataLine) getDataLine(info);
            int bufferSize = calculateBufferSize(suggestedInputLatency);
            line.open(format, bufferSize);
            logger.fine("Input buffer size = " + bufferSize + " bytes.");
            line.start();
        } catch (Exception e) {
            e.printStackTrace();
            line = null;
        }
    }
}
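
calculateBufferSize() and getDataLine() are helpers elsewhere in JavaSoundAudioDevice and are not shown in this listing. A plausible sketch of the buffer-size helper, assuming suggestedInputLatency is expressed in seconds, converts the latency to frames and the frames to bytes:

// Hypothetical helper (name taken from the snippet above): converts a
// suggested latency in seconds into a buffer size in bytes for `format`.
private int calculateBufferSize(double suggestedInputLatency) {
    int frames = (int) (suggestedInputLatency * format.getFrameRate());
    return frames * format.getFrameSize();  // bytes = frames * bytes per frame
}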
 
 
Example 2 - Project: cloudExplorer, File: SoundRecorderThread.java
public void run() {
    try {
        AudioFormat format = getAudioFormat();
        DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);
        if (!AudioSystem.isLineSupported(info)) {
            NewJFrame.jTextArea1.append("\nError: Line not supported");
            calibrateTextArea();
        } else {
            line = (TargetDataLine) AudioSystem.getLine(info);
            line.open(format);
            line.start();
            NewJFrame.jTextArea1.append("\nRecording has started.");
            calibrateTextArea();
            AudioInputStream ais = new AudioInputStream(line);
            File wavFile = new File(temp_file);
            // Blocks here until another thread stops and closes the line.
            AudioSystem.write(ais, fileType, wavFile);
        }
    } catch (Exception recording) {
        NewJFrame.jTextArea1.append("\n" + recording.getMessage());
    }
}
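
AudioSystem.write(ais, fileType, wavFile) blocks until the audio stream ends, so some other thread must finish the recording by stopping and closing the line. A sketch of what that stop hook might look like (stopRecording is a hypothetical name, not taken from cloudExplorer):

// Stopping and closing the TargetDataLine ends the AudioInputStream,
// which lets the blocked AudioSystem.write(...) call return and
// finalize the WAV file.
public void stopRecording() {
    if (line != null) {
        line.stop();
        line.close();
    }
}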
 
Example 3

private byte[] record() throws LineUnavailableException {
    AudioFormat format = AudioUtil.getAudioFormat(audioConf);
    DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);

    // Checks if system supports the data line
    if (!AudioSystem.isLineSupported(info)) {
        LOGGER.error("Line not supported");
        System.exit(0);
    }

    microphone = (TargetDataLine) AudioSystem.getLine(info);
    microphone.open(format);
    microphone.start(); // begin audio capture

    LOGGER.info("Listening, tap enter to stop ...");

    ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
    int numBytesRead;
    byte[] data = new byte[microphone.getBufferSize() / 5];

    // Here, stopped is a global boolean set by another thread.
    while (!stopped) {
        // Read the next chunk of data from the TargetDataLine.
        numBytesRead = microphone.read(data, 0, data.length);
        // Save this chunk of data.
        byteArrayOutputStream.write(data, 0, numBytesRead);
    }

    return byteArrayOutputStream.toByteArray();
}
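
Because stopped is written by one thread and polled by the capture loop on another, it should be declared volatile (or replaced with an AtomicBoolean) so the update is guaranteed to become visible. A sketch of the stopping side, with stop() as a hypothetical method name:

// The flag must be volatile so the capture loop sees the update promptly.
private volatile boolean stopped = false;

public void stop() {
    stopped = true;       // the loop exits after its current read()
    microphone.stop();    // also unblocks a read() waiting for more data
    microphone.close();
}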
 
Example 4 - Project: Quelea, File: RecordingsHandler.java
/**
 * Initializes a new recording: captures audio and saves it to a WAV file.
 *
 * @param pb        the progress bar to update while the recording buffers
 * @param textField the text field that displays the recording's file name
 * @param tb        the toggle button controlling the recording
 */
public void start(ProgressBar pb, TextField textField, ToggleButton tb) {
    try {
        isRecording = true;
        String fileName = timeStamp;
        wavFile = new File(path, fileName + ".wav");
        Platform.runLater(() -> {
            textField.setText(fileName);
        });
        format = getAudioFormat();
        DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);

        // checks if system supports the data line
        if (AudioSystem.isLineSupported(info)) {
            LOGGER.log(Level.INFO, "Capturing audio");
            targetLine = (TargetDataLine) AudioSystem.getLine(info);
            targetLine.open(format);
            targetLine.start();   // start capturing
            ais = new AudioInputStream(targetLine);
            startBuffering(pb, tb);
        } else {
            LOGGER.log(Level.INFO, "No recording device found");
            Platform.runLater(() -> {
                Dialog.Builder setRecordingWarningBuilder = new Dialog.Builder()
                        .create()
                        .setTitle(LabelGrabber.INSTANCE.getLabel("recording.no.devices.title"))
                        .setMessage(LabelGrabber.INSTANCE.getLabel("recording.no.devices.message"))
                        .addLabelledButton(LabelGrabber.INSTANCE.getLabel("ok.button"), (ActionEvent t) -> {
                            noDevicesDialog.hide();
                            noDevicesDialog = null;
                        });
                noDevicesDialog = setRecordingWarningBuilder.setWarningIcon().build();
                noDevicesDialog.show();
            });
            Platform.runLater(() -> {
                QueleaApp.get().getMainWindow().getMainToolbar().stopRecording();
            });
        }
    } catch (LineUnavailableException ex) {
        LOGGER.log(Level.WARNING, "Line unavailable", ex);
    }
}
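
getAudioFormat() is a Quelea helper not shown above. A typical implementation for WAV voice recording returns signed 16-bit little-endian PCM; the exact parameters below are an assumption for illustration, not Quelea's actual values:

// Hypothetical getAudioFormat(): 44.1 kHz, 16-bit, stereo, signed,
// little-endian PCM, a common format for WAV recordings.
private AudioFormat getAudioFormat() {
    return new AudioFormat(44100f, 16, 2, true, false);
}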
 
Example 5

/**
 * The main method.
 *
 * @param args the arguments
 * @throws Exception the exception
 */
public static void main(final String[] args) throws Exception {
  Authenticator authenticator = new IamAuthenticator("<iam_api_key>");
  SpeechToText service = new SpeechToText(authenticator);

  // Signed PCM AudioFormat with 16kHz, 16 bit sample size, mono
  int sampleRate = 16000;
  AudioFormat format = new AudioFormat(sampleRate, 16, 1, true, false);
  DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);

  if (!AudioSystem.isLineSupported(info)) {
    System.out.println("Line not supported");
    System.exit(0);
  }

  TargetDataLine line = (TargetDataLine) AudioSystem.getLine(info);
  line.open(format);
  line.start();

  AudioInputStream audio = new AudioInputStream(line);

  RecognizeOptions options =
      new RecognizeOptions.Builder()
          .audio(audio)
          .interimResults(true)
          .timestamps(true)
          .wordConfidence(true)
          // .inactivityTimeout(5) // stop listening when the speaker pauses, e.g. for 5s
          .contentType(HttpMediaType.AUDIO_RAW + ";rate=" + sampleRate)
          .build();

  service.recognizeUsingWebSocket(
      options,
      new BaseRecognizeCallback() {
        @Override
        public void onTranscription(SpeechRecognitionResults speechResults) {
          System.out.println(speechResults);
        }
      });

  System.out.println("Listening to your voice for the next 30s...");
  Thread.sleep(30 * 1000);

  // Closing the WebSocket's underlying InputStream will close the WebSocket itself.
  line.stop();
  line.close();

  System.out.println("Fin.");
}
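
BaseRecognizeCallback also exposes error and disconnect hooks, and overriding them keeps transport failures from being silently swallowed. A sketch of a more defensive callback using the same API as above:

new BaseRecognizeCallback() {
  @Override
  public void onTranscription(SpeechRecognitionResults speechResults) {
    System.out.println(speechResults);
  }

  @Override
  public void onError(Exception e) {
    e.printStackTrace();                    // surface WebSocket failures
  }

  @Override
  public void onDisconnected() {
    System.out.println("WebSocket closed.");
  }
};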
 
Example 6 - Project: java-docs-samples, File: Recognize.java
/** Performs microphone streaming speech recognition with a duration of 1 minute. */
public static void streamingMicRecognize() throws Exception {

  ResponseObserver<StreamingRecognizeResponse> responseObserver = null;
  try (SpeechClient client = SpeechClient.create()) {

    responseObserver =
        new ResponseObserver<StreamingRecognizeResponse>() {
          ArrayList<StreamingRecognizeResponse> responses = new ArrayList<>();

          public void onStart(StreamController controller) {}

          public void onResponse(StreamingRecognizeResponse response) {
            responses.add(response);
          }

          public void onComplete() {
            for (StreamingRecognizeResponse response : responses) {
              StreamingRecognitionResult result = response.getResultsList().get(0);
              SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
              System.out.printf("Transcript : %s\n", alternative.getTranscript());
            }
          }

          public void onError(Throwable t) {
            System.out.println(t);
          }
        };

    ClientStream<StreamingRecognizeRequest> clientStream =
        client.streamingRecognizeCallable().splitCall(responseObserver);

    RecognitionConfig recognitionConfig =
        RecognitionConfig.newBuilder()
            .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
            .setLanguageCode("en-US")
            .setSampleRateHertz(16000)
            .build();
    StreamingRecognitionConfig streamingRecognitionConfig =
        StreamingRecognitionConfig.newBuilder().setConfig(recognitionConfig).build();

    StreamingRecognizeRequest request =
        StreamingRecognizeRequest.newBuilder()
            .setStreamingConfig(streamingRecognitionConfig)
            .build(); // The first request in a streaming call has to be a config

    clientStream.send(request);
    // SampleRate:16000Hz, SampleSizeInBits: 16, Number of channels: 1, Signed: true,
    // bigEndian: false
    AudioFormat audioFormat = new AudioFormat(16000, 16, 1, true, false);
    DataLine.Info targetInfo =
        new Info(
            TargetDataLine.class,
            audioFormat); // Set the system information to read from the microphone audio stream

    if (!AudioSystem.isLineSupported(targetInfo)) {
      System.out.println("Microphone not supported");
      System.exit(0);
    }
    // Target data line captures the audio stream the microphone produces.
    TargetDataLine targetDataLine = (TargetDataLine) AudioSystem.getLine(targetInfo);
    targetDataLine.open(audioFormat);
    targetDataLine.start();
    System.out.println("Start speaking");
    long startTime = System.currentTimeMillis();
    // Audio Input Stream
    AudioInputStream audio = new AudioInputStream(targetDataLine);
    while (true) {
      long estimatedTime = System.currentTimeMillis() - startTime;
      byte[] data = new byte[6400];
      // The byte count returned by read() is ignored here; see the variant
      // after this example.
      audio.read(data);
      if (estimatedTime > 60000) { // 60 seconds
        System.out.println("Stop speaking.");
        targetDataLine.stop();
        targetDataLine.close();
        break;
      }
      request =
          StreamingRecognizeRequest.newBuilder()
              .setAudioContent(ByteString.copyFrom(data))
              .build();
      clientStream.send(request);
    }
  } catch (Exception e) {
    System.out.println(e);
  }
  responseObserver.onComplete();
}
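
One caveat in the capture loop above: audio.read(data) returns the number of bytes actually read, and ignoring it means a short read sends stale bytes from the end of the buffer. A variant of the loop body that respects the count:

// Only send the bytes that read() actually filled in.
byte[] data = new byte[6400];
int bytesRead = audio.read(data);
if (bytesRead > 0) {
  request =
      StreamingRecognizeRequest.newBuilder()
          .setAudioContent(ByteString.copyFrom(data, 0, bytesRead))
          .build();
  clientStream.send(request);
}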
 