项目有需求,需要集成语音转文字,于是开始研究,一步一个坑

1.集成录音 react-native-audio

2.集成文件操作 react-native-fs

3.集成音频格式转换 react-native-audiotransition。上面两个问题不大,第三步有个坑,rn-audio的录音格式为AAC,需要将AAC转成其他音频格式再传给百度语音,所以需要安装 react-native-audiotransition,安装后如果不翻墙,需要在RN安卓项目的build.gradle文件添加

allprojects {
    repositories {
        mavenLocal()
        jcenter()
        maven {
            // All of React Native (JS, Obj-C sources, Android binaries) is installed from npm
            url "$rootDir/../node_modules/react-native/android"
        }
        maven {
            // jitpack hosts the native dependency of react-native-audiotransition
            url "https://jitpack.io"
        }
    }
}

4.鉴权认证:使用appKey secretKey 访问 https://openapi.baidu.com 换取 token(有效期一个月)

5.百度语音的HTTP POST 请求

下面上全部代码

import React, { Component } from 'react';
import {
  StyleSheet,
  Text,
  View,
  Dimensions,
  Platform,
  TouchableHighlight
} from 'react-native';


import { AudioRecorder, AudioUtils } from 'react-native-audio';
import RNFS from 'react-native-fs';
import RNAudiotransition from 'react-native-audiotransition';

const ISIPHONEX = Dimensions.get('window').width == 375 && Dimensions.get('window').height == 812


export default class Kedaxunfei extends Component {

  state = {
    currentTime: 0.0,           //开始录音到现在的持续时间
    recording: false,           //是否正在录音
    paused: false,              //是否暂停了录音
    stoppedRecording: false,    //是否停止了录音
    finished: false,            //是否完成录音
    audioPath: AudioUtils.DownloadsDirectoryPath + '/luyin.aac',//路径下的文件名
    hasPermission: undefined,   //是否获取权限
    audioSize: 0,                //音频size
    result: "",                  //语音返回值
  };

  prepareRecordingPath(audioPath) {
    AudioRecorder.prepareRecordingAtPath(audioPath, {
      SampleRate: 16000,            //采样率
      Channels: 1,                  //通道
      AudioQuality: "High",         //音质(Low, Medium, High)
      AudioEncoding: "aac",         //音频编码(aac编码iOS和Android均支持)
      AudioEncodingBitRate: 48000,  //音频编码比特率
      IncludeBase64: true,          //是否是base64格式
    });
  }
  componentDidMount() {
      //初始化音频格式转化
    RNAudiotransition.initAudioTransition();

    //初始化录音
    AudioRecorder.requestAuthorization().then((isAuthorised) => {
      this.setState({ hasPermission: isAuthorised });

      if (!isAuthorised) return;

      this.prepareRecordingPath(this.state.audioPath);

      AudioRecorder.onProgress = (data) => {
        this.setState({ currentTime: Math.floor(data.currentTime) });
      };

      AudioRecorder.onFinished = (data) => {
        // Android callback comes in the form of a promise instead.
        if (Platform.OS === 'ios') {
          this._finishRecording(data.status === "OK", data.audioFileURL, data.audioFileSize);
        }
      };
    });


  }
  _renderButton(title, onPress, active) {
    var style = (active) ? styles.activeButtonText : styles.buttonText;

    return (
      <TouchableHighlight style={styles.button} onPress={onPress}>
        <Text style={style}>
          {title}
        </Text>
      </TouchableHighlight>
    );
  }

  _renderPauseButton(onPress, active) {
    var style = (active) ? styles.activeButtonText : styles.buttonText;
    var title = this.state.paused ? "RESUME" : "PAUSE";
    return (
      <TouchableHighlight style={styles.button} onPress={onPress}>
        <Text style={style}>
          {title}
        </Text>
      </TouchableHighlight>
    );
  }

  async _pause() {
    if (!this.state.recording) {
      console.warn('Can\'t pause, not recording!');
      return;
    }

    try {
      const filePath = await AudioRecorder.pauseRecording();
      this.setState({ paused: true });
    } catch (error) {
      console.error(error);
    }
  }

  async _resume() {
    if (!this.state.paused) {
      console.warn('Can\'t resume, not paused!');
      return;
    }

    try {
      await AudioRecorder.resumeRecording();
      this.setState({ paused: false });
    } catch (error) {
      console.error(error);
    }
  }

  async _stop() {
    if (!this.state.recording) {
      console.warn('Can\'t stop, not recording!');
      return;
    }

    this.setState({ stoppedRecording: true, recording: false, paused: false });

    try {
      const filePath = await AudioRecorder.stopRecording();

      if (Platform.OS === 'android') {
        this._finishRecording(true, filePath);
      }
      return filePath;
    } catch (error) {
      console.error(error);
    }
  }



  async _record() {
    if (this.state.recording) {
      console.warn('Already recording!');
      return;
    }

    if (!this.state.hasPermission) {
      console.warn('Can\'t record, no permission granted!');
      return;
    }

    if (this.state.stoppedRecording) {
      this.prepareRecordingPath(this.state.audioPath);
    }

    this.setState({ recording: true, paused: false });

    try {
      const filePath = await AudioRecorder.startRecording();
    } catch (error) {
      console.error(error);
    }
  }

  _finishRecording(didSucceed, filePath, fileSize) {
    this.setState({ finished: didSucceed });
    console.log(`Finished recording of duration ${this.state.currentTime} seconds at path: ${filePath} and size of ${fileSize || 0} bytes`);

    this.timer = setTimeout(() => {
      //录音文件是aac格式,转写成wav格式
      RNAudiotransition.audioToStart(AudioUtils.DownloadsDirectoryPath + "/luyin.aac", 'wav', (res) => {
        console.log(res)
        //获取音频文件len
        RNFS.stat(AudioUtils.DownloadsDirectoryPath + "/luyin.wav")
          .then((StatResult) => {
            this.setState({
              audioSize: StatResult.size
            })
          })
          //音频文件转base64
        RNFS.readFile(AudioUtils.DownloadsDirectoryPath + "/luyin.wav", "base64")
          .then((content) => {
            //base64传给百度后台
            let url = "https://vop.baidu.com/pro_api";
            let params = {
              "format": "wav",                    //语音文件的格式,pcm、wav、amr、m4a
              "rate": 16000,                      //采样率,16000,固定值
              "dev_pid": 80001,                   //普通话
              "channel": 1,                       //声道数,仅支持单声道,请填写固定值 1
              "token": "*****",//开放平台获取到的开发者[access_token]
              "cuid": "baidu_workshop",            //用户唯一标识,用来区分用户,计算UV值。
              "len": this.state.audioSize,         //本地语音文件的的字节数,单位字节
              "speech": content,                   //本地语音文件的的二进制语音数据 ,需要进行base64 编码
            };
            console.log("content.length" + content.length)
            fetch(url, {
              method: 'POST',
              body: JSON.stringify(params),   //请求体
              headers: {
                Accept: "application/json",
                "Content-Type": "application/json",
              }
            })
              .then((response) => response.json())
              .then((data) => {
                console.log(data)
                this.setState({
                  result: data.result
                })
              })
              .catch((error) => {
                console.log('语音识别错误')

              });
          }).catch((err) => {
            console.log("文件读取失败")
          });

      })
    }, 1000);

  }

  render() {

    return (
      <View style={styles.container}>
        <View style={styles.controls}>
          {this._renderButton("RECORD", () => { this._record() }, this.state.recording)}
          {this._renderButton("STOP", () => { this._stop() })}
          <Text style={styles.progressText}>{this.state.currentTime}s</Text>
          <Text style={styles.progressText2}>{this.state.result}</Text>
        </View>
      </View>
    );
  }
}

// Component stylesheet. `const`, not `var` — the binding is never reassigned.
const styles = StyleSheet.create({
  container: {
    flex: 1,
    backgroundColor: "#2b608a",
  },
  controls: {
    justifyContent: 'center',
    alignItems: 'center',
    flex: 1,
  },
  // Elapsed-time counter.
  progressText: {
    paddingTop: 50,
    fontSize: 50,
    color: "#fff",
  },
  // Recognition result text.
  progressText2: {
    paddingTop: 50,
    fontSize: 20,
    color: "#fff",
  },
  button: {
    padding: 20,
  },
  disabledButtonText: {
    color: '#eee',
  },
  buttonText: {
    fontSize: 20,
    color: "#fff",
  },
  activeButtonText: {
    fontSize: 20,
    color: "#B81F00",
  },
});

有描述不清或者有什么不懂的问题可以私信我。

Logo

CSDN联合极客时间,共同打造面向开发者的精品内容学习社区,助力成长!

更多推荐