Android: record audio natively with AudioRecord, and convert PCM to WAV by simply adding a header
VoiceScreen
package edu.tyut.webviewlearn.ui.screen
import android.content.Context
import android.content.pm.PackageManager
import android.media.AudioDeviceCallback
import android.media.AudioDeviceInfo
import android.media.AudioManager
import android.net.Uri
import android.os.Environment
import android.os.Handler
import android.os.Looper
import android.util.Log
import androidx.activity.compose.ManagedActivityResultLauncher
import androidx.activity.compose.rememberLauncherForActivityResult
import androidx.activity.result.contract.ActivityResultContracts
import androidx.compose.foundation.background
import androidx.compose.foundation.clickable
import androidx.compose.foundation.layout.Column
import androidx.compose.foundation.layout.padding
import androidx.compose.material3.SnackbarHostState
import androidx.compose.material3.Text
import androidx.compose.runtime.Composable
import androidx.compose.runtime.DisposableEffect
import androidx.compose.runtime.getValue
import androidx.compose.runtime.mutableStateOf
import androidx.compose.runtime.remember
import androidx.compose.runtime.rememberCoroutineScope
import androidx.compose.runtime.setValue
import androidx.compose.ui.Modifier
import androidx.compose.ui.graphics.Color
import androidx.compose.ui.platform.LocalContext
import androidx.compose.ui.unit.dp
import androidx.core.content.ContextCompat
import androidx.core.content.FileProvider
import edu.tyut.webviewlearn.ui.theme.RoundedCornerShape10
import edu.tyut.webviewlearn.voice.VoiceManager
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.launch
import java.io.File
private const val TAG: String = "VoiceScreen"
@Composable
internal fun VoiceScreen(
modifier: Modifier,
snackBarHostState: SnackbarHostState
){
val context: Context = LocalContext.current
val audioManager: AudioManager = context.getSystemService(AudioManager::class.java)
val coroutineScope: CoroutineScope = rememberCoroutineScope()
val voiceManager: VoiceManager by remember {
mutableStateOf(value = VoiceManager(context = context))
}
var isStart: Boolean by remember {
mutableStateOf(value = false)
}
val launcher: ManagedActivityResultLauncher<String, Boolean>
= rememberLauncherForActivityResult(
contract = ActivityResultContracts.RequestPermission()
) { isSuccess: Boolean ->
coroutineScope.launch {
snackBarHostState.showSnackbar("获取权限${if (isSuccess) "成功" else "失败"}")
}
}
val uri: Uri by lazy {
FileProvider.getUriForFile(context, "${context.packageName}.provider", File("${Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_DOWNLOADS)}/hello.pcm").apply {
Log.i(TAG, "VoiceScreen -> path: $this")
})
}
DisposableEffect(key1 = Unit) {
val audioDeviceCallback: AudioDeviceCallback = object : AudioDeviceCallback() {
override fun onAudioDevicesAdded(addedDevices: Array<out AudioDeviceInfo?>?) {
Log.i(TAG, "onAudioDevicesAdded -> addedDevices: ${addedDevices?.joinToString()}")
// AudioDeviceInfo.TYPE_BLE_HEADSET -> 26
}
override fun onAudioDevicesRemoved(removedDevices: Array<out AudioDeviceInfo?>?) {
Log.i(TAG, "onAudioDevicesRemoved -> removedDevices: ${removedDevices?.joinToString()}")
}
}
audioManager.registerAudioDeviceCallback(audioDeviceCallback, Handler(Looper.getMainLooper()))
onDispose {
voiceManager.release()
audioManager.unregisterAudioDeviceCallback(audioDeviceCallback)
}
}
Column(
modifier = modifier
){
Text(text = if (isStart) "停止录音" else "开始录音", modifier = Modifier
.background(color = Color.Cyan, shape = RoundedCornerShape10)
.padding(all = 10.dp)
.clickable {
if (ContextCompat.checkSelfPermission(
context,
android.Manifest.permission.RECORD_AUDIO
) != PackageManager.PERMISSION_GRANTED
) {
launcher.launch(android.Manifest.permission.RECORD_AUDIO)
return@clickable
}
if (isStart) {
voiceManager.stopRecord()
} else {
coroutineScope.launch {
voiceManager.startRecord(context, uri)
}
}
isStart = !isStart
Log.i(TAG, "VoiceScreen -> isRecording: ${voiceManager.isRecording}")
})
}
}
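Two notes on the storage path used above. The "${context.packageName}.provider" authority assumes a matching FileProvider <provider> entry in the manifest whose configured paths cover the public Download directory. Also, on Android 10+ with scoped storage, writing into Environment.getExternalStoragePublicDirectory(DIRECTORY_DOWNLOADS) through a raw java.io.File path can fail depending on target SDK and device; a commonly used alternative (not the author's approach, only a sketch under that assumption) is to ask MediaStore for a writable Uri in the Downloads collection:

import android.content.ContentValues
import android.content.Context
import android.net.Uri
import android.os.Build
import android.provider.MediaStore
import androidx.annotation.RequiresApi

// Sketch: obtain a writable content Uri in the public Downloads collection via
// MediaStore (API 29+), as an alternative to FileProvider + a raw File path.
// The display name and MIME type here are illustrative.
@RequiresApi(Build.VERSION_CODES.Q)
fun createDownloadsUri(context: Context, displayName: String): Uri? {
    val values = ContentValues().apply {
        put(MediaStore.MediaColumns.DISPLAY_NAME, displayName) // e.g. "hello.pcm"
        put(MediaStore.MediaColumns.MIME_TYPE, "application/octet-stream")
    }
    return context.contentResolver.insert(MediaStore.Downloads.EXTERNAL_CONTENT_URI, values)
}

The returned Uri supports contentResolver.openOutputStream(uri), so it can stand in for a FileProvider Uri wherever only an output stream is needed.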
VoiceManager
The implementation has been optimized.
package edu.tyut.webviewlearn.voice
import android.Manifest
import android.content.Context
import android.content.pm.PackageManager
import android.media.AudioFormat
import android.media.AudioRecord
import android.media.MediaRecorder
import android.net.Uri
import android.os.Environment
import android.util.Log
import androidx.annotation.RequiresPermission
import androidx.core.app.ActivityCompat
import androidx.core.content.ContextCompat
import androidx.core.content.FileProvider
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.withContext
import okio.BufferedSink
import okio.BufferedSource
import okio.buffer
import okio.sink
import okio.source
import java.io.File
import java.nio.ByteBuffer
import java.nio.ByteOrder
private const val TAG: String = "VoiceManager"
/**
* http://soundfile.sapp.org/doc/WaveFormat/ <br/>
*
* ✅ In one sentence <br/>
*
* WAV file = 44-byte WAV header + raw PCM data <br/>
*
* 🧩 WAV header layout <br/>
*
* Field | Size | Content <br/>
* ChunkID | 4 bytes | the ASCII string "RIFF" <br/>
* ChunkSize | 4 bytes | 36 + PCM data size <br/>
* Format | 4 bytes | the ASCII string "WAVE" <br/>
* Subchunk1ID | 4 bytes | "fmt " <br/>
* Subchunk1Size | 4 bytes | 16 (PCM) <br/>
* AudioFormat | 2 bytes | 1 (PCM) <br/>
* NumChannels | 2 bytes | 1 = mono, 2 = stereo <br/>
* SampleRate | 4 bytes | e.g. 44100 (16000 in this class) <br/>
* ByteRate | 4 bytes | sample rate × channels × bytes per sample, e.g. 16000 × 1 × 2 = 32000 <br/>
* BlockAlign | 2 bytes | channels × bytes per sample <br/>
* BitsPerSample | 2 bytes | e.g. 16 <br/>
* Subchunk2ID | 4 bytes | "data" <br/>
* Subchunk2Size | 4 bytes | PCM data size in bytes <br/>
*
*/
internal class VoiceManager internal constructor(
private val context: Context,
) {
// mono channel
private val channelMask: Int = AudioFormat.CHANNEL_IN_MONO
private val sampleRate = 16000
private val bufferSize: Int =
AudioRecord.getMinBufferSize(sampleRate, channelMask, AudioFormat.ENCODING_PCM_16BIT)
private val audioRecord: AudioRecord by lazy {
getAudioRecord()
}
private fun getAudioRecord(): AudioRecord {
return if (ActivityCompat.checkSelfPermission(
context,
Manifest.permission.RECORD_AUDIO
) != PackageManager.PERMISSION_GRANTED
) {
throw Exception("Microphone permission required...")
} else {
AudioRecord.Builder()
.setAudioSource(MediaRecorder.AudioSource.MIC)
.setAudioFormat(
AudioFormat.Builder().setEncoding(AudioFormat.ENCODING_PCM_16BIT)
.setSampleRate(sampleRate).setChannelMask(channelMask).build()
)
.setBufferSizeInBytes(bufferSize)
.build()
}
}
internal val isRecording: Boolean
get() {
if (
ActivityCompat.checkSelfPermission(
context,
Manifest.permission.RECORD_AUDIO
) != PackageManager.PERMISSION_GRANTED
) {
return false
}
return audioRecord.recordingState == AudioRecord.RECORDSTATE_RECORDING
}
internal suspend fun startRecord(context: Context, uri: Uri) =
withContext(context = Dispatchers.IO) {
require(value = hasRecordPermission()) {
"Microphone permission required..."
}
val wavUri: Uri = FileProvider.getUriForFile(
context,
"${context.packageName}.provider",
File(
"${Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_DOWNLOADS)}/${
uri.lastPathSegment?.replaceAfterLast(
".",
"wav"
)
}"
).apply {
Log.i(TAG, "startRecord -> wavPath: $this")
}
)
audioRecord.startRecording()
var totalLength = 0
val bytes = ByteArray(bufferSize)
var length: Int
context.contentResolver.openOutputStream(uri)?.sink()?.buffer()
?.use { bufferedSink: BufferedSink ->
while (audioRecord.read(bytes, 0, bytes.size).also { length = it } > 0) {
bufferedSink.write(bytes, 0, length)
totalLength += length
}
// PCM capture finished here; the 44-byte WAV header is prepended below when copying into the .wav file
bufferedSink.flush()
Log.i(TAG, "startRecord -> 录制完成...")
}
context.contentResolver?.openInputStream(uri)?.source()?.buffer()
?.use { bufferedSource: BufferedSource ->
context.contentResolver.openOutputStream(wavUri)?.sink()?.buffer()
?.use { bufferedSink: BufferedSink ->
val wavHeader: ByteArray = writeWavHeader(totalLength = totalLength)
bufferedSink.write(source = wavHeader)
bufferedSource.readAll(sink = bufferedSink)
bufferedSink.flush()
}
}
}
private fun hasRecordPermission(): Boolean {
return ContextCompat.checkSelfPermission(
context,
Manifest.permission.RECORD_AUDIO
) == PackageManager.PERMISSION_GRANTED
}
// adb shell dumpsys activity activities | grep -E 'mResumedActivity|mCurrentFocus'
private fun writeWavHeader(
totalLength: Int,
bitsPerSample: Int = 16,
): ByteArray {
val header = ByteArray(size = 44)
val buffer: ByteBuffer = ByteBuffer.wrap(header).order(ByteOrder.LITTLE_ENDIAN)
buffer.put("RIFF".toByteArray(Charsets.US_ASCII)) // Chunk ID
buffer.putInt(totalLength + 36) // Chunk Size
buffer.put("WAVE".toByteArray(Charsets.US_ASCII)) // Format
buffer.put("fmt ".toByteArray(Charsets.US_ASCII)) // Subchunk1 ID
buffer.putInt(16) // Subchunk1 Size
buffer.putShort(1) // Audio format = PCM
buffer.putShort(1) // Channels
buffer.putInt(sampleRate) // Sample rate
buffer.putInt(sampleRate * 1 * bitsPerSample / 8) // Byte rate
buffer.putShort((1 * bitsPerSample / 8).toShort()) // Block align
buffer.putShort(bitsPerSample.toShort()) // Bits per sample
buffer.put("data".toByteArray(Charsets.US_ASCII)) // Subchunk2 ID
buffer.putInt(totalLength) // Subchunk2 size
return header
}
internal fun stopRecord() {
require(value = hasRecordPermission()) {
"Microphone permission required..."
}
if (audioRecord.recordingState == AudioRecord.RECORDSTATE_RECORDING) {
audioRecord.stop()
}
}
internal fun release() {
require(value = hasRecordPermission()) {
"Microphone permission required..."
}
if (audioRecord.recordingState == AudioRecord.RECORDSTATE_RECORDING) {
audioRecord.stop()
}
audioRecord.release()
}
}
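As a quick sanity check on the output, here is a minimal sketch (plain Kotlin/JVM; the file path is a placeholder) that reads back the 44-byte header written by writeWavHeader and prints each field in order:

import java.io.File
import java.nio.ByteBuffer
import java.nio.ByteOrder

// Parses the 44-byte header produced by writeWavHeader() above and prints each
// field. The path is a placeholder; point it at a .wav generated by VoiceManager.
fun main() {
    val header: ByteArray = File("/path/to/hello.wav").readBytes().copyOf(44)
    val buffer: ByteBuffer = ByteBuffer.wrap(header).order(ByteOrder.LITTLE_ENDIAN)
    fun tag(): String = ByteArray(4).also { buffer.get(it) }.toString(Charsets.US_ASCII)
    println("ChunkID       = ${tag()}")              // "RIFF"
    println("ChunkSize     = ${buffer.getInt()}")    // 36 + PCM data size
    println("Format        = ${tag()}")              // "WAVE"
    println("Subchunk1ID   = ${tag()}")              // "fmt "
    println("Subchunk1Size = ${buffer.getInt()}")    // 16 for PCM
    println("AudioFormat   = ${buffer.getShort()}")  // 1 = PCM
    println("NumChannels   = ${buffer.getShort()}")  // 1 = mono
    println("SampleRate    = ${buffer.getInt()}")    // 16000
    println("ByteRate      = ${buffer.getInt()}")    // 16000 * 1 * 2 = 32000
    println("BlockAlign    = ${buffer.getShort()}")  // 2
    println("BitsPerSample = ${buffer.getShort()}")  // 16
    println("Subchunk2ID   = ${tag()}")              // "data"
    println("Subchunk2Size = ${buffer.getInt()}")    // PCM byte count
}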
Android: recording and playing back PCM
1. AudioRecordManager
package edu.tyut.helloktorfit.manager
import android.Manifest
import android.content.Context
import android.content.pm.PackageManager
import android.media.AudioFormat
import android.media.AudioRecord
import android.media.MediaRecorder
import android.net.Uri
import android.util.Log
import androidx.core.app.ActivityCompat
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.withContext
import okio.BufferedSink
import okio.buffer
import okio.sink
private const val TAG: String = "AudioRecordManager"
internal class AudioRecordManager internal constructor(
private val context: Context
) {
private val channelMask: Int = AudioFormat.CHANNEL_IN_MONO
private val sampleRate = 16000
private val bufferSize: Int =
AudioRecord.getMinBufferSize(sampleRate, channelMask, AudioFormat.ENCODING_PCM_16BIT)
private val audioRecord: AudioRecord by lazy {
initAudioRecord()
}
private fun initAudioRecord(): AudioRecord {
if (ActivityCompat.checkSelfPermission(
context,
Manifest.permission.RECORD_AUDIO
) != PackageManager.PERMISSION_GRANTED
) {
throw RuntimeException("Not RECORD_AUDIO permission...")
}
return AudioRecord.Builder()
.setAudioSource(MediaRecorder.AudioSource.MIC)
.setAudioFormat(
AudioFormat.Builder().setEncoding(AudioFormat.ENCODING_PCM_16BIT)
.setSampleRate(sampleRate).setChannelMask(channelMask).build()
)
.setBufferSizeInBytes(bufferSize)
.build()
}
internal suspend fun startRecord(uri: Uri): Unit = withContext(Dispatchers.IO){
audioRecord.startRecording()
context.contentResolver.openOutputStream(uri)?.sink()?.buffer()
?.use { bufferedSink: BufferedSink ->
var totalLength = 0L
val bytes = ByteArray(bufferSize)
var length: Int
while (audioRecord.read(bytes, 0, bytes.size).also { length = it } > 0) {
Log.i(TAG, "startRecord -> data: ${bytes.joinToString()}")
bufferedSink.write(bytes, 0, length)
totalLength += length
}
bufferedSink.flush()
Log.i(TAG, "startRecord -> 录制完成, 文件大小为: $totalLength bytes")
}
}
// @RequiresPermission(value = Manifest.permission.RECORD_AUDIO)
internal fun stopRecord(){
if (ActivityCompat.checkSelfPermission(
context,
Manifest.permission.RECORD_AUDIO
) != PackageManager.PERMISSION_GRANTED
) {
throw RuntimeException("Not RECORD_AUDIO permission...")
}
if (audioRecord.recordingState == AudioRecord.RECORDSTATE_RECORDING) {
audioRecord.stop()
}
}
internal fun release(){
if (ActivityCompat.checkSelfPermission(
context,
Manifest.permission.RECORD_AUDIO
) != PackageManager.PERMISSION_GRANTED
) {
throw RuntimeException("Not RECORD_AUDIO permission...")
}
audioRecord.release()
}
}
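Because the captured stream is headerless PCM, the clip duration is implicit in the byte count: at 16 kHz, mono, 16-bit, one second of audio is 16000 × 1 × 2 = 32 000 bytes. A small helper, sketched with this recorder's settings as defaults:

// Duration of a raw PCM stream, assuming the recorder settings above
// (16 000 Hz, mono, 16-bit): one second of audio is 16000 * 1 * 2 = 32 000 bytes.
fun pcmDurationSeconds(
    totalBytes: Long,
    sampleRate: Int = 16_000,
    channels: Int = 1,
    bitsPerSample: Int = 16,
): Double = totalBytes.toDouble() / (sampleRate * channels * (bitsPerSample / 8))

For example, the totalLength value logged at the end of startRecord divided by 32 000 gives the clip length in seconds.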
2. AudioTrackManager
package edu.tyut.helloktorfit.manager
import android.content.Context
import android.media.AudioFormat
import android.media.AudioTrack
import android.net.Uri
import android.util.Log
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.withContext
import java.io.InputStream
private const val TAG: String = "AudioTrackManager"
internal class AudioTrackManager internal constructor() {
private val channelMask: Int = AudioFormat.CHANNEL_OUT_MONO
private val sampleRate = 16000
private val bufferSize: Int =
AudioTrack.getMinBufferSize(sampleRate, channelMask, AudioFormat.ENCODING_PCM_16BIT)
private val audioTrack: AudioTrack by lazy {
val audioTrack = AudioTrack.Builder()
.setBufferSizeInBytes(bufferSize)
.setAudioFormat(
AudioFormat.Builder()
.setEncoding(AudioFormat.ENCODING_PCM_16BIT)
.setSampleRate(sampleRate).setChannelMask(channelMask).build()
).build()
audioTrack
}
internal suspend fun startPlay(context: Context, uri: Uri) = withContext(Dispatchers.IO){
audioTrack.play()
context.contentResolver.openInputStream(uri)?.use { inputStream: InputStream ->
val bytes = ByteArray(bufferSize)
var length: Int
while (inputStream.read(bytes, 0, bytes.size).also { length = it } > 0) {
Log.i(TAG, "startPlay -> data: ${bytes.joinToString()}")
val result: Int = audioTrack.write(bytes, 0, length)
if (result < 0) {
break
}
}
}
}
internal fun pause(){
Log.i(TAG, "pause -> audioTrack.playState: ${audioTrack.playState}")
if (audioTrack.playState == AudioTrack.PLAYSTATE_PLAYING) {
Log.i(TAG, "pause...")
audioTrack.pause()
}
}
internal fun stop(){
audioTrack.stop()
}
internal fun release(){
audioTrack.release()
}
}
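To verify the playback path without depending on a recorded file, here is a short sketch (not part of the original manager) that streams one second of a generated 440 Hz tone through an AudioTrack configured the same way (16 kHz, mono, 16-bit PCM; AudioTrack.Builder defaults to MODE_STREAM):

import android.media.AudioFormat
import android.media.AudioTrack
import kotlin.math.PI
import kotlin.math.sin

// Streams one second of a 440 Hz sine tone through an AudioTrack configured
// like AudioTrackManager above. Call from a background thread or coroutine;
// this is a sketch, not production code.
fun playTestTone() {
    val sampleRate = 16_000
    val minBuffer: Int = AudioTrack.getMinBufferSize(
        sampleRate, AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT
    )
    val track: AudioTrack = AudioTrack.Builder()
        .setBufferSizeInBytes(minBuffer)
        .setAudioFormat(
            AudioFormat.Builder()
                .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                .setSampleRate(sampleRate)
                .setChannelMask(AudioFormat.CHANNEL_OUT_MONO)
                .build()
        )
        .build()
    // One second of samples at 30% amplitude.
    val samples = ShortArray(sampleRate) { i ->
        (sin(2.0 * PI * 440.0 * i / sampleRate) * Short.MAX_VALUE * 0.3).toInt().toShort()
    }
    track.play()
    track.write(samples, 0, samples.size) // blocking write in MODE_STREAM
    track.stop()
    track.release()
}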
3. AudioScreen
package edu.tyut.helloktorfit.ui.screen
import android.content.Context
import android.content.pm.PackageManager
import android.net.Uri
import android.os.Environment
import android.util.Log
import androidx.activity.compose.rememberLauncherForActivityResult
import androidx.activity.result.contract.ActivityResultContracts
import androidx.compose.foundation.background
import androidx.compose.foundation.clickable
import androidx.compose.foundation.layout.Column
import androidx.compose.foundation.layout.fillMaxSize
import androidx.compose.foundation.layout.padding
import androidx.compose.material3.SnackbarHostState
import androidx.compose.material3.Text
import androidx.compose.runtime.Composable
import androidx.compose.runtime.getValue
import androidx.compose.runtime.mutableStateOf
import androidx.compose.runtime.remember
import androidx.compose.runtime.rememberCoroutineScope
import androidx.compose.ui.Modifier
import androidx.compose.ui.graphics.Color
import androidx.compose.ui.platform.LocalContext
import androidx.compose.ui.unit.dp
import androidx.core.content.ContextCompat
import androidx.core.content.FileProvider
import androidx.hilt.navigation.compose.hiltViewModel
import androidx.navigation.NavHostController
import edu.tyut.helloktorfit.manager.AudioRecordManager
import edu.tyut.helloktorfit.manager.AudioTrackManager
import edu.tyut.helloktorfit.ui.theme.RoundedCornerShape10
import edu.tyut.helloktorfit.viewmodel.HelloViewModel
import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.launch
import java.io.File
private const val TAG: String = "GifScreen"
@Composable
internal fun AudioScreen(
navHostController: NavHostController,
snackBarHostState: SnackbarHostState,
helloViewModel: HelloViewModel = hiltViewModel<HelloViewModel>()
) {
val context: Context = LocalContext.current
val coroutineScope: CoroutineScope = rememberCoroutineScope()
val recordManager: AudioRecordManager by remember {
mutableStateOf(value = AudioRecordManager(context = context))
}
val audioTrackManager: AudioTrackManager by remember {
mutableStateOf(value = AudioTrackManager())
}
val permissions: Array<String> = arrayOf(android.Manifest.permission.RECORD_AUDIO)
val launcher = rememberLauncherForActivityResult(
contract = ActivityResultContracts.RequestMultiplePermissions()
) { map ->
coroutineScope.launch {
snackBarHostState.showSnackbar("获取权限是否成功: ${map.values.all { it }}")
}
}
Column(
modifier = Modifier.fillMaxSize()
) {
Text(
text = "开始录音",
Modifier
.padding(top = 10.dp)
.background(color = Color.Black, shape = RoundedCornerShape10)
.padding(all = 5.dp)
.clickable {
if (permissions.any {
ContextCompat.checkSelfPermission(
context,
it
) != PackageManager.PERMISSION_GRANTED
}) {
launcher.launch(permissions)
return@clickable
}
val uri: Uri = FileProvider.getUriForFile(
context, "${context.packageName}.provider", File(
Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_DOWNLOADS),
"hello.pcm"
).apply {
Log.i(TAG, "AudioScreen path: $this")
}
)
coroutineScope.launch {
Log.i(TAG, "AudioScreen -> startRecord...")
recordManager.startRecord(uri = uri)
Log.i(TAG, "AudioScreen -> endRecord...")
}
},
color = Color.White
)
Text(
text = "停止录音",
Modifier
.padding(top = 10.dp)
.background(color = Color.Black, shape = RoundedCornerShape10)
.padding(all = 5.dp)
.clickable {
recordManager.stopRecord()
},
color = Color.White
)
Text(
text = "播放录音",
Modifier
.padding(top = 10.dp)
.background(color = Color.Black, shape = RoundedCornerShape10)
.padding(all = 5.dp)
.clickable {
val uri: Uri = FileProvider.getUriForFile(
context, "${context.packageName}.provider", File(
Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_DOWNLOADS),
"hello.pcm"
)
)
coroutineScope.launch {
audioTrackManager.startPlay(context, uri)
}
},
color = Color.White
)
Text(
text = "暂停播放录音",
Modifier
.padding(top = 10.dp)
.background(color = Color.Black, shape = RoundedCornerShape10)
.padding(all = 5.dp)
.clickable {
audioTrackManager.pause()
},
color = Color.White
)
}
}
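One gap in AudioScreen: neither manager is released when the screen leaves composition. A hypothetical helper composable (ReleaseOnDispose is not part of the original code), mirroring the DisposableEffect pattern used in VoiceScreen earlier, could be called from inside the Column:

import androidx.compose.runtime.Composable
import androidx.compose.runtime.DisposableEffect
import edu.tyut.helloktorfit.manager.AudioRecordManager
import edu.tyut.helloktorfit.manager.AudioTrackManager

// Hypothetical helper (not in the original code): releases both managers when
// the caller leaves composition. Note that AudioRecordManager.release() throws
// a RuntimeException when RECORD_AUDIO has not been granted.
@Composable
internal fun ReleaseOnDispose(
    recordManager: AudioRecordManager,
    audioTrackManager: AudioTrackManager,
) {
    DisposableEffect(key1 = Unit) {
        onDispose {
            recordManager.release()
            audioTrackManager.release()
        }
    }
}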
