android camera2拍照图像输出过慢,华为手机比较明显
最近在camera2自定义相机拍照,在点击拍照按钮回调,在处理图像流的时候总是卡住,尤其是华为手机,几乎所有手机都会拍照后拿不到imageReader读取的image。然后我加了个300ms的延迟后,可以成功读取到完整的图像流了,但是体验感很不好。在测试以后,发现华为手机在将图像流处理为jpeg的时候,需要243ms。这是造成卡住的原因,也解释了我加了300ms以后就不卡住的原因。原始代码如下。
·
最近在camera2自定义相机拍照,在点击拍照按钮回调,在处理图像流的时候总是卡住
尤其是华为手机,几乎所有手机都会拍照后拿不到imageReader读取的image
然后我加了个300ms的延迟后,可以成功读取到完整的图像流了,但是体验感很不好
在测试以后,发现华为手机在将图像流处理为jpeg的时候,需要243ms。这是造成卡住的原因,
也解释了我加了300ms以后就不卡住的原因。原始代码:
// Original (problematic) capture callback: a fixed ~300 ms timer papers over
// slow in-HAL JPEG encoding (measured ~243 ms on some Huawei devices).
// Without the delay, the ImageReader often had no Image ready when
// captureResult() ran. The fixed delay "works" but hurts perceived latency —
// the real fix is to stop requesting ImageFormat.JPEG from the HAL (see below).
val captureCallback = object : CameraCaptureSession.CaptureCallback() {
override fun onCaptureCompleted(
session: CameraCaptureSession,
request: CaptureRequest,
result: TotalCaptureResult
) {
state.set(STATE_PICTURE_TAKEN)
// Arbitrary fixed delay, then read the captured image on the main thread.
Flowable.timer(300, TimeUnit.MILLISECONDS)
.observeOn(AndroidSchedulers.mainThread())
.subscribe {
captureResult()
}
}
}
相机配置
// Create the ImageReader that receives still-capture data (original JPEG path:
// the HAL itself encodes to JPEG, which is what made capture slow).
val imageFormat = ImageFormat.JPEG
// Ask the device's stream configuration map whether it can output this format.
// NOTE(review): CameraConfig / previewSize / backgroundHandler are declared elsewhere in this file.
val streamConfigurationMap =
CameraConfig.getCurrentCameraCameraCharacteristics()[CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP]
if (streamConfigurationMap?.isOutputSupportedFor(imageFormat) == true) {
// JPEG is supported; maxImages = 1 means at most one Image may be acquired at a time.
previewImageReader =
ImageReader.newInstance(previewSize.width, previewSize.height, imageFormat, 1)
// Deliver the image-available callback on the background handler,
// keeping image handling off the UI thread.
previewImageReader.setOnImageAvailableListener(
OnJpegImageAvailableListener(),
backgroundHandler
)
previewImageReaderSurface = previewImageReader.surface
}
在查阅资料以后发现,Android相机输出的是YUV的数据流,转成JPEG拖慢了时间,于是改用读取 YUV_420_888 数据流
// Create the ImageReader that receives still-capture data. Requesting raw
// YUV_420_888 instead of JPEG skips the slow in-HAL JPEG encode; we convert
// to JPEG ourselves later, on a background thread.
val imageFormat = ImageFormat.YUV_420_888
// Ask the device's stream configuration map whether it can output this format.
val streamConfigurationMap =
CameraConfig.getCurrentCameraCameraCharacteristics()[CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP]
if (streamConfigurationMap?.isOutputSupportedFor(imageFormat) == true) {
// YUV_420_888 is supported; maxImages = 1 means at most one Image may be acquired at a time.
previewImageReader =
ImageReader.newInstance(previewSize.width, previewSize.height, imageFormat, 1)
// Deliver the image-available callback on the background handler,
// keeping image handling off the UI thread.
previewImageReader.setOnImageAvailableListener(
OnJpegImageAvailableListener(),
backgroundHandler
)
previewImageReaderSurface = previewImageReader.surface
}
然后在处理图像的阶段去转化成JPEG
// Convert the captured YUV_420_888 frame to JPEG on an IO thread, write it to
// disk, then route the result on the main thread. Encoding on the CPU here
// avoids the slow in-HAL JPEG path that stalled capture on some devices.
captureImage?.let { mImage ->
val timeStamp = getDate()
val filePath = "${CameraConfig.IMAGE_SAVE_DIR}/img_$timeStamp.jpg"
Observable.create<Boolean> {
// Pack the three YUV planes into a single NV21 buffer, then compress to JPEG.
val data = JPEGUtil.YUV420toNV21(mImage)
val b =
JPEGUtil.NV21toJPEG(data, previewSize.width, previewSize.height, 100)
var fos: FileOutputStream? = null
var bitmap: Bitmap? = null
var matBitmap: Bitmap? = null
try {
bitmap = BitmapFactory.decodeByteArray(b, 0, b.size)
// Compensate for sensor orientation; rotateImage appears to return a
// nullable Bitmap (see the ?. uses below) — TODO confirm its signature.
bitmap = rotateImage(bitmap, sensorOrientation + 90f)
matBitmap = Bitmap.createBitmap(
bitmap,
0,
0,
bitmap?.width ?: 0,
bitmap?.height ?: 0
)
fos = FileOutputStream(filePath)
val bit = cropBitmap(matBitmap ?: bitmap)
bit?.compress(Bitmap.CompressFormat.JPEG, 90, fos)
} catch (e: IOException) {
e.printStackTrace()
} finally {
// Always release the Image so the ImageReader (maxImages = 1) can deliver again.
mImage.close()
try {
fos?.flush()
fos?.close()
bitmap?.recycle()
matBitmap?.recycle()
// NOTE(review): true is emitted even when the save failed above — the
// subscriber cannot distinguish success from failure; consider emitting
// the actual outcome instead.
it.onNext(true)
} catch (e: IOException) {
e.printStackTrace()
}
}
it.onComplete()
}.subscribeOn(Schedulers.io())
.observeOn(AndroidSchedulers.mainThread())
.subscribe(object : Observer<Boolean> {
override fun onSubscribe(d: Disposable) {}
override fun onError(e: Throwable) {}
override fun onComplete() {}
override fun onNext(t: Boolean) {
// When shooting the instrument panel, record quality is set to 100:
// hand the saved file straight back through the callback.
if (CameraConfig.RECORD_QUALITY == 100) {
CameraConfig.callback?.onPhotoTake(filePath, null, null, 0)
} else {
// Otherwise open the sticker editor with the freshly saved image.
val savePath =
CameraConfig.IMAGE_SAVE_DIR + "/" + System.currentTimeMillis() + ".jpg"
val intent = Intent(mActivity, StickerActivity::class.java)
intent.putExtra(StickerActivity.EXTRA_IMAGE_URI, filePath)
intent.putExtra(StickerActivity.EXTRA_IMAGE_SAVE_PATH, savePath)
startActivity(intent)
}
finish()
}
})
}
这样就不会出现卡住的情况了。最后附上JPEGUtil的代码
import android.graphics.ImageFormat;
import android.graphics.Rect;
import android.graphics.YuvImage;
import android.media.Image;
import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
/**
 * CPU-side helpers for converting camera YUV_420_888 frames to JPEG:
 * first repack the planar image into NV21, then compress via {@link YuvImage}.
 */
public class JPEGUtil {
/**
 * Compresses a packed NV21 byte array to JPEG.
 *
 * @param nv21    NV21 pixel data (full Y plane followed by interleaved V/U)
 * @param width   image width in pixels
 * @param height  image height in pixels
 * @param quality JPEG quality, 0-100
 * @return the JPEG-encoded bytes
 */
public static byte[] NV21toJPEG(byte[] nv21, int width, int height, int quality) {
ByteArrayOutputStream out = new ByteArrayOutputStream();
YuvImage yuv = new YuvImage(nv21, ImageFormat.NV21, width, height, null);
yuv.compressToJpeg(new Rect(0, 0, width, height), quality, out);
return out.toByteArray();
}
/**
 * Repacks a YUV_420_888 {@link Image} into a single NV21 byte array, honouring
 * the image's crop rect and each plane's row stride and pixel stride.
 * NV21 layout: Y plane first, then chroma interleaved as V,U,V,U,...
 *
 * @param image a YUV_420_888 image whose planes are still valid (not closed)
 * @return packed NV21 bytes sized from the crop rect and the format's bits-per-pixel
 */
public static byte[] YUV420toNV21(Image image) {
Rect crop = image.getCropRect();
int format = image.getFormat();
int width = crop.width();
int height = crop.height();
Image.Plane[] planes = image.getPlanes();
byte[] data = new byte[width * height * ImageFormat.getBitsPerPixel(format) / 8];
// Scratch row; plane 0 has the widest row stride, so this fits every plane.
byte[] rowData = new byte[planes[0].getRowStride()];
int channelOffset = 0;
int outputStride = 1;
for (int i = 0; i < planes.length; i++) {
switch (i) {
case 0:
// Y plane: copied contiguously at the start of the output.
channelOffset = 0;
outputStride = 1;
break;
case 1:
// U plane: in NV21 chroma starts with V, so U samples begin one
// byte after the chroma section and step by 2 (interleaved).
channelOffset = width * height + 1;
outputStride = 2;
break;
case 2:
// V plane: chroma section starts right after the Y plane.
channelOffset = width * height;
outputStride = 2;
break;
}
ByteBuffer buffer = planes[i].getBuffer();
int rowStride = planes[i].getRowStride();
int pixelStride = planes[i].getPixelStride();
// Chroma planes are subsampled 2x in both dimensions (shift = 1).
int shift = (i == 0) ? 0 : 1;
int w = width >> shift;
int h = height >> shift;
// Skip to the crop rect's top-left corner within this plane.
buffer.position(rowStride * (crop.top >> shift) + pixelStride * (crop.left >> shift));
for (int row = 0; row < h; row++) {
int length;
if (pixelStride == 1 && outputStride == 1) {
// Fast path: source and destination are both tightly packed.
length = w;
buffer.get(data, channelOffset, length);
channelOffset += length;
} else {
// General path: read one row into scratch, then pick every
// pixelStride-th byte and write with the output interleave.
length = (w - 1) * pixelStride + 1;
buffer.get(rowData, 0, length);
for (int col = 0; col < w; col++) {
data[channelOffset] = rowData[col * pixelStride];
channelOffset += outputStride;
}
}
if (row < h - 1) {
// Advance over row padding — but not after the last row, where the
// buffer may end exactly at the last pixel (avoids overrun).
buffer.position(buffer.position() + rowStride - length);
}
}
}
return data;
}
}
更多推荐
已为社区贡献1条内容
所有评论(0)