Getting to Know Fresco
The First Line of Code
The simplest, entry-level Fresco API is SimpleDraweeView.setImageURI(uri),
so let's start from that method and see which concepts we run into along the way
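Before tracing the call stack, a minimal usage sketch (the activity, layout/view ids and URL below are made up for illustration; the call stack that follows traces what happens inside setImageURI):
import android.net.Uri;
import android.os.Bundle;
import androidx.appcompat.app.AppCompatActivity;
import com.facebook.drawee.backends.pipeline.Fresco;
import com.facebook.drawee.view.SimpleDraweeView;
// Minimal usage sketch; R.layout.activity_main and R.id.avatar are assumptions.
public class MainActivity extends AppCompatActivity {
  @Override
  protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    Fresco.initialize(this); // normally done once in Application.onCreate()
    setContentView(R.layout.activity_main);
    SimpleDraweeView draweeView = findViewById(R.id.avatar);
    // The single line this note starts from:
    draweeView.setImageURI(Uri.parse("https://example.com/avatar.jpg"));
  }
}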
SimpleDraweeView.setImageURI(uri)
SimpleDraweeView.setImageURI(uri, null)
DraweeView.setController
DraweeHolder.setController
DraweeHolder.attachController
AbstractDraweeController.onAttach
AbstractDraweeController.submitRequest
class AbstractDraweeController {
protected void submitRequest() {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection("AbstractDraweeController#submitRequest");
}
// Check the cache first: if there is a cached image return it directly, otherwise start a fetch; ignore the cache logic for now
final T closeableImage = getCachedImage();
if (closeableImage != null) {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection("AbstractDraweeController#submitRequest->cache");
}
mDataSource = null;
mIsRequestSubmitted = true;
mHasFetchFailed = false;
mEventTracker.recordEvent(Event.ON_SUBMIT_CACHE_HIT);
reportSubmit(mDataSource, getImageInfo(closeableImage));
onImageLoadedFromCacheImmediately(mId, closeableImage);
onNewResultInternal(mId, mDataSource, closeableImage, 1.0f, true, true, true);
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
return;
}
mEventTracker.recordEvent(Event.ON_DATASOURCE_SUBMIT);
mSettableDraweeHierarchy.setProgress(0, true);
mIsRequestSubmitted = true;
mHasFetchFailed = false;
mDataSource = getDataSource(); // important
reportSubmit(mDataSource, null);
if (FLog.isLoggable(FLog.VERBOSE)) {
FLog.v(
TAG,
"controller %x %s: submitRequest: dataSource: %x",
System.identityHashCode(this),
mId,
System.identityHashCode(mDataSource));
}
// Fetch from the data source in callback style; three cases: success, failure, and progress updates
final String id = mId;
final boolean wasImmediate = mDataSource.hasResult();
final DataSubscriber<T> dataSubscriber =
new BaseDataSubscriber<T>() {
@Override
public void onNewResultImpl(DataSource<T> dataSource) {
// isFinished must be obtained before image, otherwise we might set intermediate result
// as final image.
boolean isFinished = dataSource.isFinished();
boolean hasMultipleResults = dataSource.hasMultipleResults();
float progress = dataSource.getProgress();
T image = dataSource.getResult();
if (image != null) {
onNewResultInternal(
id, dataSource, image, progress, isFinished, wasImmediate, hasMultipleResults);
} else if (isFinished) {
onFailureInternal(id, dataSource, new NullPointerException(), /* isFinished */ true);
}
}
@Override
public void onFailureImpl(DataSource<T> dataSource) {
onFailureInternal(id, dataSource, dataSource.getFailureCause(), /* isFinished */ true);
}
@Override
public void onProgressUpdate(DataSource<T> dataSource) {
boolean isFinished = dataSource.isFinished();
float progress = dataSource.getProgress();
onProgressUpdateInternal(id, dataSource, progress, isFinished);
}
};
mDataSource.subscribe(dataSubscriber, mUiThreadImmediateExecutor);
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
}
}
class AbstractDataSource {
private enum DataSourceStatus {
// data source has not finished yet
IN_PROGRESS,
// data source has finished with success
SUCCESS,
// data source has finished with failure
FAILURE,
}
// A data source has three states: in progress, success, and failure; normally the fetch has not started yet, i.e. IN_PROGRESS
// The callback (the subscriber) is stored by the data source so it can be notified later
public void subscribe(final DataSubscriber<T> dataSubscriber, final Executor executor) {
Preconditions.checkNotNull(dataSubscriber);
Preconditions.checkNotNull(executor);
boolean shouldNotify;
synchronized (this) {
if (mIsClosed) {
return;
}
if (mDataSourceStatus == DataSourceStatus.IN_PROGRESS) {
mSubscribers.add(Pair.create(dataSubscriber, executor));
}
shouldNotify = hasResult() || isFinished() || wasCancelled();
}
if (shouldNotify) {
notifyDataSubscriber(dataSubscriber, executor, hasFailed(), wasCancelled());
}
}
}
// So far we have only seen the callback, not where the data fetch (e.g. the network request) actually starts
// But from the DataSource description it feels like the network request has probably already been issued, and the DataSource represents that async operation (similar to a Future)
// It is one end of a rope tied to the fetch task at the other end; starting from this end we can trace our way to the fetch task
/**
* An alternative to Java Futures for the image pipeline.
*/
public interface DataSource<T>
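As an aside, a DataSource can also be consumed directly, without any DraweeView. A sketch of the Future-like subscribe pattern (the URL is made up and the executor is just one reasonable choice); the call stack below then shows how the controller builder ends up creating such a DataSource:
import android.net.Uri;
import com.facebook.common.executors.UiThreadImmediateExecutorService;
import com.facebook.common.references.CloseableReference;
import com.facebook.datasource.BaseDataSubscriber;
import com.facebook.datasource.DataSource;
import com.facebook.drawee.backends.pipeline.Fresco;
import com.facebook.imagepipeline.image.CloseableImage;
import com.facebook.imagepipeline.request.ImageRequest;
// Sketch: subscribe to the DataSource returned by the pipeline, Future-style.
final class DataSourceDemo {
  static void load() {
    DataSource<CloseableReference<CloseableImage>> dataSource =
        Fresco.getImagePipeline()
            .fetchDecodedImage(ImageRequest.fromUri(Uri.parse("https://example.com/a.jpg")), null);
    dataSource.subscribe(
        new BaseDataSubscriber<CloseableReference<CloseableImage>>() {
          @Override
          public void onNewResultImpl(DataSource<CloseableReference<CloseableImage>> ds) {
            CloseableReference<CloseableImage> ref = ds.getResult(); // may be null for intermediate updates
            if (ref == null) return;
            try {
              // use ref.get() here (e.g. read the underlying bitmap)
            } finally {
              CloseableReference.closeSafely(ref);
            }
          }
          @Override
          public void onFailureImpl(DataSource<CloseableReference<CloseableImage>> ds) {
            // handle ds.getFailureCause()
          }
        },
        UiThreadImmediateExecutorService.getInstance());
  }
}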
SimpleDraweeView.setImageURI(uri)
SimpleDraweeView.setImageURI(uri, null)
AbstractDraweeControllerBuilder.build
AbstractDraweeControllerBuilder.buildController
PipelineDraweeControllerBuilder.obtainController
AbstractDraweeControllerBuilder.obtainDataSourceSupplier
AbstractDraweeControllerBuilder.getDataSourceSupplierForRequest(controller, controllerId, imageRequest)
AbstractDraweeControllerBuilder.getDataSourceSupplierForRequest(controller, controllerId, imageRequest, CacheLevel.FULL_FETCH)
PipelineDraweeControllerBuilder.getDataSourceForRequest
class ImagePipeline {
public DataSource<CloseableReference<CloseableImage>> fetchDecodedImage(
ImageRequest imageRequest,
@Nullable Object callerContext,
ImageRequest.RequestLevel lowestPermittedRequestLevelOnSubmit,
@Nullable RequestListener requestListener,
@Nullable String uiComponentId) {
try {
Producer<CloseableReference<CloseableImage>> producerSequence =
mProducerSequenceFactory.getDecodedImageProducerSequence(imageRequest); // the Producer is responsible for fetching the image from the network
return submitFetchRequest( // here the task is submitted to the task queue for execution
producerSequence,
imageRequest,
lowestPermittedRequestLevelOnSubmit,
callerContext,
requestListener,
uiComponentId);
} catch (Exception exception) {
return DataSources.immediateFailedDataSource(exception);
}
}
private <T> DataSource<CloseableReference<T>> submitFetchRequest(
Producer<CloseableReference<T>> producerSequence,
ImageRequest imageRequest,
ImageRequest.RequestLevel lowestPermittedRequestLevelOnSubmit,
@Nullable Object callerContext,
@Nullable RequestListener requestListener,
@Nullable String uiComponentId) {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection("ImagePipeline#submitFetchRequest");
}
final RequestListener2 requestListener2 =
new InternalRequestListener(
getRequestListenerForRequest(imageRequest, requestListener), mRequestListener2);
if (mCallerContextVerifier != null) {
mCallerContextVerifier.verifyCallerContext(callerContext, false);
}
try {
ImageRequest.RequestLevel lowestPermittedRequestLevel =
ImageRequest.RequestLevel.getMax(
imageRequest.getLowestPermittedRequestLevel(), lowestPermittedRequestLevelOnSubmit);
SettableProducerContext settableProducerContext =
new SettableProducerContext(
imageRequest,
generateUniqueFutureId(),
uiComponentId,
requestListener2,
callerContext,
lowestPermittedRequestLevel,
/* isPrefetch */ false,
imageRequest.getProgressiveRenderingEnabled()
|| !UriUtil.isNetworkUri(imageRequest.getSourceUri()),
imageRequest.getPriority(),
mConfig);
return CloseableProducerToDataSourceAdapter.create(
producerSequence, settableProducerContext, requestListener2);
} catch (Exception exception) {
return DataSources.immediateFailedDataSource(exception);
} finally {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
}
}
}
class CloseableProducerToDataSourceAdapter {
public static <T> DataSource<CloseableReference<T>> create(
Producer<CloseableReference<T>> producer,
SettableProducerContext settableProducerContext,
RequestListener2 listener) {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection("CloseableProducerToDataSourceAdapter#create");
}
CloseableProducerToDataSourceAdapter<T> result =
new CloseableProducerToDataSourceAdapter<T>(producer, settableProducerContext, listener);
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
return result;
}
}
class AbstractProducerToDataSourceAdapter {
protected AbstractProducerToDataSourceAdapter(
Producer<T> producer,
SettableProducerContext settableProducerContext,
RequestListener2 requestListener) {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection("AbstractProducerToDataSourceAdapter()");
}
mSettableProducerContext = settableProducerContext;
mRequestListener = requestListener;
setInitialExtras();
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection("AbstractProducerToDataSourceAdapter()->onRequestStart");
}
mRequestListener.onRequestStart(mSettableProducerContext);
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection("AbstractProducerToDataSourceAdapter()->produceResult");
}
producer.produceResults(createConsumer(), settableProducerContext); // the submission of the task to the task queue is hidden in here
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
}
}
/**
* Fresco introduces the Producer concept to compose/assemble the whole image processing pipeline (ImagePipeline); the output of one producer can be the input of the next
* Building block for image processing in the image pipeline
*/
public interface Producer<T> {
/**
* This method kicks off the whole image pipeline; results are delivered back in callback style (via the Consumer)
* Start producing results for given context. Provided consumer is notified whenever progress is
* made (new value is ready or error occurs).
*/
void produceResults(Consumer<T> consumer, ProducerContext context);
}
public interface Consumer<T> {
void onNewResult(@Nullable T newResult, @Status int status);
void onFailure(Throwable t);
void onCancellation();
void onProgressUpdate(float progress);
}
// Back to ImagePipeline.fetchDecodedImage to see how the whole pipeline is assembled
class ProducerSequenceFactory {
public Producer<CloseableReference<CloseableImage>> getDecodedImageProducerSequence(
ImageRequest imageRequest) {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection("ProducerSequenceFactory#getDecodedImageProducerSequence");
}
Producer<CloseableReference<CloseableImage>> pipelineSequence =
getBasicDecodedImageSequence(imageRequest);
// postprocessor, bitmap prepare-to-draw, and delayed loading
if (imageRequest.getPostprocessor() != null) {
pipelineSequence = getPostprocessorSequence(pipelineSequence);
}
if (mUseBitmapPrepareToDraw) {
pipelineSequence = getBitmapPrepareSequence(pipelineSequence);
}
if (mAllowDelay && imageRequest.getDelayMs() > 0) {
pipelineSequence = getDelaySequence(pipelineSequence);
}
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
return pipelineSequence;
}
}
class ProducerSequenceFactory {
private Producer<CloseableReference<CloseableImage>> getBasicDecodedImageSequence(
ImageRequest imageRequest) {
try {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection("ProducerSequenceFactory#getBasicDecodedImageSequence");
}
Preconditions.checkNotNull(imageRequest);
Uri uri = imageRequest.getSourceUri();
Preconditions.checkNotNull(uri, "Uri is null.");
switch (imageRequest.getSourceUriType()) {
case SOURCE_TYPE_NETWORK: // network source
return getNetworkFetchSequence();
case SOURCE_TYPE_LOCAL_VIDEO_FILE:
return getLocalVideoFileFetchSequence();
case SOURCE_TYPE_LOCAL_IMAGE_FILE:
return getLocalImageFileFetchSequence();
case SOURCE_TYPE_LOCAL_CONTENT:
if (imageRequest.getLoadThumbnailOnly()
&& Build.VERSION.SDK_INT >= Build.VERSION_CODES.Q) {
return getLocalContentUriThumbnailFetchSequence();
} else if (MediaUtils.isVideo(mContentResolver.getType(uri))) {
return getLocalVideoFileFetchSequence();
}
return getLocalContentUriFetchSequence();
case SOURCE_TYPE_LOCAL_ASSET:
return getLocalAssetFetchSequence();
case SOURCE_TYPE_LOCAL_RESOURCE:
return getLocalResourceFetchSequence();
case SOURCE_TYPE_QUALIFIED_RESOURCE:
return getQualifiedResourceFetchSequence();
case SOURCE_TYPE_DATA:
return getDataFetchSequence();
default:
throw new IllegalArgumentException(
"Unsupported uri scheme! Uri is: " + getShortenedUriString(uri));
}
} finally {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
}
}
private synchronized Producer<CloseableReference<CloseableImage>> getNetworkFetchSequence() {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection("ProducerSequenceFactory#getNetworkFetchSequence");
}
if (mNetworkFetchSequence == null) {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection("ProducerSequenceFactory#getNetworkFetchSequence:init");
}
mNetworkFetchSequence =
newBitmapCacheGetToDecodeSequence(getCommonNetworkFetchToEncodedMemorySequence());
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
}
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
return mNetworkFetchSequence;
}
private synchronized Producer<EncodedImage> getCommonNetworkFetchToEncodedMemorySequence() {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection(
"ProducerSequenceFactory#getCommonNetworkFetchToEncodedMemorySequence");
}
if (mCommonNetworkFetchToEncodedMemorySequence == null) {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection(
"ProducerSequenceFactory#getCommonNetworkFetchToEncodedMemorySequence:init");
}
Producer<EncodedImage> inputProducer =
Preconditions.checkNotNull(
newEncodedCacheMultiplexToTranscodeSequence( // decoding and caching
mProducerFactory.newNetworkFetchProducer(mNetworkFetcher))); // fetches the byte array from the network
mCommonNetworkFetchToEncodedMemorySequence =
ProducerFactory.newAddImageTransformMetaDataProducer(inputProducer);
mCommonNetworkFetchToEncodedMemorySequence =
mProducerFactory.newResizeAndRotateProducer( // resize and rotate
mCommonNetworkFetchToEncodedMemorySequence,
mResizeAndRotateEnabledForNetwork && !mDownsampleEnabled,
mImageTranscoderFactory);
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
}
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
return mCommonNetworkFetchToEncodedMemorySequence;
}
}
class ProducerFactory {
public Producer<EncodedImage> newNetworkFetchProducer(NetworkFetcher networkFetcher) {
return new NetworkFetchProducer(mPooledByteBufferFactory, mByteArrayPool, networkFetcher);
}
}
// NetworkFetchProducer.produceResults submits the network request task to the task queue
// The resulting data (a byte array) is wrapped as an EncodedImage (not decoded yet) and handed on to the next Producer
class NetworkFetchProducer implements Producer<EncodedImage> {
public void produceResults(Consumer<EncodedImage> consumer, ProducerContext context) {
context.getProducerListener().onProducerStart(context, PRODUCER_NAME);
final FetchState fetchState = mNetworkFetcher.createFetchState(consumer, context);
mNetworkFetcher.fetch(
fetchState,
new NetworkFetcher.Callback() {
@Override
public void onResponse(InputStream response, int responseLength) throws IOException {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection("NetworkFetcher->onResponse");
}
NetworkFetchProducer.this.onResponse(fetchState, response, responseLength);
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
}
@Override
public void onFailure(Throwable throwable) {
NetworkFetchProducer.this.onFailure(fetchState, throwable);
}
@Override
public void onCancellation() {
NetworkFetchProducer.this.onCancellation(fetchState);
}
});
}
protected void onResponse(
FetchState fetchState, InputStream responseData, int responseContentLength)
throws IOException {
final PooledByteBufferOutputStream pooledOutputStream;
if (responseContentLength > 0) {
pooledOutputStream = mPooledByteBufferFactory.newOutputStream(responseContentLength);
} else {
pooledOutputStream = mPooledByteBufferFactory.newOutputStream();
}
final byte[] ioArray = mByteArrayPool.get(READ_SIZE);
try {
int length;
while ((length = responseData.read(ioArray)) >= 0) {
if (length > 0) {
pooledOutputStream.write(ioArray, 0, length);
maybeHandleIntermediateResult(pooledOutputStream, fetchState);
float progress = calculateProgress(pooledOutputStream.size(), responseContentLength);
fetchState.getConsumer().onProgressUpdate(progress);
}
}
mNetworkFetcher.onFetchCompletion(fetchState, pooledOutputStream.size());
handleFinalResult(pooledOutputStream, fetchState);
} finally {
mByteArrayPool.release(ioArray);
pooledOutputStream.close();
}
}
protected void handleFinalResult(
PooledByteBufferOutputStream pooledOutputStream, FetchState fetchState) {
Map<String, String> extraMap = getExtraMap(fetchState, pooledOutputStream.size());
ProducerListener2 listener = fetchState.getListener();
listener.onProducerFinishWithSuccess(fetchState.getContext(), PRODUCER_NAME, extraMap);
listener.onUltimateProducerReached(fetchState.getContext(), PRODUCER_NAME, true);
fetchState.getContext().putOriginExtra("network");
notifyConsumer(
pooledOutputStream,
Consumer.IS_LAST | fetchState.getOnNewResultStatusFlags(),
fetchState.getResponseBytesRange(),
fetchState.getConsumer(),
fetchState.getContext());
}
}
Some Concepts
Unlike Glide, Fresco cannot work directly on an ImageView; you have to replace the ImageView in your layout with SimpleDraweeView (feels a bit invasive, doesn't it?)
Fortunately Fresco's internal logic is not written into the UI widget SimpleDraweeView itself; it is pulled out into DraweeHolder and DraweeController
Image loading and processing form a pipeline, the ImagePipeline. Producer / Consumer are the nodes on this pipeline; the output of one node is the input of the next, nodes are chained one-to-one, and producer and consumer talk in callback style
The final output of the ImagePipeline is a DataSource, a concept similar to a Future; you can subscribe to its events with a DataSubscriber: success (or a new intermediate result), failure, cancellation and progress updates. A data source can have multiple subscribers (one-to-many)
ImagePipeline
The ImagePipeline can be pictured as a stack: nodes keep being added to it, the first one added is the head node and the last one added is the tail node; nodes added earlier sit in front of nodes added later
When a request is issued it first reaches the tail node and then flows towards the head node (the forward leg); the response then flows from the head node back to the tail node (the return leg)
Just like an OkHttp interceptor chain or a Servlet container filter chain, one round of ImagePipeline work consists of a forward leg and a return leg. Take [A, B, C, D] as an example (A is the head node):
- The forward leg starts with Producer.produceResults: D.produceResults -> C.produceResults -> B.produceResults -> A.produceResults. A might be a NetworkFetchProducer that performs the network request; once the response is in, the return leg begins
- The return leg starts with Consumer.onNewResult: A.onNewResult -> B.onNewResult -> C.onNewResult -> D.onNewResult -> my.onNewResult. Of course it may instead be onFailure(throwable), onCancellation() and so on, flowing in the same order
NetworkFetchProducer
As the name suggests, this is the ImagePipeline node that performs the network (HTTP) request, and it is also the first (head) node of the whole pipeline
It uses the NetworkFetcher interface to smooth over the API differences between the underlying network libraries; concrete implementations include:
- OkHttpNetworkFetcher
- VolleyNetworkFetcher
- HttpUrlConnectionNetworkFetcher
- PriorityNetworkFetcher, a decorator: the actual network loading is delegated to one of the implementations above, while it adds priority-queue behaviour to requests (more on this later)
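For reference, a sketch of wiring in the OkHttp-based fetcher (assuming the imagepipeline-okhttp3 artifact is on the classpath):
import android.content.Context;
import com.facebook.drawee.backends.pipeline.Fresco;
import com.facebook.imagepipeline.backends.okhttp3.OkHttpImagePipelineConfigFactory;
import com.facebook.imagepipeline.core.ImagePipelineConfig;
import okhttp3.OkHttpClient;
// Sketch: initialize Fresco with an OkHttp-backed NetworkFetcher.
public final class FrescoInit {
  public static void init(Context context) {
    OkHttpClient okHttpClient = new OkHttpClient.Builder().build();
    ImagePipelineConfig config =
        OkHttpImagePipelineConfigFactory.newBuilder(context, okHttpClient).build();
    Fresco.initialize(context, config);
  }
}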
/**
* Interface that specifies network fetcher used by the image pipeline.
*/
public interface NetworkFetcher<FETCH_STATE extends FetchState> {
interface Callback {
void onResponse(InputStream response, int responseLength) throws IOException;
void onFailure(Throwable throwable);
void onCancellation();
}
/**
* Creates a new instance of the {@link FetchState}-derived object used to store state.
*/
FETCH_STATE createFetchState(Consumer<EncodedImage> consumer, ProducerContext producerContext);
/**
* Gets a map containing extra parameters to pass to the listeners.
*/
Map<String, String> getExtraMap(FETCH_STATE fetchState, int byteSize);
void fetch(FETCH_STATE fetchState, Callback callback);
boolean shouldPropagate(FETCH_STATE fetchState);
void onFetchCompletion(FETCH_STATE fetchState, int byteSize);
}
NetworkFetchProducer reads the response body as a byte array and wraps it as an EncodedImage for the next node; for performance reasons the byte array is not decoded into a Bitmap at this stage, instead only the first N bytes are parsed to obtain metadata such as the image format and dimensions
The loading progress (Consumer.onProgressUpdate) is also emitted by NetworkFetchProducer while it reads the response
class NetworkFetchProducer {
public void produceResults(Consumer<EncodedImage> consumer, ProducerContext context) { // the actual network loading is delegated to NetworkFetcher
context.getProducerListener().onProducerStart(context, PRODUCER_NAME);
final FetchState fetchState = mNetworkFetcher.createFetchState(consumer, context);
mNetworkFetcher.fetch(
fetchState,
new NetworkFetcher.Callback() {
@Override
public void onResponse(InputStream response, int responseLength) throws IOException {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection("NetworkFetcher->onResponse");
}
NetworkFetchProducer.this.onResponse(fetchState, response, responseLength);
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
}
@Override
public void onFailure(Throwable throwable) {
NetworkFetchProducer.this.onFailure(fetchState, throwable);
}
@Override
public void onCancellation() {
NetworkFetchProducer.this.onCancellation(fetchState);
}
});
}
protected void onResponse(
FetchState fetchState, InputStream responseData, int responseContentLength)
throws IOException {
final PooledByteBufferOutputStream pooledOutputStream;
if (responseContentLength > 0) {
pooledOutputStream = mPooledByteBufferFactory.newOutputStream(responseContentLength);
} else {
pooledOutputStream = mPooledByteBufferFactory.newOutputStream();
}
final byte[] ioArray = mByteArrayPool.get(READ_SIZE);
try {
int length;
while ((length = responseData.read(ioArray)) >= 0) {
if (length > 0) {
pooledOutputStream.write(ioArray, 0, length);
maybeHandleIntermediateResult(pooledOutputStream, fetchState);
float progress = calculateProgress(pooledOutputStream.size(), responseContentLength);
fetchState.getConsumer().onProgressUpdate(progress); // report loading progress
}
}
mNetworkFetcher.onFetchCompletion(fetchState, pooledOutputStream.size());
handleFinalResult(pooledOutputStream, fetchState);
} finally {
mByteArrayPool.release(ioArray);
pooledOutputStream.close();
}
}
protected void handleFinalResult(
PooledByteBufferOutputStream pooledOutputStream, FetchState fetchState) {
Map<String, String> extraMap = getExtraMap(fetchState, pooledOutputStream.size());
ProducerListener2 listener = fetchState.getListener();
listener.onProducerFinishWithSuccess(fetchState.getContext(), PRODUCER_NAME, extraMap);
listener.onUltimateProducerReached(fetchState.getContext(), PRODUCER_NAME, true);
fetchState.getContext().putOriginExtra("network");
notifyConsumer(
pooledOutputStream,
Consumer.IS_LAST | fetchState.getOnNewResultStatusFlags(),
fetchState.getResponseBytesRange(),
fetchState.getConsumer(),
fetchState.getContext());
}
protected static void notifyConsumer(
PooledByteBufferOutputStream pooledOutputStream,
@Consumer.Status int status,
@Nullable BytesRange responseBytesRange,
Consumer<EncodedImage> consumer,
ProducerContext context) { // wrap the byte array as an EncodedImage and hand it to the consumer
CloseableReference<PooledByteBuffer> result =
CloseableReference.of(pooledOutputStream.toByteBuffer());
EncodedImage encodedImage = null;
try {
encodedImage = new EncodedImage(result);
encodedImage.setBytesRange(responseBytesRange);
encodedImage.parseMetaData();
context.setEncodedImageOrigin(EncodedImageOrigin.NETWORK);
consumer.onNewResult(encodedImage, status);
} finally {
EncodedImage.closeSafely(encodedImage);
CloseableReference.closeSafely(result);
}
}
}
// A quick look at how EncodedImage determines the image format; for the various formats and their magic bytes (e.g. the JPEG SOI marker), dig into the code and cross-reference material online
class EncodedImage {
public void parseMetaData() {
if (!sUseCachedMetadata) {
internalParseMetaData();
return;
}
if (mHasParsedMetadata) {
return;
}
internalParseMetaData();
mHasParsedMetadata = true;
}
/** Sets the encoded image meta data. */
private void internalParseMetaData() {
final ImageFormat imageFormat =
ImageFormatChecker.getImageFormat_WrapIOException(getInputStream()); // determine the image format
mImageFormat = imageFormat;
// BitmapUtil.decodeDimensions has a bug where it will return 100x100 for some WebPs even though
// those are not its actual dimensions
final Pair<Integer, Integer> dimensions; // parse the dimensions
if (DefaultImageFormats.isWebpFormat(imageFormat)) {
dimensions = readWebPImageSize();
} else {
dimensions = readImageMetaData().getDimensions();
}
if (imageFormat == DefaultImageFormats.JPEG && mRotationAngle == UNKNOWN_ROTATION_ANGLE) {
// Load the JPEG rotation angle only if we have the dimensions
if (dimensions != null) {
mExifOrientation = JfifUtil.getOrientation(getInputStream());
mRotationAngle = JfifUtil.getAutoRotateAngleFromOrientation(mExifOrientation);
}
} else if (imageFormat == DefaultImageFormats.HEIF
&& mRotationAngle == UNKNOWN_ROTATION_ANGLE) {
mExifOrientation = HeifExifUtil.getOrientation(getInputStream());
mRotationAngle = JfifUtil.getAutoRotateAngleFromOrientation(mExifOrientation);
} else if (mRotationAngle == UNKNOWN_ROTATION_ANGLE) {
mRotationAngle = 0;
}
}
}
class ImageFormatChecker {
public static ImageFormat getImageFormat_WrapIOException(final InputStream is) {
try {
return getImageFormat(is);
} catch (IOException ioe) {
throw Throwables.propagate(ioe);
}
}
/**
* Tries to read up to MAX_HEADER_LENGTH bytes from InputStream is and use read bytes to determine
* type of the image contained in is.
*/
public static ImageFormat getImageFormat(final InputStream is) throws IOException {
return getInstance().determineImageFormat(is);
}
public ImageFormat determineImageFormat(final InputStream is) throws IOException {
Preconditions.checkNotNull(is);
final byte[] imageHeaderBytes = new byte[mMaxHeaderLength];
final int headerSize = readHeaderFromStream(mMaxHeaderLength, is, imageHeaderBytes);
ImageFormat format = mDefaultFormatChecker.determineFormat(imageHeaderBytes, headerSize);
if (format != null && format != ImageFormat.UNKNOWN) {
return format;
}
if (mCustomImageFormatCheckers != null) {
for (ImageFormat.FormatChecker formatChecker : mCustomImageFormatCheckers) {
format = formatChecker.determineFormat(imageHeaderBytes, headerSize);
if (format != null && format != ImageFormat.UNKNOWN) {
return format;
}
}
}
return ImageFormat.UNKNOWN;
}
}
class DefaultImageFormatChecker {
public final ImageFormat determineFormat(byte[] headerBytes, int headerSize) {
Preconditions.checkNotNull(headerBytes);
if (!mUseNewOrder && WebpSupportStatus.isWebpHeader(headerBytes, 0, headerSize)) {
return getWebpFormat(headerBytes, headerSize);
}
if (isJpegHeader(headerBytes, headerSize)) {
return DefaultImageFormats.JPEG;
}
if (isPngHeader(headerBytes, headerSize)) {
return DefaultImageFormats.PNG;
}
if (mUseNewOrder && WebpSupportStatus.isWebpHeader(headerBytes, 0, headerSize)) {
return getWebpFormat(headerBytes, headerSize);
}
if (isGifHeader(headerBytes, headerSize)) {
return DefaultImageFormats.GIF;
}
if (isBmpHeader(headerBytes, headerSize)) {
return DefaultImageFormats.BMP;
}
if (isIcoHeader(headerBytes, headerSize)) {
return DefaultImageFormats.ICO;
}
if (isHeifHeader(headerBytes, headerSize)) {
return DefaultImageFormats.HEIF;
}
if (isDngHeader(headerBytes, headerSize)) {
return DefaultImageFormats.DNG;
}
return ImageFormat.UNKNOWN;
}
/**
* Every JPEG image should start with SOI mark (0xFF, 0xD8) followed by beginning of another
* segment (0xFF)
*/
private static final byte[] JPEG_HEADER = new byte[] {(byte) 0xFF, (byte) 0xD8, (byte) 0xFF};
/**
* Checks if imageHeaderBytes starts with SOI (start of image) marker, followed by 0xFF. If
* headerSize is lower than 3 false is returned. Description of jpeg format can be found here: <a
* href="http://www.w3.org/Graphics/JPEG/itu-t81.pdf">
* http://www.w3.org/Graphics/JPEG/itu-t81.pdf</a> Annex B deals with compressed data format
*/
private static boolean isJpegHeader(final byte[] imageHeaderBytes, final int headerSize) {
return headerSize >= JPEG_HEADER.length
&& ImageFormatCheckerUtils.startsWithPattern(imageHeaderBytes, JPEG_HEADER);
}
}
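To make the magic-byte idea concrete, a standalone sketch (plain Java, not Fresco code) that checks the JPEG SOI marker the same way isJpegHeader does:
import java.io.IOException;
import java.io.InputStream;
// Standalone sketch: detect a JPEG by its first three bytes (0xFF 0xD8 0xFF).
final class JpegSniffer {
  private static final int[] JPEG_HEADER = {0xFF, 0xD8, 0xFF};

  static boolean looksLikeJpeg(InputStream in) throws IOException {
    for (int expected : JPEG_HEADER) {
      int b = in.read();   // read one header byte (-1 means end of stream)
      if (b != expected) {
        return false;      // stream too short or byte mismatch
      }
    }
    return true;
  }
}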
More resource loaders
The appropriate loader is chosen by the URI scheme of the resource. NetworkFetchProducer is probably one of the most commonly used loaders; it fetches the resource from the network. Besides it there are LocalResourceFetchProducer, LocalFileFetchProducer and so on
uri | producer | core api |
---|---|---|
https://abc.com/avatar.jpg | NetworkFetchProducer | OkHttp / Volley / HttpUrlConnection |
res://com.example/75281679 | LocalResourceFetchProducer | Resources.openRawResource(resId); the authority part is ignored, the path is used directly as the resource ID |
asset://com.example/default.jpg | LocalAssetFetchProducer | AssetManager.open; the authority part is ignored, the path is used directly as the asset path |
file:///sdcard/DCIM/avatar.jpg | LocalFileFetchProducer | File API |
content://media/images/2283 | LocalContentUriFetchProducer / LocalThumbnailBitmapProducer | ContentResolver.openInputStream / loadThumbnail |
data://base64 | DataFetchProducer | Base64.decode |
android.resource:// | QualifiedResourceFetchProducer | ContentResolver.openInputStream |
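The same table expressed as code, a sketch of URIs you might pass to setImageURI (all package names, ids and paths below are made up):
import android.net.Uri;
// Made-up examples of the URI schemes the pipeline understands.
final class UriExamples {
  static final Uri NETWORK   = Uri.parse("https://abc.com/avatar.jpg");
  static final Uri RESOURCE  = Uri.parse("res://com.example/75281679");      // path = resource id, authority ignored
  static final Uri ASSET     = Uri.parse("asset://com.example/default.jpg"); // path = asset path, authority ignored
  static final Uri LOCAL     = Uri.parse("file:///sdcard/DCIM/avatar.jpg");
  static final Uri CONTENT   = Uri.parse("content://media/images/2283");
  static final Uri DATA      = Uri.parse("data:image/png;base64,iVBORw0KGgo="); // truncated base64, illustrative only
  static final Uri QUALIFIED = Uri.parse("android.resource://com.example/75281679");
}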
class ImageRequest {
private static @SourceUriType int getSourceUriType(final Uri uri) {
if (uri == null) {
return SOURCE_TYPE_UNKNOWN;
}
if (UriUtil.isNetworkUri(uri)) { // http:// & https://
return SOURCE_TYPE_NETWORK;
} else if (UriUtil.isLocalFileUri(uri)) { // file://
if (MediaUtils.isVideo(MediaUtils.extractMime(uri.getPath()))) { // use the file extension to tell image from video
return SOURCE_TYPE_LOCAL_VIDEO_FILE;
} else {
return SOURCE_TYPE_LOCAL_IMAGE_FILE;
}
} else if (UriUtil.isLocalContentUri(uri)) { // content://
return SOURCE_TYPE_LOCAL_CONTENT;
} else if (UriUtil.isLocalAssetUri(uri)) { // asset://
return SOURCE_TYPE_LOCAL_ASSET;
} else if (UriUtil.isLocalResourceUri(uri)) { // res://
return SOURCE_TYPE_LOCAL_RESOURCE;
} else if (UriUtil.isDataUri(uri)) { // data://
return SOURCE_TYPE_DATA;
} else if (UriUtil.isQualifiedResourceUri(uri)) { // android.resource://
return SOURCE_TYPE_QUALIFIED_RESOURCE;
} else {
return SOURCE_TYPE_UNKNOWN;
}
}
}
Disk Cache
The disk cache feature is enabled via ImagePipelineConfig.Builder.setDiskCacheEnabled. The related ImagePipeline nodes are added right next to head nodes such as NetworkFetchProducer, and there are three of them (in the order they are added; a configuration sketch follows the list):
- PartialDiskCacheProducer
- DiskCacheWriteProducer
- DiskCacheReadProducer
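A sketch of enabling and sizing the disk cache (the size is an arbitrary example):
import android.content.Context;
import com.facebook.cache.disk.DiskCacheConfig;
import com.facebook.drawee.backends.pipeline.Fresco;
import com.facebook.imagepipeline.core.ImagePipelineConfig;
// Sketch: turn on the disk cache and give it an explicit size; the number is arbitrary.
public final class DiskCacheSetup {
  public static void init(Context context) {
    DiskCacheConfig diskCacheConfig =
        DiskCacheConfig.newBuilder(context)
            .setMaxCacheSize(64L * 1024 * 1024) // 64 MB
            .build();
    ImagePipelineConfig config =
        ImagePipelineConfig.newBuilder(context)
            .setDiskCacheEnabled(true)           // the flag mentioned above
            .setMainDiskCacheConfig(diskCacheConfig)
            .build();
    Fresco.initialize(context, config);
  }
}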
The call stack that adds these ImagePipeline nodes looks like this:
SimpleDraweeView.setImageURI(uri)
SimpleDraweeView.setImageURI(uri, null)
AbstractDraweeControllerBuilder.build
AbstractDraweeControllerBuilder.buildController
PipelineDraweeControllerBuilder.obtainController
AbstractDraweeControllerBuilder.obtainDataSourceSupplier
AbstractDraweeControllerBuilder.getDataSourceSupplierForRequest(controller, controllerId, imageRequest)
AbstractDraweeControllerBuilder.getDataSourceSupplierForRequest(controller, controllerId, imageRequest, CacheLevel.FULL_FETCH)
PipelineDraweeControllerBuilder.getDataSourceForRequest
ImagePipeline.fetchDecodedImage
ProducerSequenceFactory.getDecodedImageProducerSequence
ProducerSequenceFactory.getBasicDecodedImageSequence
ProducerSequenceFactory.getNetworkFetchSequence
ProducerSequenceFactory.getCommonNetworkFetchToEncodedMemorySequence
ProducerSequenceFactory.newEncodedCacheMultiplexToTranscodeSequence
class ProducerSequenceFactory {
private Producer<EncodedImage> newDiskCacheSequence(Producer<EncodedImage> inputProducer) {
Producer<EncodedImage> cacheWriteProducer;
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection("ProducerSequenceFactory#newDiskCacheSequence");
}
if (mPartialImageCachingEnabled) {
Producer<EncodedImage> partialDiskCacheProducer =
mProducerFactory.newPartialDiskCacheProducer(inputProducer);
cacheWriteProducer = mProducerFactory.newDiskCacheWriteProducer(partialDiskCacheProducer);
} else {
cacheWriteProducer = mProducerFactory.newDiskCacheWriteProducer(inputProducer);
}
DiskCacheReadProducer result = mProducerFactory.newDiskCacheReadProducer(cacheWriteProducer);
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
return result;
}
}
PartialDiskCacheProducer
Enabled via ImagePipelineConfig.Builder.experiment().setPartialImageCachingEnabled
Before going on, it helps to know the concept of HTTP range downloads. A range download can be split into many segments (on demand), but PartialDiskCacheProducer is a little different: it only ever deals with two segments. The logic in its code:
- When the ImagePipeline request is issued (forward leg)
  - First look for a partial cache entry in the disk cache
  - If the request asks for partial content and the partial cache covers that range, no network request is needed; return the cache directly
  - Otherwise return the partial cache flagged as IS_PARTIAL_RESULT, and issue a network request for the remaining content
  - If there is no partial cache, run the original request
- When the response arrives (return leg)
  - If the response headers contain Content-Range, this is partial content, so set the IS_PARTIAL_RESULT flag
  - If a partial cache exists and the response is partial, merge the cache with the response, pass the result to the next node, and delete the partial cache
  - If there is no partial cache and the response is partial, store this content as the partial cache for a later merge
So the business logic goes like this:
- A full resource is requested but the server returns only part of it (206, Content-Range), or only part of the resource is requested (Range header); either way the server returns partial content
- The IS_PARTIAL_RESULT flag is detected and the response is cached
- The next time this resource is requested (a Range header is not required), the cached partial content is returned first, then the remaining content is requested
- When that response arrives (it must contain Content-Range, otherwise it is not treated as partial content and there is no IS_PARTIAL_RESULT flag), the partial content and the partial cache are merged into the complete resource and returned, and the partial cache is deleted
The question is: in what scenario would you request only part of a resource (Range header), or request a full resource and have the server return only part of it (206, Content-Range)? I don't quite see the use case
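To ground the Range / Content-Range mechanics this producer relies on, a standalone HTTP sketch (the URL is made up, and the server must support range requests for this to work):
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
// Standalone sketch: request the bytes from a given offset and check the 206/Content-Range reply.
final class RangeRequestSketch {
  static void fetchFrom(long offset) throws IOException {
    HttpURLConnection conn =
        (HttpURLConnection) new URL("https://example.com/avatar.jpg").openConnection();
    conn.setRequestProperty("Range", "bytes=" + offset + "-"); // "give me the rest"
    try {
      int code = conn.getResponseCode();                          // 206 if the range is honoured
      String contentRange = conn.getHeaderField("Content-Range"); // e.g. "bytes 1024-4095/4096"
      if (code == HttpURLConnection.HTTP_PARTIAL && contentRange != null) {
        try (InputStream body = conn.getInputStream()) {
          // body holds only the remaining bytes; a caller would merge them with the
          // locally cached prefix, which is what PartialDiskCacheConsumer does.
        }
      }
    } finally {
      conn.disconnect();
    }
  }
}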
class PartialDiskCacheProducer {
public void produceResults(
final Consumer<EncodedImage> consumer, final ProducerContext producerContext) {
final ImageRequest imageRequest = producerContext.getImageRequest();
final boolean isDiskCacheEnabledForRead =
producerContext
.getImageRequest()
.isCacheEnabled(ImageRequest.CachesLocationsMasks.DISK_READ);
final ProducerListener2 listener = producerContext.getProducerListener();
listener.onProducerStart(producerContext, PRODUCER_NAME);
final Uri uriForPartialCacheKey = createUriForPartialCacheKey(imageRequest);
final CacheKey partialImageCacheKey =
mCacheKeyFactory.getEncodedCacheKey(
imageRequest, uriForPartialCacheKey, producerContext.getCallerContext());
if (!isDiskCacheEnabledForRead) {
listener.onProducerFinishWithSuccess(
producerContext, PRODUCER_NAME, getExtraMap(listener, producerContext, false, 0));
startInputProducer(consumer, producerContext, partialImageCacheKey, null);
return;
}
// On the forward leg, first look for a partial cache entry in the disk cache
final AtomicBoolean isCancelled = new AtomicBoolean(false);
final Task<EncodedImage> diskLookupTask =
mDefaultBufferedDiskCache.get(partialImageCacheKey, isCancelled);
final Continuation<EncodedImage, Void> continuation =
onFinishDiskReads(consumer, producerContext, partialImageCacheKey);
diskLookupTask.continueWith(continuation);
subscribeTaskForRequestCancellation(isCancelled, producerContext);
}
private Continuation<EncodedImage, Void> onFinishDiskReads(
final Consumer<EncodedImage> consumer,
final ProducerContext producerContext,
final CacheKey partialImageCacheKey) {
final ProducerListener2 listener = producerContext.getProducerListener();
return new Continuation<EncodedImage, Void>() {
@Override
public Void then(Task<EncodedImage> task) throws Exception {
if (isTaskCancelled(task)) {
listener.onProducerFinishWithCancellation(producerContext, PRODUCER_NAME, null);
consumer.onCancellation();
} else if (task.isFaulted()) {
listener.onProducerFinishWithFailure(
producerContext, PRODUCER_NAME, task.getError(), null);
startInputProducer(consumer, producerContext, partialImageCacheKey, null);
} else {
EncodedImage cachedReference = task.getResult();
if (cachedReference != null) {
listener.onProducerFinishWithSuccess(
producerContext,
PRODUCER_NAME,
getExtraMap(listener, producerContext, true, cachedReference.getSize()));
final BytesRange cachedRange = BytesRange.toMax(cachedReference.getSize() - 1);
cachedReference.setBytesRange(cachedRange);
// Create a new ImageRequest for the remaining data
final int cachedLength = cachedReference.getSize();
final ImageRequest originalRequest = producerContext.getImageRequest();
// If the request asks for partial content and the partial cache covers that range, no network request is needed; return the cache directly
if (cachedRange.contains(originalRequest.getBytesRange())) {
producerContext.putOriginExtra("disk", "partial");
listener.onUltimateProducerReached(producerContext, PRODUCER_NAME, true);
consumer.onNewResult(cachedReference, Consumer.IS_LAST | Consumer.IS_PARTIAL_RESULT);
} else {
// Otherwise return the partial cache flagged as IS_PARTIAL_RESULT and issue a network request for the remaining content
consumer.onNewResult(cachedReference, Consumer.IS_PARTIAL_RESULT);
// Pass the request on, but only for the remaining bytes
final ImageRequest remainingRequest =
ImageRequestBuilder.fromRequest(originalRequest)
.setBytesRange(BytesRange.from(cachedLength - 1))
.build();
final SettableProducerContext contextForRemainingRequest =
new SettableProducerContext(remainingRequest, producerContext);
startInputProducer(
consumer, contextForRemainingRequest, partialImageCacheKey, cachedReference);
}
} else {
// No partial cache, so run the original request
listener.onProducerFinishWithSuccess(
producerContext, PRODUCER_NAME, getExtraMap(listener, producerContext, false, 0));
startInputProducer(consumer, producerContext, partialImageCacheKey, cachedReference);
}
}
return null;
}
};
}
}
class OkHttpNetworkFetcher {
protected void fetchWithRequest(
final OkHttpNetworkFetchState fetchState,
final NetworkFetcher.Callback callback,
final Request request) {
final Call call = mCallFactory.newCall(request);
fetchState
.getContext()
.addCallbacks(
new BaseProducerContextCallbacks() {
@Override
public void onCancellationRequested() {
if (Looper.myLooper() != Looper.getMainLooper()) {
call.cancel();
} else {
mCancellationExecutor.execute(
new Runnable() {
@Override
public void run() {
call.cancel();
}
});
}
}
});
call.enqueue(
new okhttp3.Callback() {
@Override
public void onResponse(Call call, Response response) throws IOException {
fetchState.responseTime = SystemClock.elapsedRealtime();
final ResponseBody body = response.body();
if (body == null) {
handleException(call, new IOException("Response body null: " + response), callback);
return;
}
try {
if (!response.isSuccessful()) {
handleException(
call, new IOException("Unexpected HTTP code " + response), callback);
return;
}
// The response header contains Content-Range, so this is partial content; set the IS_PARTIAL_RESULT flag
BytesRange responseRange =
BytesRange.fromContentRangeHeader(response.header("Content-Range"));
if (responseRange != null
&& !(responseRange.from == 0
&& responseRange.to == BytesRange.TO_END_OF_CONTENT)) {
// Only treat as a partial image if the range is not all of the content
fetchState.setResponseBytesRange(responseRange);
fetchState.setOnNewResultStatusFlags(Consumer.IS_PARTIAL_RESULT);
}
long contentLength = body.contentLength();
if (contentLength < 0) {
contentLength = 0;
}
callback.onResponse(body.byteStream(), (int) contentLength);
} catch (Exception e) {
handleException(call, e, callback);
} finally {
body.close();
}
}
@Override
public void onFailure(Call call, IOException e) {
handleException(call, e, callback);
}
});
}
}
class PartialDiskCacheConsumer {
public void onNewResultImpl(@Nullable EncodedImage newResult, @Status int status) {
if (isNotLast(status)) {
// TODO 19247361 Consider merging of non-final results
return;
}
// A partial cache exists and this is a partial response: merge cache and response, pass it to the next node, delete the partial cache
if (mPartialEncodedImageFromCache != null
&& newResult != null
&& newResult.getBytesRange() != null) {
try {
final PooledByteBufferOutputStream pooledOutputStream =
merge(mPartialEncodedImageFromCache, newResult);
sendFinalResultToConsumer(pooledOutputStream);
} catch (IOException e) {
// TODO 19247425 Delete cached file and request full image
FLog.e(PRODUCER_NAME, "Error while merging image data", e);
getConsumer().onFailure(e);
} finally {
newResult.close();
mPartialEncodedImageFromCache.close();
}
mDefaultBufferedDiskCache.remove(mPartialImageCacheKey);
// No partial cache and this is a partial response: store this content as the partial cache for a later merge
} else if (mIsDiskCacheEnabledForWrite
&& statusHasFlag(status, IS_PARTIAL_RESULT)
&& isLast(status)
&& newResult != null
&& newResult.getImageFormat() != ImageFormat.UNKNOWN) {
mDefaultBufferedDiskCache.put(mPartialImageCacheKey, newResult);
getConsumer().onNewResult(newResult, status);
} else {
getConsumer().onNewResult(newResult, status);
}
}
}
read && write
This involves two nodes, DiskCacheWriteProducer and DiskCacheReadProducer, added to the ImagePipeline in that order, so:
- forward leg: DiskCacheReadProducer.produceResults -> DiskCacheWriteProducer.produceResults
- return leg: DiskCacheWriteProducer.onNewResult -> DiskCacheReadProducer.onNewResult
When a load request is issued, the disk reader checks whether a matching cache file exists on disk; if so, no network request is needed: the cache file is loaded into memory and returned. Since the disk writer is added before the disk reader, the request reaches the reader first; once the disk reader intercepts the request it never passes through the disk writer or the network fetcher
If no disk cache is found, the request flows on to the network fetcher, and once the response is back the disk writer saves it as a disk cache entry
class DiskCacheReadProducer {
public void produceResults(
final Consumer<EncodedImage> consumer, final ProducerContext producerContext) {
final ImageRequest imageRequest = producerContext.getImageRequest();
final boolean isDiskCacheEnabledForRead =
producerContext
.getImageRequest()
.isCacheEnabled(ImageRequest.CachesLocationsMasks.DISK_READ);
if (!isDiskCacheEnabledForRead) {
maybeStartInputProducer(consumer, producerContext);
return;
}
producerContext.getProducerListener().onProducerStart(producerContext, PRODUCER_NAME);
final CacheKey cacheKey =
mCacheKeyFactory.getEncodedCacheKey(imageRequest, producerContext.getCallerContext());
final boolean isSmallRequest = (imageRequest.getCacheChoice() == CacheChoice.SMALL);
final BufferedDiskCache preferredCache =
isSmallRequest ? mSmallImageBufferedDiskCache : mDefaultBufferedDiskCache;
final AtomicBoolean isCancelled = new AtomicBoolean(false);
final Task<EncodedImage> diskLookupTask = preferredCache.get(cacheKey, isCancelled); // forward leg: look it up in the disk cache
final Continuation<EncodedImage, Void> continuation =
onFinishDiskReads(consumer, producerContext);
diskLookupTask.continueWith(continuation);
subscribeTaskForRequestCancellation(isCancelled, producerContext);
}
private Continuation<EncodedImage, Void> onFinishDiskReads(
final Consumer<EncodedImage> consumer, final ProducerContext producerContext) {
final ProducerListener2 listener = producerContext.getProducerListener();
return new Continuation<EncodedImage, Void>() {
@Override
public Void then(Task<EncodedImage> task) throws Exception {
if (isTaskCancelled(task)) {
listener.onProducerFinishWithCancellation(producerContext, PRODUCER_NAME, null);
consumer.onCancellation();
} else if (task.isFaulted()) {
listener.onProducerFinishWithFailure(
producerContext, PRODUCER_NAME, task.getError(), null);
mInputProducer.produceResults(consumer, producerContext);
} else {
EncodedImage cachedReference = task.getResult();
if (cachedReference != null) { // disk cache hit: cut the forward leg short instead of flowing on to the next node (the network request)
listener.onProducerFinishWithSuccess(
producerContext,
PRODUCER_NAME,
getExtraMap(listener, producerContext, true, cachedReference.getSize()));
listener.onUltimateProducerReached(producerContext, PRODUCER_NAME, true);
producerContext.putOriginExtra("disk");
consumer.onProgressUpdate(1);
consumer.onNewResult(cachedReference, Consumer.IS_LAST);
cachedReference.close();
} else { // otherwise pass the request on to the next node
listener.onProducerFinishWithSuccess(
producerContext, PRODUCER_NAME, getExtraMap(listener, producerContext, false, 0));
mInputProducer.produceResults(consumer, producerContext);
}
}
return null;
}
};
}
}
class DiskCacheWriteConsumer {
public void onNewResultImpl(@Nullable EncodedImage newResult, @Status int status) {
mProducerContext.getProducerListener().onProducerStart(mProducerContext, PRODUCER_NAME);
// intermediate, null or uncacheable results are not cached, so we just forward them
// as well as the images with unknown format which could be html response from the server
if (isNotLast(status)
|| newResult == null
|| statusHasAnyFlag(status, DO_NOT_CACHE_ENCODED | IS_PARTIAL_RESULT)
|| newResult.getImageFormat() == ImageFormat.UNKNOWN) {
mProducerContext
.getProducerListener()
.onProducerFinishWithSuccess(mProducerContext, PRODUCER_NAME, null);
getConsumer().onNewResult(newResult, status);
return;
}
final ImageRequest imageRequest = mProducerContext.getImageRequest();
final CacheKey cacheKey =
mCacheKeyFactory.getEncodedCacheKey(imageRequest, mProducerContext.getCallerContext());
if (imageRequest.getCacheChoice() == ImageRequest.CacheChoice.SMALL) { // cache the response
mSmallImageBufferedDiskCache.put(cacheKey, newResult);
} else {
mDefaultBufferedDiskCache.put(cacheKey, newResult);
}
mProducerContext
.getProducerListener()
.onProducerFinishWithSuccess(mProducerContext, PRODUCER_NAME, null);
getConsumer().onNewResult(newResult, status);
}
}
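The SMALL cache choice checked above is set per request; a small sketch (the URL is made up):
import android.net.Uri;
import com.facebook.imagepipeline.request.ImageRequest;
import com.facebook.imagepipeline.request.ImageRequestBuilder;
// Sketch: route this request's disk cache writes to the separate "small image" disk cache.
final class SmallCacheRequest {
  static ImageRequest build() {
    return ImageRequestBuilder.newBuilderWithSource(Uri.parse("https://example.com/avatar.jpg"))
        .setCacheChoice(ImageRequest.CacheChoice.SMALL)
        .build();
  }
}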
Memory cache (encoded)
The EncodedMemoryCacheProducer node implements an in-memory cache whose entries are encoded data, i.e. the raw bytes of the various image formats as produced by the compression codec, not yet decoded into a Bitmap
On the forward leg it looks the request up in the memory cache: a hit returns the in-memory data directly, a miss hands the request to the next node; on the return leg it adds the raw data to the memory cache and then keeps passing it downstream
Enabled by default; can be turned off with ImageRequestBuilder.disableMemoryCache
class EncodedMemoryCacheProducer {
public void produceResults(
final Consumer<EncodedImage> consumer, final ProducerContext producerContext) {
try {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection("EncodedMemoryCacheProducer#produceResults");
}
final ProducerListener2 listener = producerContext.getProducerListener();
listener.onProducerStart(producerContext, PRODUCER_NAME);
final ImageRequest imageRequest = producerContext.getImageRequest();
final CacheKey cacheKey =
mCacheKeyFactory.getEncodedCacheKey(imageRequest, producerContext.getCallerContext());
final boolean isEncodedCacheEnabledForRead =
producerContext
.getImageRequest()
.isCacheEnabled(ImageRequest.CachesLocationsMasks.ENCODED_READ);
CloseableReference<PooledByteBuffer> cachedReference =
isEncodedCacheEnabledForRead ? mMemoryCache.get(cacheKey) : null;
try {
if (cachedReference != null) { // encoded memory cache hit: intercept the request and return the in-memory data directly
EncodedImage cachedEncodedImage = new EncodedImage(cachedReference);
try {
listener.onProducerFinishWithSuccess(
producerContext,
PRODUCER_NAME,
listener.requiresExtraMap(producerContext, PRODUCER_NAME)
? ImmutableMap.of(EXTRA_CACHED_VALUE_FOUND, "true")
: null);
listener.onUltimateProducerReached(producerContext, PRODUCER_NAME, true);
producerContext.putOriginExtra("memory_encoded");
consumer.onProgressUpdate(1f);
consumer.onNewResult(cachedEncodedImage, Consumer.IS_LAST);
return;
} finally {
EncodedImage.closeSafely(cachedEncodedImage);
}
}
if (producerContext.getLowestPermittedRequestLevel().getValue()
>= ImageRequest.RequestLevel.ENCODED_MEMORY_CACHE.getValue()) {
listener.onProducerFinishWithSuccess(
producerContext,
PRODUCER_NAME,
listener.requiresExtraMap(producerContext, PRODUCER_NAME)
? ImmutableMap.of(EXTRA_CACHED_VALUE_FOUND, "false")
: null);
listener.onUltimateProducerReached(producerContext, PRODUCER_NAME, false);
producerContext.putOriginExtra("memory_encoded", "nil-result");
consumer.onNewResult(null, Consumer.IS_LAST);
return;
}
Consumer consumerOfInputProducer =
new EncodedMemoryCacheConsumer(
consumer,
mMemoryCache,
cacheKey,
producerContext
.getImageRequest()
.isCacheEnabled(ImageRequest.CachesLocationsMasks.ENCODED_WRITE),
producerContext.getImagePipelineConfig().getExperiments().isEncodedCacheEnabled());
listener.onProducerFinishWithSuccess(
producerContext,
PRODUCER_NAME,
listener.requiresExtraMap(producerContext, PRODUCER_NAME)
? ImmutableMap.of(EXTRA_CACHED_VALUE_FOUND, "false")
: null);
mInputProducer.produceResults(consumerOfInputProducer, producerContext);
} finally {
CloseableReference.closeSafely(cachedReference);
}
} finally {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
}
}
}
class EncodedMemoryCacheConsumer {
public void onNewResultImpl(@Nullable EncodedImage newResult, @Status int status) {
try {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection("EncodedMemoryCacheProducer#onNewResultImpl");
}
// intermediate, null or uncacheable results are not cached, so we just forward them
// as well as the images with unknown format which could be html response from the server
if (isNotLast(status)
|| newResult == null
|| statusHasAnyFlag(status, DO_NOT_CACHE_ENCODED | IS_PARTIAL_RESULT)
|| newResult.getImageFormat() == ImageFormat.UNKNOWN) {
getConsumer().onNewResult(newResult, status);
return;
}
// cache and forward the last result
CloseableReference<PooledByteBuffer> ref = newResult.getByteBufferRef();
if (ref != null) {
CloseableReference<PooledByteBuffer> cachedResult = null;
try {
if (mEncodedCacheEnabled && mIsEncodedCacheEnabledForWrite) { // cache the raw data in memory
cachedResult = mMemoryCache.cache(mRequestedCacheKey, ref);
}
} finally {
CloseableReference.closeSafely(ref);
}
if (cachedResult != null) {
EncodedImage cachedEncodedImage;
try {
cachedEncodedImage = new EncodedImage(cachedResult);
cachedEncodedImage.copyMetaDataFrom(newResult);
} finally {
CloseableReference.closeSafely(cachedResult);
}
try {
getConsumer().onProgressUpdate(1f);
getConsumer().onNewResult(cachedEncodedImage, status);
return;
} finally {
EncodedImage.closeSafely(cachedEncodedImage);
}
}
}
getConsumer().onNewResult(newResult, status);
} finally {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
}
}
}
Memory cache (decoded, Bitmap)
BitmapMemoryCacheProducer implements the Bitmap memory cache; the overall logic is fairly simple:
- Enabled by default, can be turned off with ImageRequestBuilder.disableMemoryCache
- On the forward leg it reads from the cache; on a hit it intercepts the request and returns the Bitmap cached in memory
- On the return leg it writes to the cache
class BitmapMemoryCacheProducer {
public void produceResults(
final Consumer<CloseableReference<CloseableImage>> consumer,
final ProducerContext producerContext) {
try {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection("BitmapMemoryCacheProducer#produceResults");
}
final ProducerListener2 listener = producerContext.getProducerListener();
listener.onProducerStart(producerContext, getProducerName());
final ImageRequest imageRequest = producerContext.getImageRequest();
final Object callerContext = producerContext.getCallerContext();
final CacheKey cacheKey = mCacheKeyFactory.getBitmapCacheKey(imageRequest, callerContext);
final boolean isBitmapCacheEnabledForRead =
producerContext
.getImageRequest()
.isCacheEnabled(ImageRequest.CachesLocationsMasks.BITMAP_READ);
CloseableReference<CloseableImage> cachedReference =
isBitmapCacheEnabledForRead ? mMemoryCache.get(cacheKey) : null;
if (cachedReference != null) {
maybeSetExtrasFromCloseableImage(cachedReference.get(), producerContext);
boolean isFinal = cachedReference.get().getQualityInfo().isOfFullQuality();
if (isFinal) {
listener.onProducerFinishWithSuccess(
producerContext,
getProducerName(),
listener.requiresExtraMap(producerContext, getProducerName())
? ImmutableMap.of(EXTRA_CACHED_VALUE_FOUND, "true")
: null);
listener.onUltimateProducerReached(producerContext, getProducerName(), true);
producerContext.putOriginExtra("memory_bitmap", getOriginSubcategory());
consumer.onProgressUpdate(1f);
}
consumer.onNewResult(cachedReference, BaseConsumer.simpleStatusForIsLast(isFinal));
cachedReference.close();
if (isFinal) {
return;
}
}
// ...
}
protected Consumer<CloseableReference<CloseableImage>> wrapConsumer(
final Consumer<CloseableReference<CloseableImage>> consumer,
final CacheKey cacheKey,
final boolean isBitmapCacheEnabledForWrite) {
return new DelegatingConsumer<
CloseableReference<CloseableImage>, CloseableReference<CloseableImage>>(consumer) {
@Override
public void onNewResultImpl(
@Nullable CloseableReference<CloseableImage> newResult, @Status int status) {
try {
// ...
// cache, if needed, and forward the new result
CloseableReference<CloseableImage> newCachedResult = null;
if (isBitmapCacheEnabledForWrite) {
newCachedResult = mMemoryCache.cache(cacheKey, newResult);
}
try {
if (isLast) getConsumer().onProgressUpdate(1f);
getConsumer().onNewResult((newCachedResult != null) ? newCachedResult : newResult, status);
} finally { CloseableReference.closeSafely(newCachedResult); }
} finally {
if (FrescoSystrace.isTracing()) FrescoSystrace.endSection();
}
}
};
}
}
LruCountingMemoryCache
The memory caches (both encoded and decoded) are implemented by LruCountingMemoryCache<CacheKey, CloseableImage>. It has a few parameters (configurable via ImagePipelineConfig.Builder.setBitmapMemoryCacheParamsSupplier; a sketch follows the table):
Name | Description | Default |
---|---|---|
MemoryCacheParams.maxCacheSize | Total cache capacity in bytes, counting only in-use entries | 4 MB if the heap is < 32 MB, 6 MB if < 64 MB, 8 MB below Android 3.0 (HONEYCOMB, API 11), otherwise 1/4 of the heap |
MemoryCacheParams.maxCacheEntries | Maximum number of entries in the cache, counting only in-use entries | 256 |
MemoryCacheParams.maxEvictionQueueSize | Capacity limit of the eviction queue | Integer.MAX_VALUE |
MemoryCacheParams.maxEvictionQueueEntries | Maximum number of entries in the eviction queue | Integer.MAX_VALUE |
MemoryCacheParams.maxCacheEntrySize | Size limit for a single entry; anything larger is not cached | Integer.MAX_VALUE |
The eviction queue is the set of cached objects that are not in use and can therefore be safely evicted
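A configuration sketch for these parameters (the numbers are arbitrary, and the MemoryCacheParams constructor arguments may differ between Fresco versions):
import android.content.Context;
import com.facebook.common.internal.Supplier;
import com.facebook.imagepipeline.cache.MemoryCacheParams;
import com.facebook.imagepipeline.core.ImagePipelineConfig;
// Sketch: supply custom bitmap memory cache parameters.
final class BitmapCacheParamsSetup {
  static ImagePipelineConfig buildConfig(Context context) {
    Supplier<MemoryCacheParams> paramsSupplier =
        new Supplier<MemoryCacheParams>() {
          @Override
          public MemoryCacheParams get() {
            return new MemoryCacheParams(
                32 * 1024 * 1024,   // maxCacheSize: 32 MB for in-use entries, arbitrary
                256,                // maxCacheEntries
                Integer.MAX_VALUE,  // maxEvictionQueueSize
                Integer.MAX_VALUE,  // maxEvictionQueueEntries
                Integer.MAX_VALUE); // maxCacheEntrySize
          }
        };
    return ImagePipelineConfig.newBuilder(context)
        .setBitmapMemoryCacheParamsSupplier(paramsSupplier)
        .build();
  }
}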
The core data structure, providing both the LRU ordering and the key-value mapping, is LinkedHashMap; the cache keeps two maps internally:
- mCachedEntries: holds all cached entries
- mExclusiveEntries: holds the idle entries that are not in use and can be safely evicted; it is a subset of mCachedEntries and is the eviction queue mentioned above
Putting a value in with cache(key, valueRef) or reading one with get(key) returns a CloseableReference. Until CloseableReference.close releases the object, the entry lives only in mCachedEntries; only when nobody holds/references it any more (CountingMemoryCache.Entry.clientCount == 0) is it added to mExclusiveEntries, meaning it can be safely released
Returning a CloseableReference (which must be released via close) makes this cache different from an ordinary LRU cache (a small usage sketch follows the list below):
- The maxCacheSize capacity limit and the maxCacheEntries count limit apply to the set of in-use entries, i.e. mCachedEntries minus mExclusiveEntries. That is because mExclusiveEntries is the set of idle entries; nobody referencing them means they can be evicted, so after a new entry is added the cache can evict idle entries until the constraints hold again
- An entry can be held by several clients at once (lent out multiple times; the reference count Entry.clientCount records the number of loans and is decremented on each return)
- When a client is done with an entry it simply calls close, and the cache decides whether to keep the entry or drop it (within the capacity limits it is kept in the idle set; beyond the limits it is released once the reference count is zero, otherwise the release waits until the count reaches zero)
- An entry has two attributes, isOrphan and clientCount. isOrphan == true means the entry has been removed from the cache, but if someone still holds it (Entry.clientCount > 0) it cannot be released yet; the release happens once the reference count drops to zero. Some situations force an entry to become an orphan while its reference count is still non-zero:
  - putting a value under the same key, which forcibly removes the old entry from the cache
  - manually removing entries via MemoryCache.removeAll or CountingMemoryCache.clear
  - a low-memory MemoryTrimmable.trim(trimType) call forcibly removing a proportion of the entries
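A sketch of the borrow/return pattern around CloseableReference (the byte[] payload and releaser are arbitrary stand-ins for a cached image):
import com.facebook.common.references.CloseableReference;
import com.facebook.common.references.ResourceReleaser;
// Sketch of lending out and returning a CloseableReference.
final class CloseableReferenceDemo {
  static void demo() {
    byte[] payload = new byte[1024];
    CloseableReference<byte[]> ref =
        CloseableReference.of(payload, new ResourceReleaser<byte[]>() {
          @Override
          public void release(byte[] value) {
            // called once every outstanding reference has been closed
          }
        });
    CloseableReference<byte[]> borrowed = ref.clone(); // lend it out: refcount + 1
    try {
      byte[] data = borrowed.get(); // use the underlying value
    } finally {
      CloseableReference.closeSafely(borrowed); // return the loan: refcount - 1
    }
    CloseableReference.closeSafely(ref); // the last close triggers the releaser
  }
}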
class LruCountingMemoryCache {
// Contains the items that are not being used by any client and are hence viable for eviction.
final CountingLruMap<K, Entry<K, V>> mExclusiveEntries;
// Contains all the cached items including the exclusively owned ones.
final CountingLruMap<K, Entry<K, V>> mCachedEntries;
public CloseableReference<V> cache(final K key, final CloseableReference<V> valueRef)
public CloseableReference<V> get(final K key)
}
The logic for adding to the cache:
- Check whether the key already has an in-use old value; if so, mark it as an orphan (Entry.isOrphan = true). Then when the old value is done with and CloseableReference.close is called (which eventually calls LruCountingMemoryCache.releaseClientReference(entry)), it is not added to mExclusiveEntries but released, because the value under this key has already been replaced by the new one
- The value is added to mCachedEntries when the conditions below hold; a freshly added entry starts with isOrphan == false && clientCount == 1, i.e. it is not an orphan and its reference count is 1:
  - the value size is at most maxCacheEntrySize
  - the number of in-use entries is below maxCacheEntries ("in use" meaning present in mCachedEntries but not in mExclusiveEntries)
  - the total size of the in-use entries plus the value size is at most maxCacheSize
- The evict logic runs, evicting/releasing some entries from mExclusiveEntries so that the constraints are satisfied
- A CloseableReference is returned; when its CloseableReference.close is called, LruCountingMemoryCache.releaseClientReference(entry) runs as well
class LruCountingMemoryCache {
public @Nullable CloseableReference<V> cache(
final K key,
final CloseableReference<V> valueRef,
final @Nullable EntryStateObserver<K> observer) {
Preconditions.checkNotNull(key);
Preconditions.checkNotNull(valueRef);
maybeUpdateCacheParams();
Entry<K, V> oldExclusive;
CloseableReference<V> oldRefToClose = null;
CloseableReference<V> clientRef = null;
synchronized (this) {
// remove the old item (if any) as it is stale now
oldExclusive = mExclusiveEntries.remove(key);
Entry<K, V> oldEntry = mCachedEntries.remove(key);
if (oldEntry != null) {
makeOrphan(oldEntry);
oldRefToClose = referenceToClose(oldEntry);
}
V value = valueRef.get();
int size = mValueDescriptor.getSizeInBytes(value);
if (canCacheNewValueOfSize(size)) {
Entry<K, V> newEntry;
if (mStoreEntrySize) {
newEntry = Entry.of(key, valueRef, size, observer);
} else {
newEntry = Entry.of(key, valueRef, observer);
}
mCachedEntries.put(key, newEntry);
clientRef = newClientReference(newEntry);
}
}
CloseableReference.closeSafely(oldRefToClose);
maybeNotifyExclusiveEntryRemoval(oldExclusive);
maybeEvictEntries();
return clientRef;
}
private synchronized void makeOrphan(Entry<K, V> entry) {
Preconditions.checkNotNull(entry);
Preconditions.checkState(!entry.isOrphan);
entry.isOrphan = true;
}
private synchronized boolean canCacheNewValueOfSize(int newValueSize) {
return (newValueSize <= mMemoryCacheParams.maxCacheEntrySize)
&& (getInUseCount() <= mMemoryCacheParams.maxCacheEntries - 1)
&& (getInUseSizeInBytes() <= mMemoryCacheParams.maxCacheSize - newValueSize);
}
private synchronized CloseableReference<V> newClientReference(final Entry<K, V> entry) {
increaseClientCount(entry);
return CloseableReference.of(
entry.valueRef.get(),
new ResourceReleaser<V>() {
@Override
public void release(V unused) {
releaseClientReference(entry);
}
});
}
}
The logic for releasing an entry:
- Decrement the reference count (Entry.clientCount--)
- If it is no longer referenced and is not an orphan (Entry.isOrphan == false), the entry has not been removed from the MemoryCache (eviction, or being replaced by a new value under the same key, would have removed it and made it an orphan), so it is added to the idle set mExclusiveEntries
- If it is no longer referenced and is an orphan, the reference is cleared and the object released
- Adding an element to the idle set may break the MemoryCache constraints, so the evict logic has to run once more
private void releaseClientReference(final Entry<K, V> entry) {
Preconditions.checkNotNull(entry);
boolean isExclusiveAdded;
CloseableReference<V> oldRefToClose;
synchronized (this) {
decreaseClientCount(entry);
isExclusiveAdded = maybeAddToExclusives(entry);
oldRefToClose = referenceToClose(entry);
}
CloseableReference.closeSafely(oldRefToClose);
maybeNotifyExclusiveEntryInsertion(isExclusiveAdded ? entry : null);
maybeUpdateCacheParams();
maybeEvictEntries();
}
The eviction logic (evict) targets the unreferenced entries living in mExclusiveEntries:
- There are two kinds of constraint, entry count and total entry size, each taking the minimum of several thresholds:
  - maxEvictionQueueEntries / maxEvictionQueueSize, constraints aimed specifically at idle entries
  - maxCacheEntries / maxCacheSize, constraints on the whole memory cache (idle plus in-use entries); subtracting the in-use part gives the constraint on the idle entries
- Entries are removed from mExclusiveEntries one by one until the count and size constraints above are satisfied; the removal order is the iteration order of the LinkedHashMap, which, as explained below, effectively becomes access order (and that is what gives us LRU)
- The removed entries are marked as orphans and deleted (CloseableReference.close eventually calls LruCountingMemoryCache.releaseClientReference(entry))
class LruCountingMemoryCache {
public void maybeEvictEntries() {
ArrayList<Entry<K, V>> oldEntries;
synchronized (this) {
int maxCount =
Math.min(
mMemoryCacheParams.maxEvictionQueueEntries,
mMemoryCacheParams.maxCacheEntries - getInUseCount());
int maxSize =
Math.min(
mMemoryCacheParams.maxEvictionQueueSize,
mMemoryCacheParams.maxCacheSize - getInUseSizeInBytes());
oldEntries = trimExclusivelyOwnedEntries(maxCount, maxSize);
makeOrphans(oldEntries);
}
maybeClose(oldEntries);
maybeNotifyExclusiveEntryRemoval(oldEntries);
}
private synchronized ArrayList<Entry<K, V>> trimExclusivelyOwnedEntries(int count, int size) {
count = Math.max(count, 0);
size = Math.max(size, 0);
// fast path without array allocation if no eviction is necessary
if (mExclusiveEntries.getCount() <= count && mExclusiveEntries.getSizeInBytes() <= size) {
return null;
}
ArrayList<Entry<K, V>> oldEntries = new ArrayList<>();
while (mExclusiveEntries.getCount() > count || mExclusiveEntries.getSizeInBytes() > size) {
@Nullable K key = mExclusiveEntries.getFirstKey();
if (key == null) {
if (mIgnoreSizeMismatch) {
mExclusiveEntries.resetSize();
break;
}
throw new IllegalStateException(
String.format(
"key is null, but exclusiveEntries count: %d, size: %d",
mExclusiveEntries.getCount(), mExclusiveEntries.getSizeInBytes()));
}
mExclusiveEntries.remove(key);
oldEntries.add(mCachedEntries.remove(key));
}
return oldEntries;
}
}
class CountingLruMap<K, V> {
private final LinkedHashMap<K, V> mMap = new LinkedHashMap<>();
public synchronized K getFirstKey() {
return mMap.isEmpty() ? null : mMap.keySet().iterator().next();
}
}
When a cached value is fetched, its Entry is removed from mExclusiveEntries right away and is only re-inserted into mExclusiveEntries when CloseableReference.close is called. So although the LinkedHashMap inside mExclusiveEntries is insertion-ordered, from the MemoryCache's point of view mExclusiveEntries is access-ordered, and eviction starts iterating from the least recently accessed Entry.
public CloseableReference<V> get(final K key) {
Preconditions.checkNotNull(key);
Entry<K, V> oldExclusive;
CloseableReference<V> clientRef = null;
synchronized (this) {
oldExclusive = mExclusiveEntries.remove(key);
Entry<K, V> entry = mCachedEntries.get(key);
if (entry != null) {
clientRef = newClientReference(entry);
}
}
maybeNotifyExclusiveEntryRemoval(oldExclusive);
maybeUpdateCacheParams();
maybeEvictEntries();
return clientRef;
}
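To see why this remove-then-reinsert trick yields LRU order, here is a minimal, self-contained demo (plain Java, not Fresco code) of an insertion-ordered LinkedHashMap being used as an access-ordered one:
import java.util.LinkedHashMap;

class AccessOrderDemo {
  public static void main(String[] args) {
    // Insertion-ordered map, like the LinkedHashMap inside CountingLruMap.
    LinkedHashMap<String, Integer> map = new LinkedHashMap<>();
    map.put("a", 1);
    map.put("b", 2);
    map.put("c", 3);

    // "Access" key "a": remove it now (MemoryCache.get) ...
    Integer value = map.remove("a");
    // ... and re-insert it when the client releases the reference (CloseableReference.close).
    map.put("a", value);

    // The head of the iteration order is now the least recently accessed key.
    System.out.println(map.keySet().iterator().next()); // prints "b"
  }
}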
In addition, LruCountingMemoryCache provides the following methods:
API | Description |
---|---|
inspect(key) | Returns the cached value for key without affecting its position in the LRU order |
probe(key) | Touches the cached value for key without retrieving it; its main effect is updating the key's position in the LRU order |
reuse(key) | Gets and removes the cached value for key from the MemoryCache; the Entry must be exclusively owned (i.e. present in mExclusiveEntries) |
maybeEvictEntries | Manually evicts some entries |
getCount() | Total number of entries, both exclusive and in use |
getSizeInBytes() | Total size of the MemoryCache, covering both exclusive and in-use entries |
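From a caller's perspective the contract is simple: hold the returned CloseableReference while using the value and close it when done, which is what hands the Entry back to mExclusiveEntries (or frees it if it has become an orphan). A usage sketch, not Fresco source, assuming a CountingMemoryCache<CacheKey, CloseableImage> named cache and an already-populated key:
// Usage sketch, not Fresco source.
void useCachedImage(CountingMemoryCache<CacheKey, CloseableImage> cache, CacheKey key) {
  CloseableReference<CloseableImage> ref = cache.get(key); // bumps Entry.clientCount
  if (ref == null) {
    return; // cache miss
  }
  try {
    CloseableImage image = ref.get(); // only valid while the reference is open
    // ... hand the image to the hierarchy / draw it ...
  } finally {
    // close() runs the ResourceReleaser built in newClientReference(), i.e.
    // releaseClientReference(entry): the Entry goes back to mExclusiveEntries or is freed.
    CloseableReference.closeSafely(ref);
  }
}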
Binding to the View lifecycle
First, let's look at how Fresco handles and stores an ImageRequest:
- Through ProducerSequenceFactory.getEncodedImageProducerSequence, the ImageRequest is turned into a suitable image pipeline (an ImagePipeline; in practice, the last Producer node of the pipeline)
- The Producer is adapted into a DataSource (at that moment the request is submitted to a thread pool and pipeline processing starts); PipelineDraweeController.mDataSourceSupplier holds a Supplier<DataSource>, while AbstractDraweeController.mDataSource holds the DataSource itself
- A DataSource is an asynchronous result (similar to a Future); the outcome (success or failure) is obtained by subscribing to it, which AbstractDraweeController.submitRequest does in order to get the Bitmap
- In other words, an image load request is ultimately held in DraweeView.mDraweeHolder.mController.mDataSource
SimpleDraweeView.setImageURI(uri)
SimpleDraweeView.setImageURI(uri, null)
AbstractDraweeControllerBuilder.build
AbstractDraweeControllerBuilder.buildController
PipelineDraweeControllerBuilder.obtainController
AbstractDraweeControllerBuilder.obtainDataSourceSupplier
AbstractDraweeControllerBuilder.getDataSourceSupplierForRequest(controller, controllerId, imageRequest)
AbstractDraweeControllerBuilder.getDataSourceSupplierForRequest(controller, controllerId, imageRequest, cacheLevel)
PipelineDraweeControllerBuilder.getDataSourceForRequest
ImagePipeline.fetchDecodedImage
ImagePipeline.submitFetchRequest
CloseableProducerToDataSourceAdapter.create
// The pipeline is actually started here, and the running pipeline is wrapped as a DataSource
// The DataSource is then wrapped as a Supplier<DataSource> and stored in PipelineDraweeController.mDataSourceSupplier
class AbstractProducerToDataSourceAdapter {
protected AbstractProducerToDataSourceAdapter(
Producer<T> producer,
SettableProducerContext settableProducerContext,
RequestListener2 requestListener) {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection("AbstractProducerToDataSourceAdapter()");
}
mSettableProducerContext = settableProducerContext;
mRequestListener = requestListener;
setInitialExtras();
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection("AbstractProducerToDataSourceAdapter()->onRequestStart");
}
mRequestListener.onRequestStart(mSettableProducerContext);
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection("AbstractProducerToDataSourceAdapter()->produceResult");
}
producer.produceResults(createConsumer(), settableProducerContext);
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
}
}
// Continuing on, we arrive at submitRequest
// The DataSource is obtained from PipelineDraweeController.mDataSourceSupplier and stored in AbstractDraweeController.mDataSource
// Subscribing to the DataSource delivers the Bitmap in the callback
DraweeView.setController
DraweeHolder.setController
DraweeHolder.attachController
AbstractDraweeController.onAttach
AbstractDraweeController.submitRequest
class AbstractDraweeController {
protected void submitRequest() {
// ...
mDataSource = getDataSource();
reportSubmit(mDataSource, null);
if (FLog.isLoggable(FLog.VERBOSE)) {
FLog.v(
TAG,
"controller %x %s: submitRequest: dataSource: %x",
System.identityHashCode(this),
mId,
System.identityHashCode(mDataSource));
}
final String id = mId;
final boolean wasImmediate = mDataSource.hasResult();
final DataSubscriber<T> dataSubscriber =
new BaseDataSubscriber<T>() {
@Override
public void onNewResultImpl(DataSource<T> dataSource) {
// isFinished must be obtained before image, otherwise we might set intermediate result
// as final image.
boolean isFinished = dataSource.isFinished();
boolean hasMultipleResults = dataSource.hasMultipleResults();
float progress = dataSource.getProgress();
T image = dataSource.getResult();
if (image != null) {
onNewResultInternal(
id, dataSource, image, progress, isFinished, wasImmediate, hasMultipleResults);
} else if (isFinished) {
onFailureInternal(id, dataSource, new NullPointerException(), /* isFinished */ true);
}
}
@Override
public void onFailureImpl(DataSource<T> dataSource) {
onFailureInternal(id, dataSource, dataSource.getFailureCause(), /* isFinished */ true);
}
@Override
public void onProgressUpdate(DataSource<T> dataSource) {
boolean isFinished = dataSource.isFinished();
float progress = dataSource.getProgress();
onProgressUpdateInternal(id, dataSource, progress, isFinished);
}
};
mDataSource.subscribe(dataSubscriber, mUiThreadImmediateExecutor);
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
}
}
class PipelineDraweeController {
protected DataSource<CloseableReference<CloseableImage>> getDataSource() {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection("PipelineDraweeController#getDataSource");
}
if (FLog.isLoggable(FLog.VERBOSE)) {
FLog.v(TAG, "controller %x: getDataSource", System.identityHashCode(this));
}
DataSource<CloseableReference<CloseableImage>> result = mDataSourceSupplier.get();
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
return result;
}
}
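The detail worth noting is the Supplier<DataSource> indirection: the controller does not cache a started request, it holds a factory, and every attach creates a fresh DataSource via mDataSourceSupplier.get(). A conceptual sketch of that shape (hypothetical names, not Fresco source; Fresco has its own Supplier interface, java.util.function.Supplier stands in for it here):
import java.util.function.Supplier;

class LazyRequestSketch {
  /** Stand-in for a DataSource: by the time this object exists, the request is already running. */
  static final class PendingResult {
    PendingResult(String uri) {
      // analogous to producer.produceResults(...) in AbstractProducerToDataSourceAdapter's constructor
      System.out.println("pipeline started for " + uri);
    }
  }

  /** What mDataSourceSupplier conceptually is: a factory that starts a new request on every get(). */
  static Supplier<PendingResult> supplierFor(String uri) {
    return () -> new PendingResult(uri);
  }

  public static void main(String[] args) {
    Supplier<PendingResult> supplier = supplierFor("https://example.com/image.jpg");
    // Nothing has started yet; each onAttach -> submitRequest calls get() and starts a fresh request.
    PendingResult first = supplier.get();
    PendingResult second = supplier.get(); // detach + re-attach would create another, independent request
  }
}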
Unlike Glide, Fresco cannot be used directly on an ImageView: the UI component has to be replaced with SimpleDraweeView, and the image load request is started via SimpleDraweeView.setImageURI(uri, callerContext). Nowhere in this process do Activity/Fragment/LifecycleOwner components take part.
From DraweeView we can see that a Fresco image request is bound to the lifecycle of the View component in the UI layer: onAttachedToWindow and onDetachedFromWindow.
When onDetachedFromWindow fires, the event propagates along DraweeView -> DraweeHolder -> DraweeController -> DataSource -> ProducerContext -> ProducerContextCallbacks, and everything that registered ProducerContextCallbacks receives onCancellationRequested.
Picking any two of the ImagePipeline nodes introduced above (OkHttpNetworkFetcher and DiskCacheReadProducer), we find that both register such a callback, so no matter which node the request is currently flowing through, it can respond to the cancel event promptly and stop working. This is how an image request is bound to the View lifecycle (a sketch of doing the same wiring in a custom view follows the DraweeView code below).
class DraweeView {
@Override
protected void onAttachedToWindow() {
super.onAttachedToWindow();
maybeOverrideVisibilityHandling();
onAttach();
}
@Override
protected void onDetachedFromWindow() {
super.onDetachedFromWindow();
maybeOverrideVisibilityHandling();
onDetach();
}
@Override
public void onStartTemporaryDetach() {
super.onStartTemporaryDetach();
maybeOverrideVisibilityHandling();
onDetach();
}
@Override
public void onFinishTemporaryDetach() {
super.onFinishTemporaryDetach();
maybeOverrideVisibilityHandling();
onAttach();
}
}
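The same wiring is available to views that cannot extend DraweeView: Fresco exposes DraweeHolder for exactly this purpose. A hedged sketch under the standard DraweeHolder API (MyImageView and setImageUri are made up; temporary-detach, verifyDrawable and Drawable.Callback handling are omitted for brevity):
import android.content.Context;
import android.graphics.Canvas;
import android.graphics.drawable.Drawable;
import android.net.Uri;
import android.view.View;
import com.facebook.drawee.backends.pipeline.Fresco;
import com.facebook.drawee.generic.GenericDraweeHierarchy;
import com.facebook.drawee.generic.GenericDraweeHierarchyBuilder;
import com.facebook.drawee.interfaces.DraweeController;
import com.facebook.drawee.view.DraweeHolder;

class MyImageView extends View {
  private final DraweeHolder<GenericDraweeHierarchy> mHolder;

  public MyImageView(Context context) {
    super(context);
    GenericDraweeHierarchy hierarchy = new GenericDraweeHierarchyBuilder(getResources()).build();
    mHolder = DraweeHolder.create(hierarchy, context);
  }

  public void setImageUri(Uri uri) {
    DraweeController controller =
        Fresco.newDraweeControllerBuilder()
            .setUri(uri)
            .setOldController(mHolder.getController())
            .build();
    mHolder.setController(controller); // same path as DraweeView.setController above
  }

  @Override
  protected void onAttachedToWindow() {
    super.onAttachedToWindow();
    mHolder.onAttach(); // -> AbstractDraweeController.onAttach -> submitRequest
  }

  @Override
  protected void onDetachedFromWindow() {
    super.onDetachedFromWindow();
    mHolder.onDetach(); // -> release() -> DataSource.close() -> cancellation propagates down the pipeline
  }

  @Override
  protected void onDraw(Canvas canvas) {
    Drawable drawable = mHolder.getTopLevelDrawable();
    if (drawable != null) {
      drawable.setBounds(0, 0, getWidth(), getHeight());
      drawable.draw(canvas);
    }
  }
}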
DraweeView.onDetach
DraweeView.doDetach
DraweeHolder.onDetach
DraweeHolder.attachOrDetachController
DraweeHolder.detachController
AbstractDraweeController.onDetach
AbstractDraweeController.release
class AbstractDraweeController {
private void releaseFetch() {
boolean wasRequestSubmitted = mIsRequestSubmitted;
mIsRequestSubmitted = false;
mHasFetchFailed = false;
Map<String, Object> datasourceExtras = null, imageExtras = null;
if (mDataSource != null) {
datasourceExtras = mDataSource.getExtras();
mDataSource.close(); // close and null out mDataSource
mDataSource = null;
}
if (mDrawable != null) {
releaseDrawable(mDrawable);
}
if (mContentDescription != null) {
mContentDescription = null;
}
mDrawable = null;
if (mFetchedImage != null) {
imageExtras = obtainExtrasFromImage(getImageInfo(mFetchedImage));
logMessageAndImage("release", mFetchedImage);
releaseImage(mFetchedImage);
mFetchedImage = null;
}
if (wasRequestSubmitted) {
reportRelease(datasourceExtras, imageExtras);
}
}
}
AbstractProducerToDataSourceAdapter.close // recall from above that the DataSource is adapted from a Producer (an ImagePipeline node)
BaseProducerContext.cancel
ProducerContextCallbacks.onCancellationRequested // who registers callbacks here?
class OkHttpNetworkFetcher {
protected void fetchWithRequest(
final OkHttpNetworkFetchState fetchState,
final NetworkFetcher.Callback callback,
final Request request) {
final Call call = mCallFactory.newCall(request);
fetchState
.getContext()
.addCallbacks(
new BaseProducerContextCallbacks() {
@Override
public void onCancellationRequested() { // this registered callback receives the cancel-request event
if (Looper.myLooper() != Looper.getMainLooper()) {
call.cancel();
} else {
mCancellationExecutor.execute(
new Runnable() {
@Override
public void run() {
call.cancel();
}
});
}
}
});
call.enqueue(...);
}
}
// Almost every ImagePipeline node registers a callback to receive the request-cancel event,
// so no matter which node the request is flowing through, it can respond to cancellation promptly and stop working
class DiskCacheReadProducer {
public void produceResults(
final Consumer<EncodedImage> consumer, final ProducerContext producerContext) {
final ImageRequest imageRequest = producerContext.getImageRequest();
final boolean isDiskCacheEnabledForRead =
producerContext
.getImageRequest()
.isCacheEnabled(ImageRequest.CachesLocationsMasks.DISK_READ);
if (!isDiskCacheEnabledForRead) {
maybeStartInputProducer(consumer, producerContext);
return;
}
producerContext.getProducerListener().onProducerStart(producerContext, PRODUCER_NAME);
final CacheKey cacheKey =
mCacheKeyFactory.getEncodedCacheKey(imageRequest, producerContext.getCallerContext());
final boolean isSmallRequest = (imageRequest.getCacheChoice() == CacheChoice.SMALL);
final BufferedDiskCache preferredCache =
isSmallRequest ? mSmallImageBufferedDiskCache : mDefaultBufferedDiskCache;
final AtomicBoolean isCancelled = new AtomicBoolean(false);
final Task<EncodedImage> diskLookupTask = preferredCache.get(cacheKey, isCancelled);
final Continuation<EncodedImage, Void> continuation =
onFinishDiskReads(consumer, producerContext);
diskLookupTask.continueWith(continuation);
subscribeTaskForRequestCancellation(isCancelled, producerContext);
}
private void subscribeTaskForRequestCancellation(
final AtomicBoolean isCancelled, ProducerContext producerContext) {
producerContext.addCallbacks(
new BaseProducerContextCallbacks() {
@Override
public void onCancellationRequested() {
isCancelled.set(true);
}
});
}
}
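The same pattern is available to any custom Producer: register a BaseProducerContextCallbacks on the ProducerContext and stop work in onCancellationRequested. A hedged sketch (the producer itself, SlowComputationProducer, is hypothetical; the addCallbacks/onCancellationRequested hook is the one shown above):
import com.facebook.imagepipeline.image.EncodedImage;
import com.facebook.imagepipeline.producers.BaseProducerContextCallbacks;
import com.facebook.imagepipeline.producers.Consumer;
import com.facebook.imagepipeline.producers.Producer;
import com.facebook.imagepipeline.producers.ProducerContext;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicBoolean;

class SlowComputationProducer implements Producer<EncodedImage> {
  private final Executor mExecutor;

  SlowComputationProducer(Executor executor) {
    mExecutor = executor;
  }

  @Override
  public void produceResults(final Consumer<EncodedImage> consumer, final ProducerContext context) {
    final AtomicBoolean isCancelled = new AtomicBoolean(false);

    // Same hook as OkHttpNetworkFetcher and DiskCacheReadProducer above.
    context.addCallbacks(
        new BaseProducerContextCallbacks() {
          @Override
          public void onCancellationRequested() {
            isCancelled.set(true); // fired when the DraweeView detaches and the DataSource is closed
          }
        });

    mExecutor.execute(
        new Runnable() {
          @Override
          public void run() {
            if (isCancelled.get()) {
              consumer.onCancellation(); // give up before doing any work
              return;
            }
            // ... do the slow work, then call consumer.onNewResult(result, Consumer.IS_LAST) ...
          }
        });
  }
}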
The stale-request problem
Consider fast scrolling in a RecyclerView: the same DraweeView may have a network image set on it several times within a short window, while each network load takes comparatively long. How does Fresco guarantee that an old request does not overwrite a newer one?
As shown below, setting an image via setImageURI also closes the old DataSource (a RecyclerView-side sketch follows the code).
SimpleDraweeView.setImageURI(uri)
SimpleDraweeView.setImageURI(uri, null)
DraweeView.setController
class DraweeHolder {
public void setController(@Nullable DraweeController draweeController) {
boolean wasAttached = mIsControllerAttached;
if (wasAttached) {
detachController();
}
// Clear the old controller
if (isControllerValid()) {
mEventTracker.recordEvent(Event.ON_CLEAR_OLD_CONTROLLER);
mController.setHierarchy(null);
}
mController = draweeController;
if (mController != null) {
mEventTracker.recordEvent(Event.ON_SET_CONTROLLER);
mController.setHierarchy(mHierarchy);
} else {
mEventTracker.recordEvent(Event.ON_CLEAR_CONTROLLER);
}
if (wasAttached) {
attachController();
}
}
}
class AbstractDraweeController {
public void setHierarchy(@Nullable DraweeHierarchy hierarchy) {
if (FLog.isLoggable(FLog.VERBOSE)) {
FLog.v(
TAG, "controller %x %s: setHierarchy: %s", System.identityHashCode(this), mId, hierarchy);
}
mEventTracker.recordEvent(
(hierarchy != null) ? Event.ON_SET_HIERARCHY : Event.ON_CLEAR_HIERARCHY);
// force release in case request was submitted
if (mIsRequestSubmitted) {
mDeferredReleaser.cancelDeferredRelease(this);
release(); // here: the previously submitted request is released
}
// clear the existing hierarchy
if (mSettableDraweeHierarchy != null) {
mSettableDraweeHierarchy.setControllerOverlay(null);
mSettableDraweeHierarchy = null;
}
// set the new hierarchy
if (hierarchy != null) {
Preconditions.checkArgument(hierarchy instanceof SettableDraweeHierarchy);
mSettableDraweeHierarchy = (SettableDraweeHierarchy) hierarchy;
mSettableDraweeHierarchy.setControllerOverlay(mControllerOverlay);
}
if (mLoggingListener != null) {
setUpLoggingListener();
}
}
}
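In practice this means the usual RecyclerView pattern needs nothing special: re-binding the same DraweeView simply sets a new controller, and the old in-flight DataSource is closed along the way. A hedged sketch (PhotoAdapter and its fields are made up):
import android.net.Uri;
import android.view.ViewGroup;
import androidx.recyclerview.widget.RecyclerView;
import com.facebook.drawee.view.SimpleDraweeView;
import java.util.List;

class PhotoAdapter extends RecyclerView.Adapter<PhotoAdapter.Holder> {
  static class Holder extends RecyclerView.ViewHolder {
    final SimpleDraweeView drawee;

    Holder(SimpleDraweeView view) {
      super(view);
      drawee = view;
    }
  }

  private final List<Uri> mUris;

  PhotoAdapter(List<Uri> uris) {
    mUris = uris;
  }

  @Override
  public Holder onCreateViewHolder(ViewGroup parent, int viewType) {
    return new Holder(new SimpleDraweeView(parent.getContext()));
  }

  @Override
  public void onBindViewHolder(Holder holder, int position) {
    // The previous controller (and its in-flight DataSource) is released via
    // setController -> setHierarchy(null) -> release() -> releaseFetch() -> mDataSource.close().
    holder.drawee.setImageURI(mUris.get(position), null);
  }

  @Override
  public int getItemCount() {
    return mUris.size();
  }
}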
DecodeProducer - decoding
This is the ImagePipeline node responsible for decoding the various compressed image formats (JPEG, WEBP, etc.) into a usable image (e.g. a Bitmap). It identifies the format from the SOI-style header bytes and hands the image off to the corresponding ImageDecoder. The call stack is as follows:
DecodeProducer.produceResults
ProgressiveDecoder.onNewResultImpl
NetworkImagesProgressiveDecoder.updateDecodeJob
ProgressiveDecoder.doDecode
ProgressiveDecoder.internalDecode
class DefaultImageDecoder {
public CloseableImage decode(
final EncodedImage encodedImage,
final int length,
final QualityInfo qualityInfo,
final ImageDecodeOptions options) {
// a custom decoder, if one was configured
if (options.customImageDecoder != null) {
return options.customImageDecoder.decode(encodedImage, length, qualityInfo, options);
}
ImageFormat imageFormat = encodedImage.getImageFormat();
if (imageFormat == null || imageFormat == ImageFormat.UNKNOWN) {
InputStream inputStream = encodedImage.getInputStream();
if (inputStream != null) {
imageFormat = ImageFormatChecker.getImageFormat_WrapIOException(inputStream);
encodedImage.setImageFormat(imageFormat);
}
}
if (mCustomDecoders != null) {
ImageDecoder decoder = mCustomDecoders.get(imageFormat);
if (decoder != null) {
return decoder.decode(encodedImage, length, qualityInfo, options);
}
}
// Fresco's built-in decoders
return mDefaultDecoder.decode(encodedImage, length, qualityInfo, options);
}
}
Fresco ships with built-in support for common image formats such as JPEG, GIF and WEBP. These formats are defined in DefaultImageFormats, and DefaultImageFormatChecker.determineFormat(headerBytes, headerSize) implements efficient, SOI-based format detection (SOI, start of image: the first few bytes of the image file, used as a signature).
For formats Fresco does not support, a custom decoder can be added via ImageDecoderConfig.Builder.addDecodingCapability(imageFormat, imageFormatChecker, decoder); a sketch follows below.
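A hedged sketch of wiring in such a custom format at Fresco initialization time (the "MYFORMAT" format, its header bytes and the decoder body are invented; the builder calls are the ones named in the paragraph above):
import android.content.Context;
import com.facebook.drawee.backends.pipeline.Fresco;
import com.facebook.imageformat.ImageFormat;
import com.facebook.imageformat.ImageFormatCheckerUtils;
import com.facebook.imagepipeline.common.ImageDecodeOptions;
import com.facebook.imagepipeline.core.ImagePipelineConfig;
import com.facebook.imagepipeline.decoder.ImageDecoder;
import com.facebook.imagepipeline.decoder.ImageDecoderConfig;
import com.facebook.imagepipeline.image.CloseableImage;
import com.facebook.imagepipeline.image.EncodedImage;
import com.facebook.imagepipeline.image.QualityInfo;

class CustomDecoderSetup {
  private static final byte[] MY_HEADER = new byte[] {(byte) 0xAB, (byte) 0xCD}; // made-up magic bytes
  private static final ImageFormat MY_FORMAT = new ImageFormat("MYFORMAT", "myf");

  static void init(Context context) {
    ImageFormat.FormatChecker myChecker =
        new ImageFormat.FormatChecker() {
          @Override
          public int getHeaderSize() {
            return MY_HEADER.length;
          }

          @Override
          public ImageFormat determineFormat(byte[] headerBytes, int headerSize) {
            // returning null lets the remaining checkers run, as in ImageFormatChecker above
            return headerSize >= MY_HEADER.length
                    && ImageFormatCheckerUtils.startsWithPattern(headerBytes, MY_HEADER)
                ? MY_FORMAT
                : null;
          }
        };

    ImageDecoder myDecoder =
        new ImageDecoder() {
          @Override
          public CloseableImage decode(
              EncodedImage encodedImage,
              int length,
              QualityInfo qualityInfo,
              ImageDecodeOptions options) {
            // ... read encodedImage.getInputStream() and build a CloseableImage ...
            throw new UnsupportedOperationException("decoder body omitted in this sketch");
          }
        };

    ImageDecoderConfig decoderConfig =
        ImageDecoderConfig.newBuilder()
            .addDecodingCapability(MY_FORMAT, myChecker, myDecoder)
            .build();

    ImagePipelineConfig pipelineConfig =
        ImagePipelineConfig.newBuilder(context)
            .setImageDecoderConfig(decoderConfig)
            .build();

    Fresco.initialize(context, pipelineConfig);
  }
}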
class DefaultImageFormats {
public static final ImageFormat JPEG = new ImageFormat("JPEG", "jpeg");
public static final ImageFormat PNG = new ImageFormat("PNG", "png");
public static final ImageFormat GIF = new ImageFormat("GIF", "gif");
public static final ImageFormat BMP = new ImageFormat("BMP", "bmp");
public static final ImageFormat ICO = new ImageFormat("ICO", "ico");
public static final ImageFormat WEBP_SIMPLE = new ImageFormat("WEBP_SIMPLE", "webp");
public static final ImageFormat WEBP_LOSSLESS = new ImageFormat("WEBP_LOSSLESS", "webp");
public static final ImageFormat WEBP_EXTENDED = new ImageFormat("WEBP_EXTENDED", "webp");
public static final ImageFormat WEBP_EXTENDED_WITH_ALPHA =
new ImageFormat("WEBP_EXTENDED_WITH_ALPHA", "webp");
public static final ImageFormat WEBP_ANIMATED = new ImageFormat("WEBP_ANIMATED", "webp");
public static final ImageFormat HEIF = new ImageFormat("HEIF", "heif");
public static final ImageFormat DNG = new ImageFormat("DNG", "dng");
}
class ImageFormatChecker {
public static ImageFormat getImageFormat(final InputStream is) throws IOException {
return getInstance().determineImageFormat(is);
}
public ImageFormat determineImageFormat(final InputStream is) throws IOException {
Preconditions.checkNotNull(is);
final byte[] imageHeaderBytes = new byte[mMaxHeaderLength];
final int headerSize = readHeaderFromStream(mMaxHeaderLength, is, imageHeaderBytes);
ImageFormat format = mDefaultFormatChecker.determineFormat(imageHeaderBytes, headerSize);
if (format != null && format != ImageFormat.UNKNOWN) {
return format;
}
if (mCustomImageFormatCheckers != null) {
for (ImageFormat.FormatChecker formatChecker : mCustomImageFormatCheckers) {
format = formatChecker.determineFormat(imageHeaderBytes, headerSize);
if (format != null && format != ImageFormat.UNKNOWN) {
return format;
}
}
}
return ImageFormat.UNKNOWN;
}
}
class DefaultImageFormatChecker {
/**
* Every JPEG image should start with SOI mark (0xFF, 0xD8) followed by beginning of another
* segment (0xFF)
*/
private static final byte[] JPEG_HEADER = new byte[] {(byte) 0xFF, (byte) 0xD8, (byte) 0xFF};
public final ImageFormat determineFormat(byte[] headerBytes, int headerSize) {
Preconditions.checkNotNull(headerBytes);
if (!mUseNewOrder && WebpSupportStatus.isWebpHeader(headerBytes, 0, headerSize)) {
return getWebpFormat(headerBytes, headerSize);
}
if (isJpegHeader(headerBytes, headerSize)) {
return DefaultImageFormats.JPEG;
}
if (isPngHeader(headerBytes, headerSize)) {
return DefaultImageFormats.PNG;
}
if (mUseNewOrder && WebpSupportStatus.isWebpHeader(headerBytes, 0, headerSize)) {
return getWebpFormat(headerBytes, headerSize);
}
if (isGifHeader(headerBytes, headerSize)) {
return DefaultImageFormats.GIF;
}
if (isBmpHeader(headerBytes, headerSize)) {
return DefaultImageFormats.BMP;
}
if (isIcoHeader(headerBytes, headerSize)) {
return DefaultImageFormats.ICO;
}
if (isHeifHeader(headerBytes, headerSize)) {
return DefaultImageFormats.HEIF;
}
if (isDngHeader(headerBytes, headerSize)) {
return DefaultImageFormats.DNG;
}
return ImageFormat.UNKNOWN;
}
private static boolean isJpegHeader(final byte[] imageHeaderBytes, final int headerSize) {
return headerSize >= JPEG_HEADER.length
&& ImageFormatCheckerUtils.startsWithPattern(imageHeaderBytes, JPEG_HEADER);
}
}
JPEG
Condition | Decoder | Core API |
---|---|---|
>= Android 8 Oreo (API 26) | OreoDecoder: DefaultDecoder | BitmapFactory.decodeStream, BitmapRegionDecoder.decodeRegion |
>= Android 5 Lollipop (API 21) | ArtDecoder: DefaultDecoder | BitmapFactory.decodeStream, BitmapRegionDecoder.decodeRegion |
<= Android 4.4 KitKat (API 19) | KitKatPurgeableDecoder: DalvikPurgeableDecoder | BitmapFactory.decodeByteArray |
< Android 4.4 KitKat (API 19) & ImagePipelineExperiments.isGingerbreadDecoderEnabled | GingerbreadPurgeableDecoder: DalvikPurgeableDecoder | MemoryFile, BitmapFactory.decodeStream, native webp decoder |
Other ImagePipeline nodes
MultiplexProducer
As the name (multiplex / multiplexer) suggests, this node merges identical ImageRequests to save network and IO resources: at any given moment an identical request only needs to run once, and multiple consumers can wait for and receive the same response. How it is implemented (a conceptual sketch of the de-duplication pattern follows the code below):
- There are two multiplexers: EncodedCacheKeyMultiplexProducer (placed after the encoded memory cache) and BitmapMemoryCacheKeyMultiplexProducer (placed after the bitmap memory cache)
- Each request maps to one Multiplexer; consumers of identical requests (as determined by MultiplexProducer.getKey) are all attached to the same Multiplexer
- Only the first request passes through this node to the next one (which performs the real work: network, cache); subsequent identical requests are stopped here and their consumers are attached to the same Multiplexer
- The response of the first (and only) request is then dispatched to every consumer
- Since multiple threads are involved, creating a Multiplexer and adding consumers must be protected by synchronized
class MultiplexProducer {
public void produceResults(Consumer<T> consumer, ProducerContext context) {
try {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.beginSection("MultiplexProducer#produceResults");
}
context.getProducerListener().onProducerStart(context, mProducerName);
K key = getKey(context);
Multiplexer multiplexer;
boolean createdNewMultiplexer;
// We do want to limit scope of this lock to guard only accesses to mMultiplexers map.
// However what we would like to do here is to atomically lookup mMultiplexers, add new
// consumer to consumers set associated with the map's entry and call consumer's callback with
// last intermediate result. We should not do all of those things under this lock.
do {
createdNewMultiplexer = false;
synchronized (this) {
multiplexer = getExistingMultiplexer(key);
if (multiplexer == null) {
multiplexer = createAndPutNewMultiplexer(key);
createdNewMultiplexer = true;
}
}
// addNewConsumer may call consumer's onNewResult method immediately. For this reason
// we release "this" lock. If multiplexer is removed from mMultiplexers in the meantime,
// which is not very probable, then addNewConsumer will fail and we will be able to retry.
} while (!multiplexer.addNewConsumer(consumer, context));
if (createdNewMultiplexer) {
multiplexer.startInputProducerIfHasAttachedConsumers(
TriState.valueOf(context.isPrefetch()));
}
} finally {
if (FrescoSystrace.isTracing()) {
FrescoSystrace.endSection();
}
}
}
}
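The core idea, stripped of Fresco's retry loop, prefetch/priority bookkeeping and cancellation handling, is just a synchronized map from request key to the set of waiting consumers; only whoever creates the map entry performs the real work. A conceptual sketch (not Fresco source), as promised above:
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;

// Conceptual sketch of request de-duplication: one in-flight fetch per key,
// every caller for the same key just registers a callback on it.
class RequestMultiplexer<K, V> {
  private final Map<K, List<Consumer<V>>> mInFlight = new HashMap<>();

  /** Returns true if the caller should actually start the fetch for this key. */
  boolean addConsumer(K key, Consumer<V> consumer) {
    synchronized (this) {
      List<Consumer<V>> consumers = mInFlight.get(key);
      if (consumers != null) {
        consumers.add(consumer); // duplicate request: just wait for the existing one
        return false;
      }
      consumers = new ArrayList<>();
      consumers.add(consumer);
      mInFlight.put(key, consumers);
      return true; // first request for this key: the caller starts the real work
    }
  }

  /** Called once by the request that did the real work; fans the result out to everyone. */
  void dispatchResult(K key, V result) {
    List<Consumer<V>> consumers;
    synchronized (this) {
      consumers = mInFlight.remove(key);
    }
    if (consumers == null) {
      return;
    }
    for (Consumer<V> consumer : consumers) {
      consumer.accept(result); // notify outside the lock, as Fresco's comment above also advises
    }
  }
}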