Exploring YYKit: YYImage (Part 1)

1. Reading image data

+ (nullable YYImage *)imageNamed:(NSString *)name; // no cache!
+ (nullable YYImage *)imageWithContentsOfFile:(NSString *)path;
+ (nullable YYImage *)imageWithData:(NSData *)data;
+ (nullable YYImage *)imageWithData:(NSData *)data scale:(CGFloat)scale;

These four class methods create an image from data or from a path. The first three all end up calling imageWithData:(NSData *)data scale:(CGFloat)scale, so let's look at the logic of that method.
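Before digging into the implementation, here is a minimal usage sketch. The resource name "rocket.png", the frame, and the view-controller context are assumptions for illustration only, not taken from the post:

#import <YYKit/YYKit.h> // or the standalone YYImage / YYAnimatedImageView headers

// Minimal usage sketch: create a YYImage and let YYAnimatedImageView drive playback.
YYImage *image = [YYImage imageNamed:@"rocket.png"];            // hypothetical bundled resource
YYAnimatedImageView *imageView = [[YYAnimatedImageView alloc] initWithImage:image];
imageView.frame = CGRectMake(0, 0, 120, 120);                   // hypothetical frame
[self.view addSubview:imageView];                               // assumes a view controller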

 

2. The class method forwards to an instance initializer

+ (YYImage *)imageWithData:(NSData *)data scale:(CGFloat)scale {
    return [[self alloc] initWithData:data scale:scale];
}

Let's look at the instance method's logic in detail.

- (instancetype)initWithData:(NSData *)data scale:(CGFloat)scale {
    if (data.length == 0) return nil;
    // Fall back to the main screen's scale, which is later passed to the decoder.
    if (scale <= 0) scale = [UIScreen mainScreen].scale;
    // Create a semaphore. The parameter is its initial value (a value < 0 returns NULL);
    // with a value of 1 it acts as a lock: at most one thread may access the preloaded frame.
    _preloadedLock = dispatch_semaphore_create(1);
    @autoreleasepool {
        // Parse the image data, see 2.1 (YYImageDecoder).
        YYImageDecoder *decoder = [YYImageDecoder decoderWithData:data scale:scale];
        // Decode the current frame for display, see 3 (YYImageFrame).
        YYImageFrame *frame = [decoder frameAtIndex:0 decodeForDisplay:YES];
        // Take the first frame's image.
        UIImage *image = frame.image;
        if (!image) return nil;
        // Initialize the underlying UIImage with the decoded CGImage.
        self = [self initWithCGImage:image.CGImage scale:decoder.scale orientation:image.imageOrientation];
        if (!self) return nil;
        _animatedImageType = decoder.type;
        // Extra handling for animated images (more than one frame).
        if (decoder.frameCount > 1) {
            _decoder = decoder;
            _bytesPerFrame = CGImageGetBytesPerRow(image.CGImage) * CGImageGetHeight(image.CGImage);
            _animatedImageMemorySize = _bytesPerFrame * decoder.frameCount;
        }
        self.isDecodedForDisplay = YES;
    }
    return self;
}
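The _preloadedLock created above is a binary semaphore used as a lock. A minimal, self-contained sketch of the wait/signal pattern; the names frameLock, preloadedFrame and MyReplacePreloadedFrame are hypothetical, only to illustrate the critical section:

#import <UIKit/UIKit.h>

// A semaphore created with value 1 acts as a mutex: wait decrements the value and
// blocks when it is already 0; signal increments it back and wakes a waiter.
static dispatch_semaphore_t frameLock;  // created once elsewhere with dispatch_semaphore_create(1)
static UIImage *preloadedFrame;         // hypothetical shared state

static void MyReplacePreloadedFrame(UIImage *newFrame) {
    dispatch_semaphore_wait(frameLock, DISPATCH_TIME_FOREVER); // lock
    preloadedFrame = newFrame;                                 // critical section: one thread at a time
    dispatch_semaphore_signal(frameLock);                      // unlock
}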

2.1 Parsing the image data

+ (instancetype)decoderWithData:(NSData *)data scale:(CGFloat)scale {
    if (!data) return nil;
    YYImageDecoder *decoder = [[YYImageDecoder alloc] initWithScale:scale];
    [decoder updateData:data final:YES];
    if (decoder.frameCount == 0) return nil;
    return decoder;
}

 

The key part of this step is what updateData: does:

- (BOOL)updateData:(NSData *)data final:(BOOL)final {
    BOOL result = NO;
    // Lock: other threads entering the decoder block until this thread releases the mutex.
    pthread_mutex_lock(&_lock);
    // _updateData: first detects the image format with YYImageDetectType((__bridge CFDataRef)data),
    // then extracts the frames according to that format.
    result = [self _updateData:data final:final];
    // Release the lock once the update is done.
    pthread_mutex_unlock(&_lock);
    return result;
}
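YYImageDetectType works by inspecting the first few magic bytes of the data. The helper below is not the library's implementation, just a hedged, simplified sketch of the idea for three common formats (the real function covers more types such as WebP, BMP, TIFF and ICO); the names MyImageType and MyDetectImageType are hypothetical:

#import <Foundation/Foundation.h>

// Simplified sketch of magic-byte image format detection.
typedef NS_ENUM(NSInteger, MyImageType) { MyImageTypeUnknown, MyImageTypePNG, MyImageTypeGIF, MyImageTypeJPEG };

static MyImageType MyDetectImageType(NSData *data) {
    if (data.length < 4) return MyImageTypeUnknown;
    const uint8_t *bytes = data.bytes;
    if (bytes[0] == 0x89 && bytes[1] == 'P' && bytes[2] == 'N' && bytes[3] == 'G') return MyImageTypePNG;  // \x89PNG
    if (bytes[0] == 'G'  && bytes[1] == 'I' && bytes[2] == 'F' && bytes[3] == '8') return MyImageTypeGIF;  // GIF87a/89a
    if (bytes[0] == 0xFF && bytes[1] == 0xD8 && bytes[2] == 0xFF)                  return MyImageTypeJPEG; // JPEG SOI
    return MyImageTypeUnknown;
}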

 

2.2 Getting the frame data

Besides detecting the image format and whether the image is animated, updateData: also updates the source (which can come from data or from a file path). Below we take _updateSourceAPNG as an example and walk through the implementation.

    [self _updateSourceImageIO]; // decode first frame
    if (_frameCount == 0) return; // png decode failed
    if (!_finalized) return; // ignore multi-frame before finalized
    // _data.bytes is the pointer to the memory holding the APNG data;
    // yy_png_info_create parses it into the animation's metadata.
    yy_png_info *apng = yy_png_info_create(_data.bytes, (uint32_t)_data.length);
    if (!apng) return; // apng decode failed
    if (apng->apng_frame_num == 0 ||
        (apng->apng_frame_num == 1 && apng->apng_first_frame_is_cover)) {
        yy_png_info_release(apng);
        return; // no animation
    }
    if (_source) { // apng decode succeeded, the ImageIO source is no longer needed
        CFRelease(_source);
        _source = NULL;
    }

Here _data.bytes supplies the pointer to the APNG data in memory, and yy_png_info_create extracts the animation's metadata. The next step processes each frame of the animation:

    uint32_t canvasWidth = apng->header.width;
    uint32_t canvasHeight = apng->header.height;
    NSMutableArray *frames = [NSMutableArray new];
    BOOL needBlend = NO;
    uint32_t lastBlendIndex = 0;
    // Collect the per-frame information of the PNG animation into the frames array.
    for (uint32_t i = 0; i < apng->apng_frame_num; i++) {
        _YYImageDecoderFrame *frame = [_YYImageDecoderFrame new];
        [frames addObject:frame];

        yy_png_frame_info *fi = apng->apng_frames + i;
        frame.index = i;
        frame.duration = yy_png_delay_to_seconds(fi->frame_control.delay_num, fi->frame_control.delay_den);
        frame.hasAlpha = YES;
        frame.width = fi->frame_control.width;
        frame.height = fi->frame_control.height;
        frame.offsetX = fi->frame_control.x_offset;
        frame.offsetY = canvasHeight - fi->frame_control.y_offset - fi->frame_control.height;

        BOOL sizeEqualsToCanvas = (frame.width == canvasWidth && frame.height == canvasHeight);
        BOOL offsetIsZero = (fi->frame_control.x_offset == 0 && fi->frame_control.y_offset == 0);
        frame.isFullSize = (sizeEqualsToCanvas && offsetIsZero);

        switch (fi->frame_control.dispose_op) {
            case YY_PNG_DISPOSE_OP_BACKGROUND: {
                frame.dispose = YYImageDisposeBackground;
            } break;
            case YY_PNG_DISPOSE_OP_PREVIOUS: {
                frame.dispose = YYImageDisposePrevious;
            } break;
            default: {
                frame.dispose = YYImageDisposeNone;
            } break;
        }
        switch (fi->frame_control.blend_op) {
            case YY_PNG_BLEND_OP_OVER: {
                frame.blend = YYImageBlendOver;
            } break;
            default: {
                frame.blend = YYImageBlendNone;
            } break;
        }

        if (frame.blend == YYImageBlendNone && frame.isFullSize) {
            frame.blendFromIndex = i;
            if (frame.dispose != YYImageDisposePrevious) lastBlendIndex = i;
        } else {
            if (frame.dispose == YYImageDisposeBackground && frame.isFullSize) {
                frame.blendFromIndex = lastBlendIndex;
                lastBlendIndex = i + 1;
            } else {
                frame.blendFromIndex = lastBlendIndex;
            }
        }
        if (frame.index != frame.blendFromIndex) needBlend = YES;
    }

    _width = canvasWidth;
    _height = canvasHeight;
    _frameCount = frames.count;
    _loopCount = apng->apng_loop_num;
    _needBlend = needBlend;
    _apngSource = apng;
    dispatch_semaphore_wait(_framesLock, DISPATCH_TIME_FOREVER);
    _frames = frames;
    dispatch_semaphore_signal(_framesLock);
}
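Each frame's duration comes from the delay_num/delay_den pair in the APNG fcTL chunk. A hedged sketch of what a helper like yy_png_delay_to_seconds presumably computes (per the APNG spec a zero denominator is read as 100; YYKit's exact edge-case handling may differ, and my_png_delay_to_seconds is a made-up name):

#include <stdint.h>

// Sketch: convert an APNG fcTL delay fraction into seconds.
// The APNG spec says a delay_den of 0 is to be treated as 100 (1/100 s units).
static double my_png_delay_to_seconds(uint16_t num, uint16_t den) {
    if (den == 0) den = 100;
    return (double)num / (double)den;
}

// Example: delay_num = 5, delay_den = 100  ->  0.05 s per frame (20 fps).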

 

3. Decoding each frame for display; the step that obtains the imageRef is shown in 3.1

 

    // Create the frame's CGImage (it may not actually be decoded yet at this point).
    CGImageRef imageRef = [self _newUnblendedImageAtIndex:index extendToCanvas:extendToCanvas decoded:&decoded];
    if (!imageRef) return nil;
    if (decodeForDisplay && !decoded) {
        // Force-decode the bitmap so it is ready for display.
        CGImageRef imageRefDecoded = YYCGImageCreateDecodedCopy(imageRef, YES);
        if (imageRefDecoded) {
            CFRelease(imageRef);
            imageRef = imageRefDecoded;
            decoded = YES;
        }
    }
    // Wrap the current frame in a UIImage.
    UIImage *image = [UIImage imageWithCGImage:imageRef scale:_scale orientation:_orientation];
    CFRelease(imageRef);
    if (!image) return nil;
    image.isDecodedForDisplay = decoded;
    frame.image = image;
    return frame;
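YYCGImageCreateDecodedCopy essentially redraws the image into a bitmap context so the pixel buffer is decompressed up front instead of lazily on the main thread. The function below is not YYKit's implementation, just a simplified sketch of the idea under that assumption (the real version also handles color spaces, sizes and a non-display mode); MyCreateDecodedCopy is a made-up name:

#import <CoreGraphics/CoreGraphics.h>

// Sketch: force-decode a CGImage by drawing it into a premultiplied BGRA bitmap context.
static CGImageRef MyCreateDecodedCopy(CGImageRef imageRef) {
    if (!imageRef) return NULL;
    size_t width  = CGImageGetWidth(imageRef);
    size_t height = CGImageGetHeight(imageRef);
    if (width == 0 || height == 0) return NULL;
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    // Premultiplied BGRA in host byte order: a format Core Animation can display directly.
    CGContextRef context = CGBitmapContextCreate(NULL, width, height, 8, 0, colorSpace,
                                                 kCGBitmapByteOrder32Host | kCGImageAlphaPremultipliedFirst);
    CGColorSpaceRelease(colorSpace);
    if (!context) return NULL;
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef); // decoding happens here
    CGImageRef decoded = CGBitmapContextCreateImage(context);
    CGContextRelease(context);
    return decoded; // caller owns the returned image (Create rule)
}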

 

3.1 Obtaining the imageRef

 // Start decoding: ask ImageIO for the CGImage at this index.
        CGImageRef imageRef = CGImageSourceCreateImageAtIndex(_source, index, (CFDictionaryRef)@{(id)kCGImageSourceShouldCache:@(YES)});
        if (imageRef && extendToCanvas) {
            size_t width = CGImageGetWidth(imageRef);
            size_t height = CGImageGetHeight(imageRef);
            if (width == _width && height == _height) {
                CGImageRef imageRefExtended = YYCGImageCreateDecodedCopy(imageRef, YES);
                if (imageRefExtended) {
                    CFRelease(imageRef);
                    imageRef = imageRefExtended;
                    if (decoded) *decoded = YES;
                }
            } else {
                // The frame is smaller than the canvas, so draw it into a canvas-sized context.
                CGContextRef context = CGBitmapContextCreate(NULL, _width, _height, 8, 0, YYCGColorSpaceGetDeviceRGB(), kCGBitmapByteOrder32Host | kCGImageAlphaPremultipliedFirst);
                if (context) {
                    // Drawing the frame at its offset produces the pixel data for the full canvas.
                    CGContextDrawImage(context, CGRectMake(0, _height - height, width, height), imageRef);
                    CGImageRef imageRefExtended = CGBitmapContextCreateImage(context);
                    CFRelease(context);
                    if (imageRefExtended) {
                        CFRelease(imageRef);
                        imageRef = imageRefExtended;
                        if (decoded) *decoded = YES;
                    }
                }
            }
        }
        return imageRef;
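The _source used above is a CGImageSourceRef that the decoder creates elsewhere (in _updateSourceImageIO). As background, here is a hedged sketch of the plain ImageIO calls involved; it is not YYKit's code, MyInspectImageSource is a made-up helper, and the GIF delay lookup is just one example of the per-frame properties ImageIO exposes:

#import <Foundation/Foundation.h>
#import <ImageIO/ImageIO.h>

// Sketch: create an image source from raw data and inspect its frames.
static void MyInspectImageSource(NSData *data) {
    CGImageSourceRef source = CGImageSourceCreateWithData((__bridge CFDataRef)data, NULL);
    if (!source) return;
    size_t count = CGImageSourceGetCount(source); // number of frames (1 for static images)
    for (size_t i = 0; i < count; i++) {
        NSDictionary *props = CFBridgingRelease(CGImageSourceCopyPropertiesAtIndex(source, i, NULL));
        // For GIFs, the per-frame delay lives in the GIF property dictionary.
        NSDictionary *gif = props[(__bridge NSString *)kCGImagePropertyGIFDictionary];
        NSNumber *delay = gif[(__bridge NSString *)kCGImagePropertyGIFUnclampedDelayTime];
        NSLog(@"frame %zu: %@x%@, delay %@", i,
              props[(__bridge NSString *)kCGImagePropertyPixelWidth],
              props[(__bridge NSString *)kCGImagePropertyPixelHeight],
              delay);
    }
    CFRelease(source);
}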

 

How YYImage processes an image

1. Use imageWithData:(NSData *)data scale:(CGFloat)scale to take in the image's source data.

2. Parse the image data in two steps: detect the image format (for an animated image, the multi-frame path is taken), then build the per-frame information, e.g. via yy_png_info_create(_data.bytes, (uint32_t)_data.length) for APNG.

3. Decode each frame according to its frame info (CGImageSourceCreateImageAtIndex plus a forced bitmap decode) and display it.

 
