Redis 6.0.5 aof.c reading notes - incremental (append-only) persistence

***********************************************************************************************
AOF = Append-only file

void aofUpdateCurrentSize(void);
void aofClosePipes(void);

/* ----------------------------------------------------------------------------
 * AOF rewrite buffer implementation. AOF重写缓冲区实现
 *
 * The following code implement a simple buffer used in order to accumulate
 * changes while the background process is rewriting the AOF file.
下面的代码实现了一个简单的缓冲区,用于在后台进程重写AOF文件时累积更改
 * We only need to append, but can't just use realloc with a large block
 * because 'huge' reallocs are not always handled as one could expect
 * (via remapping of pages at OS level) but may involve copying data.
 我们只需要追加,但不能只将realloc用于大数据块,因为“巨大”realloc并不总是像人们期望的那样处理(通过在操作系统级别重新映射页面),
 而是可能涉及复制数据。
 *
 * For this reason we use a list of blocks, every block is
 * AOF_RW_BUF_BLOCK_SIZE bytes.
 因此,我们使用块列表,每个块都是AOF_RW_BUF_block_大小字节
 * ------------------------------------------------------------------------- */

#define AOF_RW_BUF_BLOCK_SIZE (1024*1024*10)    /* 10 MB per block */ 每块大小为10M

Structure of one AOF rewrite buffer block
typedef struct aofrwblock {
    unsigned long used, free;  记录已经使用和空闲的空间大小
    char buf[AOF_RW_BUF_BLOCK_SIZE]; 具体存储内容的空间
} aofrwblock;

/* This function free the old AOF rewrite buffer if needed, and initialize
 * a fresh new one. It tests for server.aof_rewrite_buf_blocks equal to NULL
 * so can be used for the first initialization as well. */
如果有需要,这个函数释放原来的AOF重写缓存,并且初始化一个新的。
它通过测试参数server.aof_rewrite_buf_blocks是否为空,这样就也能判断是否是第一次初始化。
void aofRewriteBufferReset(void) {
    if (server.aof_rewrite_buf_blocks) if a rewrite buffer list already exists (non-NULL)
        listRelease(server.aof_rewrite_buf_blocks); release the old list itself

    server.aof_rewrite_buf_blocks = listCreate(); 创建新的列表
    listSetFreeMethod(server.aof_rewrite_buf_blocks,zfree); 设置释放函数
}

/* Return the current size of the AOF rewrite buffer. */
返回AOF重写缓冲区的当前使用大小
unsigned long aofRewriteBufferSize(void) {
    listNode *ln;
    listIter li;
    unsigned long size = 0;

    listRewind(server.aof_rewrite_buf_blocks,&li); 初始化迭代列表
    while((ln = listNext(&li))) {  遍历列表中的块
        aofrwblock *block = listNodeValue(ln); 获取每块
        size += block->used;每块使用的空间
    }
    return size; 返回总的使用空间
}

/* Event handler used to send data to the child process doing the AOF
 * rewrite. We send pieces of our AOF differences buffer so that the final
 * write when the child finishes the rewrite will be small. */
事件处理器用来发送数据到执行重写AOF的子进程。
我们发送差异的AOF缓存数据碎片,这样到最后写的时候,子进程完成写的数据就会比较小。
void aofChildWriteDiffData(aeEventLoop *el, int fd, void *privdata, int mask) {
    listNode *ln;
    aofrwblock *block;
    ssize_t nwritten;
    UNUSED(el);
    UNUSED(fd);
    UNUSED(privdata);
    UNUSED(mask);

    while(1) {
        ln = listFirst(server.aof_rewrite_buf_blocks); 获取列表的第一个重写缓存数据块
        block = ln ? ln->value : NULL; 获取数据块的数据
        if (server.aof_stop_sending_diff || !block) { 停止发送差异数据 或者 为空
            aeDeleteFileEvent(server.el,server.aof_pipe_write_data_to_child,
                              AE_WRITABLE);  注册删除重写事件
            return;
        }
        if (block->used > 0) { 如果有重写数据
            nwritten = write(server.aof_pipe_write_data_to_child,
                             block->buf,block->used); 将数据写到到子进程
            if (nwritten <= 0) return; 写入失败返回
            memmove(block->buf,block->buf+nwritten,block->used-nwritten); 成功的情况下,将还没有写的数据移动到前面
            block->used -= nwritten; 使用的缓存空出来的空间
            block->free += nwritten; 增加的空闲空间
        }
        if (block->used == 0) listDelNode(server.aof_rewrite_buf_blocks,ln); 如果节点的数据为0,那么可以删除该节点
    }
}

/* Append data to the AOF rewrite buffer, allocating new blocks if needed. */
将数据追加到AOF重写缓冲区,如果需要分配新块。
void aofRewriteBufferAppend(unsigned char *s, unsigned long len) {
    listNode *ln = listLast(server.aof_rewrite_buf_blocks); 获取最后一个节点块
    aofrwblock *block = ln ? ln->value : NULL;

    while(len) {
        /* If we already got at least an allocated block, try appending
         * at least some piece into it. */
如果我们已经获得了至少一个已分配的块,尝试在其中添加至少一些数据。
        if (block) {
            unsigned long thislen = (block->free < len) ? block->free : len; 能装的最大数据长度
            if (thislen) {  /* The current block is not already full. */ 当前块还没有满
                memcpy(block->buf+block->used, s, thislen); 拷贝字符串的S的内容到重写缓冲区块
                block->used += thislen; 使用的字节长度
                block->free -= thislen; 空闲的字节长度
                s += thislen;
                len -= thislen;
            }
        }

        if (len) { /* First block to allocate, or need another block. */
        Either this is the first block to allocate (block is NULL here), or the current block is full and another one is needed
            int numblocks;

            block = zmalloc(sizeof(*block)); 分配空间
            block->free = AOF_RW_BUF_BLOCK_SIZE;
            block->used = 0;
            listAddNodeTail(server.aof_rewrite_buf_blocks,block);加入到AOF重写块列表

            /* Log every time we cross more 10 or 100 blocks, respectively
             * as a notice or warning. */
每次我们超过10个或100个块时都要记录,分别作为通知或警告
            numblocks = listLength(server.aof_rewrite_buf_blocks);
            if (((numblocks+1) % 10) == 0) { 超过10个,通知,100警告
                int level = ((numblocks+1) % 100) == 0 ? LL_WARNING :
                                                         LL_NOTICE;
                serverLog(level,"Background AOF buffer size: %lu MB",
                    aofRewriteBufferSize()/(1024*1024));
            }
        }
    }

    /* Install a file event to send data to the rewrite child if there is
     * not one already. */
如果没有注册事件触发器,安装一个事件触发器,用来发送数据到重写子进程
    if (aeGetFileEvents(server.el,server.aof_pipe_write_data_to_child) == 0) {
        aeCreateFileEvent(server.el, server.aof_pipe_write_data_to_child,
            AE_WRITABLE, aofChildWriteDiffData, NULL);
    }
}

/* Write the buffer (possibly composed of multiple blocks) into the specified
 * fd. If a short write or any other error happens -1 is returned,
 * otherwise the number of bytes written is returned. */
将缓冲区(可能由多个块组成)写入指定的文件句柄。如果发生写少了或任何其他错误,则返回-1,否则返回写入的字节数。
ssize_t aofRewriteBufferWrite(int fd) {
    listNode *ln;
    listIter li;
    ssize_t count = 0;

    listRewind(server.aof_rewrite_buf_blocks,&li);初始化aof写的列表迭代器
    while((ln = listNext(&li))) { 遍历列表
        aofrwblock *block = listNodeValue(ln); 获取列表节点的数据
        ssize_t nwritten;

        if (block->used) {
            nwritten = write(fd,block->buf,block->used); 
            if (nwritten != (ssize_t)block->used) {  没有写完
                if (nwritten == 0) errno = EIO; 
                return -1;
            }
            count += nwritten; 写入的数据数量加到总数量中
        }
    }
    return count;
}

/* ----------------------------------------------------------------------------
 * AOF file implementation  AOF文件实现
 * ------------------------------------------------------------------------- */

/* Return true if an AOf fsync is currently already in progress in a
 * BIO thread. */
Return true if an AOF fsync job is already pending in a background I/O (BIO) thread
int aofFsyncInProgress(void) {
    return bioPendingJobsOfType(BIO_AOF_FSYNC) != 0;  挂起的任务数量不为0
}

/* Starts a background task that performs fsync() against the specified
 * file descriptor (the one of the AOF file) in another thread. */
Start a background job that, in another thread, performs fsync() against the given file descriptor (the one of the AOF file).
void aof_background_fsync(int fd) {
    bioCreateBackgroundJob(BIO_AOF_FSYNC,(void*)(long)fd,NULL,NULL);
}

/* Kills an AOFRW child process if exists */
停止一个存在的AOFRW的子进程
void killAppendOnlyChild(void) {
    int statloc;
    /* No AOFRW child? return. */ 不存在AOFRW的子进程,返回
    if (server.aof_child_pid == -1) return;
    /* Kill AOFRW child, wait for child exit. */ 停止AOFRW的子进程,等待子进程退出
    serverLog(LL_NOTICE,"Killing running AOF rewrite child: %ld",
        (long) server.aof_child_pid);
    if (kill(server.aof_child_pid,SIGUSR1) != -1) {
        while(wait3(&statloc,0,NULL) != server.aof_child_pid); 一直等到子进程退出
    }
    /* Reset the buffer accumulating changes while the child saves. */
    当子进程保存之后,重置累积改变的缓存数据(就是清空原来累积的缓存数据)
    aofRewriteBufferReset();清空缓存数据
    aofRemoveTempFile(server.aof_child_pid); 根据子进程id删除文件
    server.aof_child_pid = -1;
    server.aof_rewrite_time_start = -1;
    /* Close pipes used for IPC between the two processes. */
关闭用于两个进程之间通信的管道流
    aofClosePipes(); 关闭进程管道流
    closeChildInfoPipe();关闭子进程管道流
    updateDictResizePolicy(); update the dict resize (rehash) policy, which depends on whether any child process is alive
}

/* Called when the user switches from "appendonly yes" to "appendonly no"
 * at runtime using the CONFIG command. */
运行时根据配置命令,用户将配置项从"appendonly yes" 转变为"appendonly no"
void stopAppendOnly(void) {
    serverAssert(server.aof_state != AOF_OFF); 确认AOF还没有关闭
    flushAppendOnlyFile(1); 强制将缓存写入磁盘
    redis_fsync(server.aof_fd);写入文件
    close(server.aof_fd);关闭文件

    server.aof_fd = -1;
    server.aof_selected_db = -1;
    server.aof_state = AOF_OFF;
    server.aof_rewrite_scheduled = 0;
    killAppendOnlyChild();
}

/* Called when the user switches from "appendonly no" to "appendonly yes"
 * at runtime using the CONFIG command. */
 Called when the user switches from "appendonly no" to "appendonly yes" at runtime using the CONFIG command
int startAppendOnly(void) {
    char cwd[MAXPATHLEN]; /* Current working dir path for error messages. */ 错误消息的当前工作目录路径
    int newfd;

    newfd = open(server.aof_filename,O_WRONLY|O_APPEND|O_CREAT,0644);创建新文件
    serverAssert(server.aof_state == AOF_OFF); 确认AOF状态是关闭的
    if (newfd == -1) { 文件打不开
        char *cwdp = getcwd(cwd,MAXPATHLEN); 获取目录

        serverLog(LL_WARNING,
            "Redis needs to enable the AOF but can't open the "
            "append only file %s (in server root dir %s): %s",
            server.aof_filename,
            cwdp ? cwdp : "unknown",
            strerror(errno));
        return C_ERR;
    }
    if (hasActiveChildProcess() && server.aof_child_pid == -1) { 有正在活跃的子进程 但是 不是AOF的子进程
        server.aof_rewrite_scheduled = 1; 允许开启AOF子进程
        serverLog(LL_WARNING,"AOF was enabled but there is already another background operation. An AOF background was scheduled to start when possible.");
    } else {
        /* If there is a pending AOF rewrite, we need to switch it off and
         * start a new one: the old one cannot be reused because it is not
         * accumulating the AOF buffer. */
如果有一个挂起的AOF重写,我们需要关闭它并启动一个新的:旧的不能重用,因为它没有积累AOF缓冲区。
        if (server.aof_child_pid != -1) {
            serverLog(LL_WARNING,"AOF was enabled but there is already an AOF rewriting in background. Stopping background AOF and starting a rewrite now.");
            killAppendOnlyChild();
        }
        if (rewriteAppendOnlyFileBackground() == C_ERR) { 
            close(newfd);
            serverLog(LL_WARNING,"Redis needs to enable the AOF but can't trigger a background AOF rewrite operation. Check the above logs for more info about the error.");
            return C_ERR;
        }
    }
    /* We correctly switched on AOF, now wait for the rewrite to be complete
     * in order to append data on disk. */
我们正确地打开了AOF,现在等待重写完成,以便在磁盘上追加数据
    server.aof_state = AOF_WAIT_REWRITE;
    server.aof_last_fsync = server.unixtime;
    server.aof_fd = newfd;
    return C_OK;
}

/* This is a wrapper to the write syscall in order to retry on short writes
 * or if the syscall gets interrupted. It could look strange that we retry
 * on short writes given that we are writing to a block device: normally if
 * the first call is short, there is a end-of-space condition, so the next
 * is likely to fail. However apparently in modern systems this is no longer
 * true, and in general it looks just more resilient to retry the write. If
 * there is an actual error condition we'll get it at the next try. */
A wrapper around the write syscall that retries on short writes or when the syscall is interrupted.
Retrying after a short write to a block device may look odd: normally a short first write signals an
out-of-space condition, so the next write should fail. Apparently on modern systems this is no longer
true, and in general it is simply more resilient to retry the write.
If there is a real error condition we will hit it on the next attempt anyway.
ssize_t aofWrite(int fd, const char *buf, size_t len) {
    ssize_t nwritten = 0, totwritten = 0;

    while(len) {
        nwritten = write(fd, buf, len); 写入文件

        if (nwritten < 0) {
            if (errno == EINTR) continue; interrupted by a signal, retry
            return totwritten ? totwritten : -1;
        }

        len -= nwritten; 要写入的数据减少
        buf += nwritten;
        totwritten += nwritten;
    }

    return totwritten;
}

/* Write the append only file buffer on disk. 将AOF缓存写入到磁盘
 *
 * Since we are required to write the AOF before replying to the client,
 * and the only way the client socket can get a write is entering when the
 * the event loop, we accumulate all the AOF writes in a memory
 * buffer and write it on disk using this function just before entering
 * the event loop again.
由于我们需要在回复客户机之前写入AOF,而客户机套接字获得写入的唯一方法是进入循环事件,
因此我们将所有AOF写入累积到内存缓冲区中,并在再次进入事件循环之前使用此函数将其写入磁盘。
 *
 * About the 'force' argument: 关于强制参数:
 *
 * When the fsync policy is set to 'everysec' we may delay the flush if there
 * is still an fsync() going on in the background thread, since for instance
 * on Linux write(2) will be blocked by the background fsync anyway.
 * When this happens we remember that there is some aof buffer to be
 * flushed ASAP, and will try to do that in the serverCron() function.
当同步策略被设置为每秒,我们可能会阻塞本次刷新,因为上一个fsync函数还在后台执行中。
因为例如在Linux上写入无论如何都会被后台fsync阻止。当这种情况发生时,
我们记得有一些aof缓冲区需要尽快刷新,我们将尝试在serverCron函数中这样做。
 * However if force is set to 1 we'll write regardless of the background
 * fsync. */
However, if force is set to 1 we write regardless of the background fsync. (In which situations does this happen?)
#define AOF_WRITE_LOG_ERROR_RATE 30 /* Seconds between errors logging. */ 记录错误之间的秒数
void flushAppendOnlyFile(int force) {
    ssize_t nwritten;
    int sync_in_progress = 0;
    mstime_t latency;

    if (sdslen(server.aof_buf) == 0) {
        /* Check if we need to do fsync even the aof buffer is empty,
         * because previously in AOF_FSYNC_EVERYSEC mode, fsync is
         * called only when aof buffer is not empty, so if users
         * stop write commands before fsync called in one second,
         * the data in page cache cannot be flushed in time. */
检查是否需要进行fsync,即使aof缓冲区为空,
因为以前在AOF_FSYNC_EVERYSEC(每秒同步)模式下,只有当AOF缓存不为空时才会调用FSYNC,
因此如果用户在一秒钟内调用FSYNC之前停止写入命令,则无法及时刷新页面缓存中的数据(就不会有数据)
        if (server.aof_fsync == AOF_FSYNC_EVERYSEC && 每秒进行一次同步
            server.aof_fsync_offset != server.aof_current_size && there is still data that has not been fsynced yet
            server.unixtime > server.aof_last_fsync && 当前时间已经过了上次执行的时间
            !(sync_in_progress = aofFsyncInProgress())) { 不在同步中
            goto try_fsync; 尝试同步
        } else {
            return;
        }
    }

    if (server.aof_fsync == AOF_FSYNC_EVERYSEC)
        sync_in_progress = aofFsyncInProgress();

    if (server.aof_fsync == AOF_FSYNC_EVERYSEC && !force) { 每秒刷新 并且 非强制
        /* With this append fsync policy we do background fsyncing.
         * If the fsync is still in progress we can try to delay
         * the write for a couple of seconds. */
通过这个附加fsync策略,我们可以进行后台fsync。如果fsync仍在进行中,我们可以尝试将写入延迟几秒钟。
        if (sync_in_progress) {还在后台同步刷新
            if (server.aof_flush_postponed_start == 0) {判断之前有没有设置延迟
                /* No previous write postponing, remember that we are
                 * postponing the flush and return. */
                 如果之前没有设置过延迟,记住我们正在延迟刷新并且返回
                server.aof_flush_postponed_start = server.unixtime; 设置延迟开始时间
                return;
            } else if (server.unixtime - server.aof_flush_postponed_start < 2) {
                /* We were already waiting for fsync to finish, but for less
                 * than two seconds this is still ok. Postpone again. */
                如果我们已经开始等待延迟同步结束,到目前为止还没有超过2秒,继续延迟
                return;
            }
            /* Otherwise fall trough, and go write since we can't wait
             * over two seconds. */
            否则一直执行到底,开始写,因为我们不能等待超过2秒
            server.aof_delayed_fsync++; 同步计数++
            serverLog(LL_NOTICE,"Asynchronous AOF fsync is taking too long (disk is busy?). Writing the AOF buffer without waiting for fsync to complete, this may slow down Redis.");
        }
    }
    /* We want to perform a single write. This should be guaranteed atomic
     * at least if the filesystem we are writing is a real physical one.
     * While this will save us against the server being killed I don't think
     * there is much to do about the whole server stopping for power problems
     * or alike */
We want to perform a single write. This should at least be guaranteed to be atomic if the filesystem
we are writing to is a real physical one. While this protects us against the server process being killed,
I don't think there is much we can do about the whole machine stopping because of power problems or the like.

    if (server.aof_flush_sleep && sdslen(server.aof_buf)) {
        usleep(server.aof_flush_sleep); 挂起线程
    }

    latencyStartMonitor(latency); 开启执行时间监控
    nwritten = aofWrite(server.aof_fd,server.aof_buf,sdslen(server.aof_buf));
    latencyEndMonitor(latency);
    /* We want to capture different events for delayed writes:
    我们想要为延迟写捕获不同事件
     * when the delay happens with a pending fsync, or with a saving child
     * active, and when the above two conditions are missing.
当延迟发生在挂起的fsync或保存子项处于活动状态时,以及上述两个条件缺失时。     
     * We also use an additional event name to save all samples which is
     * useful for graphing / monitoring purposes. */
我们还使用一个额外的事件名称来保存所有样本,这对于图形绘制/监控非常有用
    if (sync_in_progress) {
        latencyAddSampleIfNeeded("aof-write-pending-fsync",latency);
    } else if (hasActiveChildProcess()) {
        latencyAddSampleIfNeeded("aof-write-active-child",latency);
    } else {
        latencyAddSampleIfNeeded("aof-write-alone",latency);
    }
    latencyAddSampleIfNeeded("aof-write",latency);

    /* We performed the write so reset the postponed flush sentinel to zero. */
    我们执行了写操作,因此将延迟的刷新哨兵重置为零。
    server.aof_flush_postponed_start = 0;

    if (nwritten != (ssize_t)sdslen(server.aof_buf)) {
        static time_t last_write_error_log = 0;
        int can_log = 0;

        /* Limit logging rate to 1 line per AOF_WRITE_LOG_ERROR_RATE seconds. */
        将日志记录速率限制为每AOF_WRITE_LOG_ERROR_RATE(30)秒1行。
        if ((server.unixtime - last_write_error_log) > AOF_WRITE_LOG_ERROR_RATE) { 超过了指定的时间间隔
            can_log = 1;
            last_write_error_log = server.unixtime;
        }

        /* Log the AOF write error and record the error code. */
        记录AOF写入错误并记录错误代码
        if (nwritten == -1) {
            if (can_log) {
                serverLog(LL_WARNING,"Error writing to the AOF file: %s",
                    strerror(errno));
                server.aof_last_write_errno = errno;
            }
        } else {
            if (can_log) {
                serverLog(LL_WARNING,"Short write while writing to "
                                       "the AOF file: (nwritten=%lld, "
                                       "expected=%lld)",
                                       (long long)nwritten,
                                       (long long)sdslen(server.aof_buf));
            }

            if (ftruncate(server.aof_fd, server.aof_current_size) == -1) {
                if (can_log) {
                    serverLog(LL_WARNING, "Could not remove short write "
                             "from the append-only file.  Redis may refuse "
                             "to load the AOF the next time it starts.  "
                             "ftruncate: %s", strerror(errno));
                }
            } else {
                /* If the ftruncate() succeeded we can set nwritten to
                 * -1 since there is no longer partial data into the AOF. */
                 If ftruncate() succeeded we can set nwritten to -1, since there is no longer any partially written data in the AOF
                nwritten = -1;
            }
            server.aof_last_write_errno = ENOSPC;
        }

        /* Handle the AOF write error. */
        处理AOF写入错误
        if (server.aof_fsync == AOF_FSYNC_ALWAYS) { 每执行一个命令保存一次 模式
            /* We can't recover when the fsync policy is ALWAYS since the
             * reply for the client is already in the output buffers, and we
             * have the contract with the user that on acknowledged write data
             * is synced on disk. */
When the fsync policy is ALWAYS we cannot recover, because the reply for the client is already in the output buffers,
and our contract with the user is that acknowledged writes are synced on disk.
            serverLog(LL_WARNING,"Can't recover from AOF write error when the AOF fsync policy is 'always'. Exiting...");
            exit(1);
        } else {
            /* Recover from failed write leaving data into the buffer. However
             * set an error to stop accepting writes as long as the error
             * condition is not cleared. */
Recover from the failed write by leaving the unwritten data in the buffer. However, set an error flag so that writes are refused as long as the error condition is not cleared
            server.aof_last_write_status = C_ERR;

            /* Trim the sds buffer if there was a partial write, and there
             * was no way to undo it with ftruncate(2). */
             If there was a partial write that could not be undone with ftruncate(2), trim the part already written from the sds buffer
            if (nwritten > 0) {
                server.aof_current_size += nwritten;
                sdsrange(server.aof_buf,nwritten,-1);
            }
            return; /* We'll try again on the next call... */ 下次调用我们继续尝试
        }
    } else {
        /* Successful write(2). If AOF was in error state, restore the
         * OK state and log the event. */
         成功写入,如果AOF处于错误状态,恢复成功状态和记录事件
        if (server.aof_last_write_status == C_ERR) {
            serverLog(LL_WARNING,
                "AOF write error looks solved, Redis can write again.");
            server.aof_last_write_status = C_OK;
        }
    }
    server.aof_current_size += nwritten;

    /* Re-use AOF buffer when it is small enough. The maximum comes from the
     * arena size of 4k minus some overhead (but is otherwise arbitrary). */
当AOF缓冲区足够小时,重新使用它。最大值来自4k大小减去一些开销(但在其他方面是任意的)。
    if ((sdslen(server.aof_buf)+sdsavail(server.aof_buf)) < 4000) {
        sdsclear(server.aof_buf);
    } else {
        sdsfree(server.aof_buf);
        server.aof_buf = sdsempty();
    }

try_fsync:
    /* Don't fsync if no-appendfsync-on-rewrite is set to yes and there are
     * children doing I/O in the background. */
如果no-appendfsync-on-rewrite设置为“是”,并且有子进程在后台执行I/O,则不要执行fsync。
    if (server.aof_no_fsync_on_rewrite && hasActiveChildProcess())
        return;

    /* Perform the fsync if needed. */ 如果有需要执行fsync
    if (server.aof_fsync == AOF_FSYNC_ALWAYS) {
        /* redis_fsync is defined as fdatasync() for Linux in order to avoid
         * flushing metadata. */
          On Linux redis_fsync is defined as fdatasync(), in order to avoid flushing metadata (only the actual data is flushed)
        latencyStartMonitor(latency);
        redis_fsync(server.aof_fd); /* Let's try to get this data on the disk */
        latencyEndMonitor(latency);
        latencyAddSampleIfNeeded("aof-fsync-always",latency);
        server.aof_fsync_offset = server.aof_current_size;
        server.aof_last_fsync = server.unixtime;
    } else if ((server.aof_fsync == AOF_FSYNC_EVERYSEC && 每秒执行
                server.unixtime > server.aof_last_fsync)) {
        if (!sync_in_progress) {
            aof_background_fsync(server.aof_fd);
            server.aof_fsync_offset = server.aof_current_size;
        }
        server.aof_last_fsync = server.unixtime; 更新时间
    }
}
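
As a side note, the behavior of flushAppendOnlyFile() is driven by a few redis.conf directives; a minimal sketch of the relevant settings (the values shown are just illustrative) might look like:

    appendonly yes                   # enable the AOF at all
    appendfsync everysec             # always | everysec | no, maps to the AOF_FSYNC_* policies above
    no-appendfsync-on-rewrite no     # if yes, skip fsync while a child is doing background I/O

With "everysec" the fsync is handed to a BIO thread roughly once per second; with "always" it is done inline after every write; with "no" the kernel decides when to flush.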

Concatenate an executed command onto an sds string, in RESP (REdis Serialization Protocol) format
sds catAppendOnlyGenericCommand(sds dst, int argc, robj **argv) {
    char buf[32];
    int len, j;
    robj *o;

    buf[0] = '*';  数组开始
    len = 1+ll2string(buf+1,sizeof(buf)-1,argc); 参数个数
    buf[len++] = '\r'; 回车
    buf[len++] = '\n'; 换行
    dst = sdscatlen(dst,buf,len);

    for (j = 0; j < argc; j++) { 拼接参数
        o = getDecodedObject(argv[j]);
        buf[0] = '$'; 字符块开头
        len = 1+ll2string(buf+1,sizeof(buf)-1,sdslen(o->ptr)); 参数的长度
        buf[len++] = '\r';
        buf[len++] = '\n';
        dst = sdscatlen(dst,buf,len);
        dst = sdscatlen(dst,o->ptr,sdslen(o->ptr)); 具体的参数字符串值
        dst = sdscatlen(dst,"\r\n",2);结束的回车换行
        decrRefCount(o);
    }
    return dst; 返回拼接的字符串
}
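
For example (assuming a simple command such as SET mykey hello), the string produced by this function is the standard RESP encoding, i.e. an array header with the argument count followed by each argument as a length-prefixed bulk string:

    *3\r\n$3\r\nSET\r\n$5\r\nmykey\r\n$5\r\nhello\r\n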

/* Create the sds representation of an PEXPIREAT command, using
 * 'seconds' as time to live and 'cmd' to understand what command
 * we are translating into a PEXPIREAT.
创建命令PEXPIREAT的sds字符串表达式,使用秒作为生存时间,使用'cmd'来理解什么命令被转化为PEXPIREAT表示
 * This command is used in order to translate EXPIRE and PEXPIRE commands
 * into PEXPIREAT command so that we retain precision in the append only
 * file, and the time is always absolute and not relative. */
这个命令用于将EXPIRE和PEXPIRE命令转换为PEXPIREAT命令,
这样就可以在AOF文件中保持精度,使用的是绝对时间而不是相对时间
sds catAppendOnlyExpireAtCommand(sds buf, struct redisCommand *cmd, robj *key, robj *seconds) {
    long long when;
    robj *argv[3];

    /* Make sure we can use strtoll */ 确认我们可以使用字符串转长整形
    seconds = getDecodedObject(seconds); 获取秒的redis对象
    when = strtoll(seconds->ptr,NULL,10); 进行字符串转化为整数,以10为基
    /* Convert argument into milliseconds for EXPIRE, SETEX, EXPIREAT */
    对于命令EXPIRE, SETEX, EXPIREAT,将参数转为毫秒
    if (cmd->proc == expireCommand || cmd->proc == setexCommand ||
        cmd->proc == expireatCommand)
    {
        when *= 1000;
    }
    /* Convert into absolute time for EXPIRE, PEXPIRE, SETEX, PSETEX */
将命令EXPIRE, PEXPIRE, SETEX, PSETEX设置的相对时间变成绝对时间
    if (cmd->proc == expireCommand || cmd->proc == pexpireCommand ||
        cmd->proc == setexCommand || cmd->proc == psetexCommand)
    {
        when += mstime();
    }
    decrRefCount(seconds); 针对上述getDecodedObject的反操作

    argv[0] = createStringObject("PEXPIREAT",9);创建命令字符串
    argv[1] = key; 指定的键
    argv[2] = createStringObjectFromLongLong(when); 系统绝对时间
    buf = catAppendOnlyGenericCommand(buf, 3, argv); 拼接命令
    decrRefCount(argv[0]);
    decrRefCount(argv[2]);
    return buf;
}
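
For illustration (the timestamps are made up): if the original command was EXPIRE mykey 10 and mstime() returned 1600000000000, this function appends the RESP form of

    PEXPIREAT mykey 1600000010000

since the 10 seconds are first converted to 10000 milliseconds and then added to the current absolute time.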

void feedAppendOnlyFile(struct redisCommand *cmd, int dictid, robj **argv, int argc) {
    sds buf = sdsempty(); 创建空sds字符串
    robj *tmpargv[3];

    /* The DB this command was targeting is not the same as the last command
     * we appended. To issue a SELECT command is needed. */
此命令所针对的DB与我们附加的上一个命令不同。需要使用SELECT命令切换数据库。
    if (dictid != server.aof_selected_db) { 不和之前的数据库相同,需要切换
        char seldb[64];

        snprintf(seldb,sizeof(seldb),"%d",dictid);
        buf = sdscatprintf(buf,"*2\r\n$6\r\nSELECT\r\n$%lu\r\n%s\r\n",
            (unsigned long)strlen(seldb),seldb);
        server.aof_selected_db = dictid;
    }

    if (cmd->proc == expireCommand || cmd->proc == pexpireCommand ||
        cmd->proc == expireatCommand) {
        /* Translate EXPIRE/PEXPIRE/EXPIREAT into PEXPIREAT */
        将命令EXPIRE/PEXPIRE/EXPIREAT转化为PEXPIREAT命令
        buf = catAppendOnlyExpireAtCommand(buf,cmd,argv[1],argv[2]);
    } else if (cmd->proc == setexCommand || cmd->proc == psetexCommand) {
        /* Translate SETEX/PSETEX to SET and PEXPIREAT */
        将命令SETEX/PSETEX转化为SET和PEXPIREAT命令
        tmpargv[0] = createStringObject("SET",3);
        tmpargv[1] = argv[1];
        tmpargv[2] = argv[3];
        buf = catAppendOnlyGenericCommand(buf,3,tmpargv);
        decrRefCount(tmpargv[0]);
        buf = catAppendOnlyExpireAtCommand(buf,cmd,argv[1],argv[2]);
    } else if (cmd->proc == setCommand && argc > 3) {  参数大于3个
        int i;
        robj *exarg = NULL, *pxarg = NULL;
        for (i = 3; i < argc; i ++) {
            if (!strcasecmp(argv[i]->ptr, "ex")) exarg = argv[i+1]; 秒
            if (!strcasecmp(argv[i]->ptr, "px")) pxarg = argv[i+1]; 毫秒
        }
        serverAssert(!(exarg && pxarg));

        if (exarg || pxarg) { 存在时间参数
            /* Translate SET [EX seconds][PX milliseconds] to SET and PEXPIREAT */
            将SET [EX seconds][PX milliseconds]格式的命令转化为SET和PEXPIREAT
            buf = catAppendOnlyGenericCommand(buf,3,argv);
            if (exarg)
                buf = catAppendOnlyExpireAtCommand(buf,server.expireCommand,argv[1],
                                                   exarg);
            if (pxarg)
                buf = catAppendOnlyExpireAtCommand(buf,server.pexpireCommand,argv[1],
                                                   pxarg);
        } else {
            buf = catAppendOnlyGenericCommand(buf,argc,argv);
        }
    } else {
        /* All the other commands don't need translation or need the
         * same translation already operated in the command vector
         * for the replication itself. */
所有其他命令不需要转换,或者需要在复制本身的命令向量中已经进行了相同转换的操作。
        buf = catAppendOnlyGenericCommand(buf,argc,argv);
    }

    /* Append to the AOF buffer. This will be flushed on disk just before
     * of re-entering the event loop, so before the client will get a
     * positive reply about the operation performed. */
Append to the AOF buffer. The buffer is flushed to disk just before re-entering the event loop,
so this happens before the client gets a positive reply about the operation it performed.
    if (server.aof_state == AOF_ON)
        server.aof_buf = sdscatlen(server.aof_buf,buf,sdslen(buf));

    /* If a background append only file rewriting is in progress we want to
     * accumulate the differences between the child DB and the current one
     * in a buffer, so that when the child process will do its work we
     * can append the differences to the new append only file. */
如果一个后台AOF写进程正在处理中,我们希望在缓冲区中累积子数据库与当前数据库之间的差异,
以便在子进程完成其写工作时,我们可以将差异附加到新的AOF文件。
    if (server.aof_child_pid != -1)
        aofRewriteBufferAppend((unsigned char*)buf,sdslen(buf));

    sdsfree(buf);
}
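
A concrete example of the translation above (key and value are hypothetical): SETEX mykey 10 hello is not written to the AOF as-is, but propagated as two commands,

    SET mykey hello
    PEXPIREAT mykey <mstime() + 10000>

which keeps the expire absolute and millisecond-precise in the log.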

/* ----------------------------------------------------------------------------
 * AOF loading  AOF文件加载
 * ------------------------------------------------------------------------- */

/* In Redis commands are always executed in the context of a client, so in
 * order to load the append only file we need to create a fake client. */
redis命令总是在客户端的上下文中执行,因此为了加载AOF文件,我们需要创建一个假的客户端
struct client *createAOFClient(void) {
    struct client *c = zmalloc(sizeof(*c)); 给客户端分配空间

    selectDb(c,0);
    c->id = CLIENT_ID_AOF; /* So modules can identify it's the AOF client. */ 
    设置一个特殊的客户端ID,这样方便模块识别
    c->conn = NULL;
    c->name = NULL;
    c->querybuf = sdsempty();
    c->querybuf_peak = 0;
    c->argc = 0;
    c->argv = NULL;
    c->bufpos = 0;
    c->flags = 0;
    c->btype = BLOCKED_NONE;
    /* We set the fake client as a slave waiting for the synchronization
     * so that Redis will not try to send replies to this client. */
我们将假客户端设置为等待同步的从属客户端,以便Redis不会尝试向该客户端发送回复。
    c->replstate = SLAVE_STATE_WAIT_BGSAVE_START;
    c->reply = listCreate();
    c->reply_bytes = 0;
    c->obuf_soft_limit_reached_time = 0;
    c->watched_keys = listCreate();
    c->peerid = NULL;
    c->resp = 2;
    c->user = NULL;
    listSetFreeMethod(c->reply,freeClientReplyValue);
    listSetDupMethod(c->reply,dupClientReplyValue);
    initClientMultiState(c);
    return c;
}

释放假客户端的参数
void freeFakeClientArgv(struct client *c) {
    int j;

    for (j = 0; j < c->argc; j++)
        decrRefCount(c->argv[j]);
    zfree(c->argv);
}
释放假客户端(主要是释放创建时候申请的资源)
void freeFakeClient(struct client *c) {
    sdsfree(c->querybuf);
    listRelease(c->reply);
    listRelease(c->watched_keys);
    freeClientMultiState(c);
    zfree(c);
}

/* Replay the append log file. On success C_OK is returned. On non fatal
 * error (the append only file is zero-length) C_ERR is returned. On
 * fatal error an error message is logged and the program exists. */
重放AOF文件,如果成功就返回C_OK.非致命错误(AOF文件长度为0),就返回C_ERR.
如果是致命错误,就记录错误信息,退出程序
int loadAppendOnlyFile(char *filename) {
    struct client *fakeClient;
    FILE *fp = fopen(filename,"r"); 以只读方式打开AOF文件
    struct redis_stat sb;
    int old_aof_state = server.aof_state;
    long loops = 0;
    off_t valid_up_to = 0; /* Offset of latest well-formed command loaded. */ 加载的最新格式正确的命令的偏移量
    off_t valid_before_multi = 0; /* Offset before MULTI command loaded. */ 加载事务命令之前的偏移量。

    if (fp == NULL) { 加载文件打开失败
        serverLog(LL_WARNING,"Fatal error: can't open the append log file for reading: %s",strerror(errno));
        exit(1);
    }

    /* Handle a zero-length AOF file as a special case. An empty AOF file
     * is a valid AOF because an empty server with AOF enabled will create
     * a zero length file at startup, that will remain like that if no write
     * operation is received. */
处理一个空的AOF文件是一种特殊情况。因为一个空的AOF文件是有效的AOF文件,
当空的服务器启动时候允许AOF就会产生一个空的AOF文件,如果没有收到写入操作,则仍将保持这种状态
    if (fp && redis_fstat(fileno(fp),&sb) != -1 && sb.st_size == 0) {
        server.aof_current_size = 0;
        server.aof_fsync_offset = server.aof_current_size;
        fclose(fp);
        return C_ERR;
    }

    /* Temporarily disable AOF, to prevent EXEC from feeding a MULTI
     * to the same file we're about to read. */
Temporarily disable AOF, to prevent EXEC from feeding a MULTI into the very file we are about to read
    server.aof_state = AOF_OFF;

    fakeClient = createAOFClient(); 创建假客户端
    startLoadingFile(fp, filename, RDBFLAGS_AOF_PREAMBLE); 开始加载

    /* Check if this AOF file has an RDB preamble. In that case we need to
     * load the RDB file and later continue loading the AOF tail. */
检查AOF文件是否有一个RDB格式的前缀。在这种情况下,我们需要先加载RDB文件,然后再加载后面的AOF文件
    char sig[5]; /* "REDIS" */
    if (fread(sig,1,5,fp) != 5 || memcmp(sig,"REDIS",5) != 0) { 判断是否是RDB文件格式
        /* No RDB preamble, seek back at 0 offset. */ 非RDB前缀格式,退回到文件开始
        if (fseek(fp,0,SEEK_SET) == -1) goto readerr;
    } else {
        /* RDB preamble. Pass loading the RDB functions. */ RDB格式。 通过加载RDB文件(混合模式的情况)
        rio rdb;

        serverLog(LL_NOTICE,"Reading RDB preamble from AOF file...");
        if (fseek(fp,0,SEEK_SET) == -1) goto readerr; 
        rioInitWithFile(&rdb,fp);
        if (rdbLoadRio(&rdb,RDBFLAGS_AOF_PREAMBLE,NULL) != C_OK) { 加载rdb前缀不成功的情况
            serverLog(LL_WARNING,"Error reading the RDB preamble of the AOF file, AOF loading aborted");
            goto readerr;
        } else {
            serverLog(LL_NOTICE,"Reading the remaining AOF tail...");
        }
    }

    /* Read the actual AOF file, in REPL format, command by command. */
Read the actual AOF file, command by command, in REPL format (i.e. the same command format used for the replication stream / Redis protocol, not a Read-Eval-Print-Loop).
    while(1) {
        int argc, j;
        unsigned long len;
        robj **argv;
        char buf[128];
        sds argsds;
        struct redisCommand *cmd;

        /* Serve the clients from time to time */ 随时为客户服务
        if (!(loops++ % 1000)) {  循环每一千次的时候
            loadingProgress(ftello(fp));
            processEventsWhileBlocked();
            processModuleLoadingProgressEvent(1);
        }

        if (fgets(buf,sizeof(buf),fp) == NULL) {
            if (feof(fp)) 文件结尾
                break;
            else
                goto readerr;
        }
        if (buf[0] != '*') goto fmterr;
        if (buf[1] == '\0') goto readerr; 
        argc = atoi(buf+1);参数的个数
        if (argc < 1) goto fmterr;

        /* Load the next command in the AOF as our fake client
         * argv. */ 将AOF中的下一个命令作为伪客户端参数加载
        argv = zmalloc(sizeof(robj*)*argc);
        fakeClient->argc = argc;
        fakeClient->argv = argv;

        for (j = 0; j < argc; j++) {
            /* Parse the argument len. */ 分析参数长度
            char *readres = fgets(buf,sizeof(buf),fp);
            if (readres == NULL || buf[0] != '$') { 参数为空,或者开始的不是字符串块
                fakeClient->argc = j; /* Free up to j-1. */  free arguments up to index j-1; the later ones were never allocated
                freeFakeClientArgv(fakeClient);
                if (readres == NULL) 如果是为空,表示读取数据出现错误
                    goto readerr;
                else
                    goto fmterr; 否则就是格式有问题
            }
            len = strtol(buf+1,NULL,10);字符串转长整形,以10为基 buf[0]是$符号

            /* Read it into a string object. */ 
             将字符读取到字符串对象
            argsds = sdsnewlen(SDS_NOINIT,len);
            if (len && fread(argsds,len,1,fp) == 0) { 读取一个长度为len的数据块到argsds, 失败的情况
                sdsfree(argsds); 释放申请的资源
                fakeClient->argc = j; /* Free up to j-1. */ 
                freeFakeClientArgv(fakeClient);
                goto readerr;
            }
            argv[j] = createObject(OBJ_STRING,argsds);创建sds对象

            /* Discard CRLF. */ 去掉回车换行
            if (fread(buf,2,1,fp) == 0) {
                fakeClient->argc = j+1; /* Free up to j. */
                freeFakeClientArgv(fakeClient);
                goto readerr;
            }
        }

        /* Command lookup */ 命令查询
        cmd = lookupCommand(argv[0]->ptr);
        if (!cmd) { 不认识的命令
            serverLog(LL_WARNING,
                "Unknown command '%s' reading the append only file",
                (char*)argv[0]->ptr);
            exit(1);
        }

        if (cmd == server.multiCommand) valid_before_multi = valid_up_to; 事务的情况下,偏移量

        /* Run the command in the context of a fake client */ 在假客户端的上下文中运行该命令
        fakeClient->cmd = fakeClient->lastcmd = cmd;
        if (fakeClient->flags & CLIENT_MULTI &&
            fakeClient->cmd->proc != execCommand) 是事务且不在执行
        {
            queueMultiCommand(fakeClient); 将待执行命令加入到事务队列
        } else {
            cmd->proc(fakeClient); 执行命令
        }

        /* The fake client should not have a reply */ 假客户端不需要回复
        serverAssert(fakeClient->bufpos == 0 &&
                     listLength(fakeClient->reply) == 0);

        /* The fake client should never get blocked */ 假客户端不会阻塞
        serverAssert((fakeClient->flags & CLIENT_BLOCKED) == 0);

        /* Clean up. Command code may have changed argv/argc so we use the
         * argv/argc of the client instead of the local variables. */
清理工作。命令代码可能已更改argv/argc参数,因此我们使用客户端的argv/argc,而不是本地变量。
        freeFakeClientArgv(fakeClient); 
        fakeClient->cmd = NULL;
        if (server.aof_load_truncated) valid_up_to = ftello(fp); 返回文件当前的指向位置
        if (server.key_load_delay) 存在延迟加载测试时间
            usleep(server.key_load_delay);延时 单位微秒
    }

    /* This point can only be reached when EOF is reached without errors.
     * If the client is in the middle of a MULTI/EXEC, handle it as it was
     * a short read, even if technically the protocol is correct: we want
     * to remove the unprocessed tail and continue. */
This point is only reached when EOF is hit without errors. If the fake client is in the middle of a MULTI/EXEC,
handle it as if it were a short read, even if technically the protocol is correct: we want to remove the unprocessed tail and continue.
    if (fakeClient->flags & CLIENT_MULTI) {
        serverLog(LL_WARNING,
            "Revert incomplete MULTI/EXEC transaction in AOF file");
        valid_up_to = valid_before_multi;
        goto uxeof;
    }

loaded_ok: /* DB loaded, cleanup and return C_OK to the caller. */ 成功加载数据库,清理并且返回成功给调用者
    fclose(fp);
    freeFakeClient(fakeClient);
    server.aof_state = old_aof_state;
    stopLoading(1);
    aofUpdateCurrentSize();
    server.aof_rewrite_base_size = server.aof_current_size;
    server.aof_fsync_offset = server.aof_current_size;
    return C_OK;

readerr: /* Read error. If feof(fp) is true, fall through to unexpected EOF. */
读取错误。如果feof(fp)为真(文件结束,返回非0),则直接转到文件结束块 uxeof:
    if (!feof(fp)) { 文件还没有结束
        if (fakeClient) freeFakeClient(fakeClient); /* avoid valgrind warning */ 避免valgrind工具报警
        fclose(fp);关闭文件
        serverLog(LL_WARNING,"Unrecoverable error reading the append only file: %s", strerror(errno));
        exit(1); 退出程序
    }

uxeof: /* Unexpected AOF end of file. */ 文件结束的情况
    if (server.aof_load_truncated) { 允许文件截断,这时候需要给出警告信息
        serverLog(LL_WARNING,"!!! Warning: short read while loading the AOF file !!!");
        serverLog(LL_WARNING,"!!! Truncating the AOF at offset %llu !!!",
            (unsigned long long) valid_up_to);
        if (valid_up_to == -1 || truncate(filename,valid_up_to) == -1) { 修剪文件大小为valid_up_to失败
            if (valid_up_to == -1) { 
                serverLog(LL_WARNING,"Last valid command offset is invalid");
            } else {
                serverLog(LL_WARNING,"Error truncating the AOF file: %s",
                    strerror(errno));
            }
        } else {
            /* Make sure the AOF file descriptor points to the end of the
             * file after the truncate call. */
确保AOF文件描述符在truncate调用后指向文件的末尾。
            if (server.aof_fd != -1 && lseek(server.aof_fd,0,SEEK_END) == -1) { 移动到文件末尾失败
                serverLog(LL_WARNING,"Can't seek the end of the AOF file: %s",
                    strerror(errno));
            } else {
                serverLog(LL_WARNING,
                    "AOF loaded anyway because aof-load-truncated is enabled");
                goto loaded_ok;
            }
        }
    }
    if (fakeClient) freeFakeClient(fakeClient); /* avoid valgrind warning */ 避免检测工具valgrind报警
    fclose(fp);
    serverLog(LL_WARNING,"Unexpected end of file reading the append only file. You can: 1) Make a backup of your AOF file, then use ./redis-check-aof --fix <filename>. 2) Alternatively you can set the 'aof-load-truncated' configuration option to yes and restart the server.");
    exit(1);

fmterr: /* Format error. */ 格式错误
    if (fakeClient) freeFakeClient(fakeClient); /* avoid valgrind warning */
    fclose(fp);
    serverLog(LL_WARNING,"Bad file format reading the append only file: make a backup of your AOF file, then use ./redis-check-aof --fix <filename>");
    exit(1);
}
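
To make the parsing loop above more concrete, a tiny AOF file without an RDB preamble is just a sequence of RESP-encoded commands, for example (contents are illustrative):

    *2\r\n$6\r\nSELECT\r\n$1\r\n0\r\n
    *3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n

The loader reads the '*' line to get argc, then for each argument a '$' length line followed by the payload and its trailing CRLF, and finally replays the command through the fake client.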

/* ----------------------------------------------------------------------------
 * AOF rewrite AOF重写
 * ------------------------------------------------------------------------- */

/* Delegate writing an object to writing a bulk string or bulk long long.
 * This is not placed in rio.c since that adds the server.h dependency. */
Delegate writing an object to writing a bulk string or a bulk long long. This is not placed in rio.c because it would add a dependency on server.h
(although in this version rio.c does include server.h, so the comment probably just was not updated)
int rioWriteBulkObject(rio *r, robj *obj) {
    /* Avoid using getDecodedObject to help copy-on-write (we are often
     * in a child process when this function is called). */
Avoid using getDecodedObject, to help copy-on-write
(when this function is called we are often running inside a child process)
    if (obj->encoding == OBJ_ENCODING_INT) {
        return rioWriteBulkLongLong(r,(long)obj->ptr); 写入长整型
    } else if (sdsEncodedObject(obj)) {
        return rioWriteBulkString(r,obj->ptr,sdslen(obj->ptr)); 写入sds字符串
    } else {
        serverPanic("Unknown string encoding");
    }
}

/* Emit the commands needed to rebuild a list object.
 * The function returns 0 on error, 1 on success. */
发出需要重建一个列表对象的命令。这个函数错误返回0,成功返回1.
int rewriteListObject(rio *r, robj *key, robj *o) {
    long long count = 0, items = listTypeLength(o); 列表元素个数
 
    if (o->encoding == OBJ_ENCODING_QUICKLIST) { quicklist encoding (a linked list of ziplist nodes)
        quicklist *list = o->ptr;
        quicklistIter *li = quicklistGetIterator(list, AL_START_HEAD);从头部开始迭代
        quicklistEntry entry;

        while (quicklistNext(li,&entry)) {
            if (count == 0) {
            命令的参数个数是否超过AOF_REWRITE_ITEMS_PER_CMD个,超过需要截为AOF_REWRITE_ITEMS_PER_CMD
                int cmd_items = (items > AOF_REWRITE_ITEMS_PER_CMD) ? 
                    AOF_REWRITE_ITEMS_PER_CMD : items;
                if (rioWriteBulkCount(r,'*',2+cmd_items) == 0) return 0; 写入数量
                if (rioWriteBulkString(r,"RPUSH",5) == 0) return 0;
                if (rioWriteBulkObject(r,key) == 0) return 0;
            }

            if (entry.value) { 存在字符串
                if (rioWriteBulkString(r,(char*)entry.value,entry.sz) == 0) return 0; 写入字符串
            } else { 整型
                if (rioWriteBulkLongLong(r,entry.longval) == 0) return 0; 写入整数
            }
            if (++count == AOF_REWRITE_ITEMS_PER_CMD) count = 0; 每AOF_REWRITE_ITEMS_PER_CMD为一组
            items--;
        }
        quicklistReleaseIterator(li);释放迭代器
    } else {
        serverPanic("Unknown list encoding");
    }
    return 1;
}
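
A quick worked example of the AOF_REWRITE_ITEMS_PER_CMD splitting (the constant is 64 in this version): a list key holding 130 elements is not rewritten as one huge RPUSH but as three commands,

    RPUSH key e1 ... e64
    RPUSH key e65 ... e128
    RPUSH key e129 e130

count wraps back to 0 every 64 items, while items ensures the last command's '*' header only announces the remaining 2 arguments plus the command name and the key.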

/* Emit the commands needed to rebuild a set object.
 * The function returns 0 on error, 1 on success. */
发出重建集合对象所需的命令。这个函数错误返回0,成功返回1.
int rewriteSetObject(rio *r, robj *key, robj *o) {
    long long count = 0, items = setTypeSize(o);

    if (o->encoding == OBJ_ENCODING_INTSET) { 整数编码
        int ii = 0;
        int64_t llval;

        while(intsetGet(o->ptr,ii++,&llval)) {
            if (count == 0) {
                int cmd_items = (items > AOF_REWRITE_ITEMS_PER_CMD) ? at most 64 elements per command; more than that is split across several SADD commands
                    AOF_REWRITE_ITEMS_PER_CMD : items;

                if (rioWriteBulkCount(r,'*',2+cmd_items) == 0) return 0;  *是前缀 \r\n是结尾 中间的才是计数
                if (rioWriteBulkString(r,"SADD",4) == 0) return 0; 
Sadd 命令将一个或多个成员元素加入到集合中,已经存在于集合的成员元素将被忽略。
假如集合 key 不存在,则创建一个只包含添加的元素作成员的集合。当集合 key 不是集合类型时,返回一个错误。
                if (rioWriteBulkObject(r,key) == 0) return 0;
            }
            if (rioWriteBulkLongLong(r,llval) == 0) return 0; 写入整数值
            if (++count == AOF_REWRITE_ITEMS_PER_CMD) count = 0;
            items--;
        }
    } else if (o->encoding == OBJ_ENCODING_HT) { 字典编码
        dictIterator *di = dictGetIterator(o->ptr);
        dictEntry *de;

        while((de = dictNext(di)) != NULL) {
            sds ele = dictGetKey(de);
            if (count == 0) {
                int cmd_items = (items > AOF_REWRITE_ITEMS_PER_CMD) ?
                    AOF_REWRITE_ITEMS_PER_CMD : items;

                if (rioWriteBulkCount(r,'*',2+cmd_items) == 0) return 0;
                if (rioWriteBulkString(r,"SADD",4) == 0) return 0;
                if (rioWriteBulkObject(r,key) == 0) return 0;
            }
            if (rioWriteBulkString(r,ele,sdslen(ele)) == 0) return 0;写入字符串
            if (++count == AOF_REWRITE_ITEMS_PER_CMD) count = 0;
            items--;
        }
        dictReleaseIterator(di);
    } else {
        serverPanic("Unknown set encoding");
    }
    return 1;
}

/* Emit the commands needed to rebuild a sorted set object.
 * The function returns 0 on error, 1 on success. */
发出重建排序集对象所需的命令。这个函数错误返回0,成功返回1.
int rewriteSortedSetObject(rio *r, robj *key, robj *o) {
    long long count = 0, items = zsetLength(o);

    if (o->encoding == OBJ_ENCODING_ZIPLIST) { 压缩列表
        unsigned char *zl = o->ptr;
        unsigned char *eptr, *sptr;
        unsigned char *vstr;
        unsigned int vlen;
        long long vll;
        double score;

        eptr = ziplistIndex(zl,0);指向压缩列表的头
        serverAssert(eptr != NULL);
        sptr = ziplistNext(zl,eptr); 获取指向下个元素的指针
        serverAssert(sptr != NULL);

        while (eptr != NULL) { 当前元素非空
            serverAssert(ziplistGet(eptr,&vstr,&vlen,&vll));确认元素有值
            score = zzlGetScore(sptr);获取排序值

            if (count == 0) {
                int cmd_items = (items > AOF_REWRITE_ITEMS_PER_CMD) ?
                    AOF_REWRITE_ITEMS_PER_CMD : items;

                if (rioWriteBulkCount(r,'*',2+cmd_items*2) == 0) return 0;
                if (rioWriteBulkString(r,"ZADD",4) == 0) return 0;
                if (rioWriteBulkObject(r,key) == 0) return 0;
            }
            if (rioWriteBulkDouble(r,score) == 0) return 0;
            if (vstr != NULL) {
                if (rioWriteBulkString(r,(char*)vstr,vlen) == 0) return 0;
            } else {
                if (rioWriteBulkLongLong(r,vll) == 0) return 0;
            }
            zzlNext(zl,&eptr,&sptr);
            if (++count == AOF_REWRITE_ITEMS_PER_CMD) count = 0;
            items--;
        }
    } else if (o->encoding == OBJ_ENCODING_SKIPLIST) {
        zset *zs = o->ptr;
        dictIterator *di = dictGetIterator(zs->dict);
        dictEntry *de;

        while((de = dictNext(di)) != NULL) {
            sds ele = dictGetKey(de);
            double *score = dictGetVal(de);

            if (count == 0) {
                int cmd_items = (items > AOF_REWRITE_ITEMS_PER_CMD) ?
                    AOF_REWRITE_ITEMS_PER_CMD : items;

                if (rioWriteBulkCount(r,'*',2+cmd_items*2) == 0) return 0;
                if (rioWriteBulkString(r,"ZADD",4) == 0) return 0;
                if (rioWriteBulkObject(r,key) == 0) return 0;
            }
            if (rioWriteBulkDouble(r,*score) == 0) return 0; 写入排序数值
            if (rioWriteBulkString(r,ele,sdslen(ele)) == 0) return 0; 写入字符串本身
            if (++count == AOF_REWRITE_ITEMS_PER_CMD) count = 0;
            items--;
        }
        dictReleaseIterator(di);
    } else {
        serverPanic("Unknown sorted zset encoding");
    }
    return 1;
}

/* Write either the key or the value of the currently selected item of a hash.
 * The 'hi' argument passes a valid Redis hash iterator.
 * The 'what' filed specifies if to write a key or a value and can be
 * either OBJ_HASH_KEY or OBJ_HASH_VALUE.
写入散列中当前选定项的键或值。“hi”参数传递有效的Redis哈希迭代器。
“what”字段指定是写入键还是写入值,可以是OBJ_HASH_KEY或OBJ_HASH_VALUE。
 * The function returns 0 on error, non-zero on success. */
返回0表示函数执行错误,返回非0表示成功
static int rioWriteHashIteratorCursor(rio *r, hashTypeIterator *hi, int what) {
    if (hi->encoding == OBJ_ENCODING_ZIPLIST) { 压缩列表
        unsigned char *vstr = NULL;
        unsigned int vlen = UINT_MAX;
        long long vll = LLONG_MAX;

        hashTypeCurrentFromZiplist(hi, what, &vstr, &vlen, &vll);
        if (vstr) 字符串
            return rioWriteBulkString(r, (char*)vstr, vlen);
        else  整型
            return rioWriteBulkLongLong(r, vll);
    } else if (hi->encoding == OBJ_ENCODING_HT) { 哈希字典编码
        sds value = hashTypeCurrentFromHashTable(hi, what);
        return rioWriteBulkString(r, value, sdslen(value));
    }

    serverPanic("Unknown hash encoding");
    return 0;
}

/* Emit the commands needed to rebuild a hash object.
 * The function returns 0 on error, 1 on success. */
发出重建哈希对象所需的命令。这个函数错误返回0,成功返回1.
int rewriteHashObject(rio *r, robj *key, robj *o) {
    hashTypeIterator *hi;
    long long count = 0, items = hashTypeLength(o);

    hi = hashTypeInitIterator(o); 初始化迭代器
    while (hashTypeNext(hi) != C_ERR) { 
        if (count == 0) {
            int cmd_items = (items > AOF_REWRITE_ITEMS_PER_CMD) ?
                AOF_REWRITE_ITEMS_PER_CMD : items; 一个命令有规定的最大参数个数,这里是64

            if (rioWriteBulkCount(r,'*',2+cmd_items*2) == 0) return 0;
            if (rioWriteBulkString(r,"HMSET",5) == 0) return 0;
            if (rioWriteBulkObject(r,key) == 0) return 0;
        }

        if (rioWriteHashIteratorCursor(r, hi, OBJ_HASH_KEY) == 0) return 0; 写入键
        if (rioWriteHashIteratorCursor(r, hi, OBJ_HASH_VALUE) == 0) return 0; 写入值
        if (++count == AOF_REWRITE_ITEMS_PER_CMD) count = 0;
        items--;
    }

    hashTypeReleaseIterator(hi);

    return 1;
}

/* Helper for rewriteStreamObject() that generates a bulk string into the
 * AOF representing the ID 'id'. */
函数rewriteStreamObject的帮助程序,该程序产生一个大块的字符串给AOF,用来表示序列号id
int rioWriteBulkStreamID(rio *r,streamID *id) {
    int retval;

    sds replyid = sdscatfmt(sdsempty(),"%U-%U",id->ms,id->seq);
    retval = rioWriteBulkString(r,replyid,sdslen(replyid));
    sdsfree(replyid);
    return retval;
}
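
For reference, a stream ID is the pair <milliseconds>-<sequence>, so an id with id->ms = 1526919030474 and id->seq = 55 is emitted as the bulk string "1526919030474-55".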

/* Helper for rewriteStreamObject(): emit the XCLAIM needed in order to
 * add the message described by 'nack' having the id 'rawid', into the pending
 * list of the specified consumer. All this in the context of the specified
 * key and group. */
函数rewriteStreamObject的辅助程序:发出所需的XCLAIM命令,
以便将id为“rawid”的“nack”描述的消息添加到指定使用者的挂起列表中。所有这些都在指定键和组的上下文中进行。
int rioWriteStreamPendingEntry(rio *r, robj *key, const char *groupname, size_t groupname_len, streamConsumer *consumer, unsigned char *rawid, streamNACK *nack) {
     /* XCLAIM <key> <group> <consumer> 0 <id> TIME <milliseconds-unix-time>
               RETRYCOUNT <count> JUSTID FORCE. */
    streamID id;
    streamDecodeID(rawid,&id); 获取id ,写入XCLAIM命令
    if (rioWriteBulkCount(r,'*',12) == 0) return 0;
    if (rioWriteBulkString(r,"XCLAIM",6) == 0) return 0;
    if (rioWriteBulkObject(r,key) == 0) return 0;
    if (rioWriteBulkString(r,groupname,groupname_len) == 0) return 0;
    if (rioWriteBulkString(r,consumer->name,sdslen(consumer->name)) == 0) return 0;
    if (rioWriteBulkString(r,"0",1) == 0) return 0;
    if (rioWriteBulkStreamID(r,&id) == 0) return 0;
    if (rioWriteBulkString(r,"TIME",4) == 0) return 0;
    if (rioWriteBulkLongLong(r,nack->delivery_time) == 0) return 0;
    if (rioWriteBulkString(r,"RETRYCOUNT",10) == 0) return 0;
    if (rioWriteBulkLongLong(r,nack->delivery_count) == 0) return 0;
    if (rioWriteBulkString(r,"JUSTID",6) == 0) return 0;
    if (rioWriteBulkString(r,"FORCE",5) == 0) return 0;
    return 1;
}

/* Emit the commands needed to rebuild a stream object.
 * The function returns 0 on error, 1 on success. */
发出重建流对象所需的命令。成功返回1失败返回0
int rewriteStreamObject(rio *r, robj *key, robj *o) {
    stream *s = o->ptr;
    streamIterator si;
    streamIteratorStart(&si,s,NULL,NULL,0);
    streamID id;
    int64_t numfields;

    if (s->length) {
        /* Reconstruct the stream data using XADD commands. */使用XADD命令重构流数据
        while(streamIteratorGetID(&si,&id,&numfields)) {
            /* Emit a two elements array for each item. The first is
             * the ID, the second is an array of field-value pairs. */
For each entry emit a two-element array: the first element is the ID, the second is an array of field-value pairs
            /* Emit the XADD <key> <id> ...fields... command. */
            if (rioWriteBulkCount(r,'*',3+numfields*2) == 0) return 0;
            if (rioWriteBulkString(r,"XADD",4) == 0) return 0;
            if (rioWriteBulkObject(r,key) == 0) return 0;
            if (rioWriteBulkStreamID(r,&id) == 0) return 0;
            while(numfields--) {
                unsigned char *field, *value;
                int64_t field_len, value_len;
                streamIteratorGetField(&si,&field,&value,&field_len,&value_len);
                if (rioWriteBulkString(r,(char*)field,field_len) == 0) return 0;
                if (rioWriteBulkString(r,(char*)value,value_len) == 0) return 0;
            }
        }
    } else {
        /* Use the XADD MAXLEN 0 trick to generate an empty stream if
         * the key we are serializing is an empty string, which is possible
         * for the Stream type. */
如果要序列化的键是空字符串,则使用XADD MAXLEN 0技巧生成空流,这种情况对于流类型是可能存在的。
        id.ms = 0; id.seq = 1; 
        if (rioWriteBulkCount(r,'*',7) == 0) return 0;
        if (rioWriteBulkString(r,"XADD",4) == 0) return 0;
        if (rioWriteBulkObject(r,key) == 0) return 0;
        if (rioWriteBulkString(r,"MAXLEN",6) == 0) return 0;
        if (rioWriteBulkString(r,"0",1) == 0) return 0;
        if (rioWriteBulkStreamID(r,&id) == 0) return 0;
        if (rioWriteBulkString(r,"x",1) == 0) return 0;
        if (rioWriteBulkString(r,"y",1) == 0) return 0;
    }

    /* Append XSETID after XADD, make sure lastid is correct,
     * in case of XDEL lastid. */
在命令XADD后面追加命令XSETID,如果是XDEL 最后一个id的情况,确保最后一个id是正确的。
    if (rioWriteBulkCount(r,'*',3) == 0) return 0;
    if (rioWriteBulkString(r,"XSETID",6) == 0) return 0;
    if (rioWriteBulkObject(r,key) == 0) return 0;
    if (rioWriteBulkStreamID(r,&s->last_id) == 0) return 0;


    /* Create all the stream consumer groups. */ 创建所有流消费者的组
    if (s->cgroups) {
        raxIterator ri;
        raxStart(&ri,s->cgroups);
        raxSeek(&ri,"^",NULL,0);
        while(raxNext(&ri)) {
            streamCG *group = ri.data;
            /* Emit the XGROUP CREATE in order to create the group. */
            为了创建组发出XGROUP CREATE 命令
            if (rioWriteBulkCount(r,'*',5) == 0) return 0;
            if (rioWriteBulkString(r,"XGROUP",6) == 0) return 0;
            if (rioWriteBulkString(r,"CREATE",6) == 0) return 0;
            if (rioWriteBulkObject(r,key) == 0) return 0;
            if (rioWriteBulkString(r,(char*)ri.key,ri.key_len) == 0) return 0;
            if (rioWriteBulkStreamID(r,&group->last_id) == 0) return 0;

            /* Generate XCLAIMs for each consumer that happens to
             * have pending entries. Empty consumers have no semantical
             * value so they are discarded. */
Generate XCLAIMs for each consumer that happens to have pending entries. Empty consumers have no semantic value, so they are discarded.
            raxIterator ri_cons;
            raxStart(&ri_cons,group->consumers);
            raxSeek(&ri_cons,"^",NULL,0);
            while(raxNext(&ri_cons)) {
                streamConsumer *consumer = ri_cons.data;
                /* For the current consumer, iterate all the PEL entries
                 * to emit the XCLAIM protocol. */
对于当前的消费者,迭代所有的等待实体列表,对其发出XCLAIM协议
                raxIterator ri_pel;
                raxStart(&ri_pel,consumer->pel);
                raxSeek(&ri_pel,"^",NULL,0);
                while(raxNext(&ri_pel)) {
                    streamNACK *nack = ri_pel.data;
                    if (rioWriteStreamPendingEntry(r,key,(char*)ri.key,
                                                   ri.key_len,consumer,
                                                   ri_pel.key,nack) == 0)
                    {
                        return 0;
                    }
                }
                raxStop(&ri_pel);
            }
            raxStop(&ri_cons);
        }
        raxStop(&ri);
    }

    streamIteratorStop(&si);
    return 1;
}

/* Call the module type callback in order to rewrite a data type
 * that is exported by a module and is not handled by Redis itself.
 * The function returns 0 on error, 1 on success. */
调用模块类型回调以重写数据类型,该数据类型由模块导出且不由Redis本身处理,函数出错时返回0,成功时返回1。
int rewriteModuleObject(rio *r, robj *key, robj *o) {
    RedisModuleIO io;
    moduleValue *mv = o->ptr;
    moduleType *mt = mv->type;
    moduleInitIOContext(io,mt,r,key);
    mt->aof_rewrite(&io,key,mv->value); 模块的写函数
    if (io.ctx) {
        moduleFreeContext(io.ctx);
        zfree(io.ctx);
    }
    return io.error ? 0 : 1;
}

/* This function is called by the child rewriting the AOF file to read
 * the difference accumulated from the parent into a buffer, that is
 * concatenated at the end of the rewrite. */
重写AOF文件的子进程调用此函数,以将父进程累积的差异读取到缓冲区中,该缓冲区在重写结束时连接。
(就是将子进程写AOF文件的时候,因为程序还在运行,所以父进程还会制造出差异,这些差异也要写入AOF)
ssize_t aofReadDiffFromParent(void) {
    char buf[65536]; /* Default pipe buffer size on most Linux systems. */ 大多数Linux系统上的默认管道缓冲区大小。
    ssize_t nread, total = 0;

    while ((nread =
            read(server.aof_pipe_read_data_from_parent,buf,sizeof(buf))) > 0) {
        server.aof_child_diff = sdscatlen(server.aof_child_diff,buf,nread);
        total += nread;
    }
    return total;
}
rewriteAppendOnlyFileRio(): dump every database into the given rio stream as a sequence of commands.
int rewriteAppendOnlyFileRio(rio *aof) {
    dictIterator *di = NULL;
    dictEntry *de;
    size_t processed = 0;
    int j;

    for (j = 0; j < server.dbnum; j++) {
        char selectcmd[] = "*2\r\n$6\r\nSELECT\r\n"; the SELECT command header in RESP form
        redisDb *db = server.db+j;
        dict *d = db->dict;
        if (dictSize(d) == 0) continue; skip empty databases
        di = dictGetSafeIterator(d);

        /* SELECT the new DB */
        if (rioWrite(aof,selectcmd,sizeof(selectcmd)-1) == 0) goto werr; emit the SELECT header
        if (rioWriteBulkLongLong(aof,j) == 0) goto werr; followed by the DB index as a bulk string

        /* Iterate this DB writing every entry */
        iterate every key in this DB and dump it together with its value
        while((de = dictNext(di)) != NULL) {
            sds keystr;
            robj key, *o;
            long long expiretime;

            keystr = dictGetKey(de);
            o = dictGetVal(de);
            initStaticStringObject(key,keystr);

            expiretime = getExpire(db,&key);

            /* Save the key and associated value */
            if (o->type == OBJ_STRING) {
                /* Emit a SET command */
                char cmd[]="*3\r\n$3\r\nSET\r\n";
                if (rioWrite(aof,cmd,sizeof(cmd)-1) == 0) goto werr;
                /* Key and value */
                if (rioWriteBulkObject(aof,&key) == 0) goto werr;
                if (rioWriteBulkObject(aof,o) == 0) goto werr;
            } else if (o->type == OBJ_LIST) {
                if (rewriteListObject(aof,&key,o) == 0) goto werr;
            } else if (o->type == OBJ_SET) {
                if (rewriteSetObject(aof,&key,o) == 0) goto werr;
            } else if (o->type == OBJ_ZSET) {
                if (rewriteSortedSetObject(aof,&key,o) == 0) goto werr;
            } else if (o->type == OBJ_HASH) {
                if (rewriteHashObject(aof,&key,o) == 0) goto werr;
            } else if (o->type == OBJ_STREAM) {
                if (rewriteStreamObject(aof,&key,o) == 0) goto werr;
            } else if (o->type == OBJ_MODULE) {
                if (rewriteModuleObject(aof,&key,o) == 0) goto werr;
            } else {
                serverPanic("Unknown object type");
            }
            /* Save the expire time */
            if (expiretime != -1) {
                char cmd[]="*3\r\n$9\r\nPEXPIREAT\r\n";
                if (rioWrite(aof,cmd,sizeof(cmd)-1) == 0) goto werr;
                if (rioWriteBulkObject(aof,&key) == 0) goto werr;
                if (rioWriteBulkLongLong(aof,expiretime) == 0) goto werr;
            }
            /* Read some diff from the parent process from time to time. */
            every AOF_READ_DIFF_INTERVAL_BYTES of output, pull some of the diff from the parent
            if (aof->processed_bytes > processed+AOF_READ_DIFF_INTERVAL_BYTES) {
                processed = aof->processed_bytes;
                aofReadDiffFromParent();
            }
        }
        dictReleaseIterator(di);
        di = NULL;
    }
    return C_OK;

werr:
    if (di) dictReleaseIterator(di);
    return C_ERR;
}
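
As a quick illustration of what this function writes, suppose database 0 contains only a hypothetical string key foo with value bar and an expire at millisecond timestamp 1700000000000. The emitted bytes would be (each line terminated by \r\n):

    *2
    $6
    SELECT
    $1
    0
    *3
    $3
    SET
    $3
    foo
    $3
    bar
    *3
    $9
    PEXPIREAT
    $3
    foo
    $13
    1700000000000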

/* Write a sequence of commands able to fully rebuild the dataset into
 * "filename". Used both by REWRITEAOF and BGREWRITEAOF.
Write into "filename" a sequence of commands able to fully rebuild the dataset; used by both REWRITEAOF and BGREWRITEAOF.
 * In order to minimize the number of commands needed in the rewritten
 * log Redis uses variadic commands when possible, such as RPUSH, SADD
 * and ZADD. However at max AOF_REWRITE_ITEMS_PER_CMD items per time
 * are inserted using a single command. */
To keep the rewritten log small, Redis uses variadic commands (RPUSH, SADD, ZADD, ...) where possible, but a single command inserts at most AOF_REWRITE_ITEMS_PER_CMD (64) items.
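
Before moving on to rewriteAppendOnlyFile() below, a tiny standalone sketch of that batching rule (hypothetical illustration, not code from aof.c):

#include <stdio.h>

#define AOF_REWRITE_ITEMS_PER_CMD 64

/* Emit a made-up collection of nitems elements as a series of variadic
 * commands, each carrying at most AOF_REWRITE_ITEMS_PER_CMD elements;
 * here we only print the plan instead of writing RESP to a rio. */
int main(void) {
    long long nitems = 150;   /* made-up element count */
    long long emitted = 0;
    while (emitted < nitems) {
        long long batch = nitems - emitted;
        if (batch > AOF_REWRITE_ITEMS_PER_CMD) batch = AOF_REWRITE_ITEMS_PER_CMD;
        printf("RPUSH mylist <%lld elements>\n", batch);   /* prints 64, 64, 22 */
        emitted += batch;
    }
    return 0;
}
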
int rewriteAppendOnlyFile(char *filename) {
    rio aof;
    FILE *fp;
    char tmpfile[256];
    char byte;

    /* Note that we have to use a different temp name here compared to the
     * one used by rewriteAppendOnlyFileBackground() function. */
Note that the temp file name used here must differ from the one used by the rewriteAppendOnlyFileBackground() function.
    snprintf(tmpfile,256,"temp-rewriteaof-%d.aof", (int) getpid());
    fp = fopen(tmpfile,"w");
    if (!fp) {
        serverLog(LL_WARNING, "Opening the temp file for AOF rewrite in rewriteAppendOnlyFile(): %s", strerror(errno));
        return C_ERR;
    }

    server.aof_child_diff = sdsempty();
    rioInitWithFile(&aof,fp);

    if (server.aof_rewrite_incremental_fsync)
        rioSetAutoSync(&aof,REDIS_AUTOSYNC_BYTES); fsync incrementally every REDIS_AUTOSYNC_BYTES written

    startSaving(RDBFLAGS_AOF_PREAMBLE); mark the start of a save, flagged as the RDB preamble of an AOF

    if (server.aof_use_rdb_preamble) { should the AOF start with an RDB-format preamble?
        int error;
        if (rdbSaveRio(&aof,&error,RDBFLAGS_AOF_PREAMBLE,NULL) == C_ERR) {
            errno = error;
            goto werr;
        }
    } else { otherwise plain AOF (command) format
        if (rewriteAppendOnlyFileRio(&aof) == C_ERR) goto werr;
    }

    /* Do an initial slow fsync here while the parent is still sending
     * data, in order to make the next final fsync faster. */
Do an initial (slow) fsync now, while the parent is still sending data, so that the final fsync will be faster.
    if (fflush(fp) == EOF) goto werr; flush stdio buffers to the kernel
    if (fsync(fileno(fp)) == -1) goto werr; force the data to disk

    /* Read again a few times to get more data from the parent.
     * We can't read forever (the server may receive data from clients
     * faster than it is able to send data to the child), so we try to read
     * some more data in a loop as soon as there is a good chance more data
     * will come. If it looks like we are wasting time, we abort (this
     * happens after 20 ms without new data). */
Read a few more times to pick up additional data from the parent.
We cannot read forever (the server may receive data from clients faster than it can push it to the child),
so we keep reading in a loop as long as more data is likely to arrive;
if it looks like we are only wasting time (20 ms pass without new data), we stop.

    int nodata = 0;
    mstime_t start = mstime();
    while(mstime()-start < 1000 && nodata < 20) {
        if (aeWait(server.aof_pipe_read_data_from_parent, AE_READABLE, 1) <= 0)
        {
            nodata++;
            continue;
        }
        nodata = 0; /* Start counting from zero, we stop on N *contiguous*
                       timeouts. */ any successful read restarts the count from zero
        aofReadDiffFromParent();
    }

    /* Ask the master to stop sending diffs. */
    i.e. tell the parent process to stop sending diffs
    if (write(server.aof_pipe_write_ack_to_parent,"!",1) != 1) goto werr;
    if (anetNonBlock(NULL,server.aof_pipe_read_ack_from_parent) != ANET_OK)
        goto werr;
    /* We read the ACK from the server using a 10 seconds timeout. Normally
     * it should reply ASAP, but just in case we lose its reply, we are sure
     * the child will eventually get terminated. */
We read the ACK from the parent with a timeout; normally it replies right away, but even if the reply is lost, the child will eventually be terminated anyway.
    if (syncRead(server.aof_pipe_read_ack_from_parent,&byte,1,5000) != 1 ||
        byte != '!') goto werr;
(Note: the timeout passed here is 5000 ms, not the 10 seconds mentioned in the comment above; the source comment was simply not kept up to date.)
    serverLog(LL_NOTICE,"Parent agreed to stop sending diffs. Finalizing AOF...");

    /* Read the final diff if any. */
    aofReadDiffFromParent();

    /* Write the received diff to the file. */
    serverLog(LL_NOTICE,  
        "Concatenating %.2f MB of AOF diff received from parent.",
        (double) sdslen(server.aof_child_diff) / (1024*1024));
    if (rioWrite(&aof,server.aof_child_diff,sdslen(server.aof_child_diff)) == 0)
        goto werr;

    /* Make sure data will not remain on the OS's output buffers */
    make sure no data is left behind in the OS output buffers
    if (fflush(fp) == EOF) goto werr; flush stdio to the kernel
    if (fsync(fileno(fp)) == -1) goto werr; fsync to disk
    if (fclose(fp) == EOF) goto werr; close the file

    /* Use RENAME to make sure the DB file is changed atomically only
     * if the generate DB file is ok. */
Use rename(2) so the final AOF file is replaced atomically, and only if the newly generated file is complete.
    if (rename(tmpfile,filename) == -1) {
        serverLog(LL_WARNING,"Error moving temp append only file on the final destination: %s", strerror(errno));
        unlink(tmpfile); remove the temp file
        stopSaving(0);
        return C_ERR;
    }
    serverLog(LL_NOTICE,"SYNC append only file rewrite performed"); 成功执行
    stopSaving(1);
    return C_OK;

werr:
    serverLog(LL_WARNING,"Write error writing append only file on disk: %s", strerror(errno));
    fclose(fp);
    unlink(tmpfile);
    stopSaving(0);
    return C_ERR;
}

/* ----------------------------------------------------------------------------
 * AOF rewrite pipes for IPC
 * -------------------------------------------------------------------------- */
The pipes used for parent/child IPC during the AOF rewrite.
/* This event handler is called when the AOF rewriting child sends us a
 * single '!' char to signal we should stop sending buffer diffs. The
 * parent sends a '!' as well to acknowledge. */
This handler runs when the AOF-rewriting child sends a single '!' character to tell us to stop sending buffer diffs; the parent answers with its own '!' to acknowledge.
void aofChildPipeReadable(aeEventLoop *el, int fd, void *privdata, int mask) {
    char byte;
    UNUSED(el);
    UNUSED(privdata);
    UNUSED(mask);

    if (read(fd,&byte,1) == 1 && byte == '!') {  the child sent its '!' marker
        serverLog(LL_NOTICE,"AOF rewrite child asks to stop sending diffs.");
        server.aof_stop_sending_diff = 1;
        if (write(server.aof_pipe_write_ack_to_child,"!",1) != 1) { sending the reply to the child failed
            /* If we can't send the ack, inform the user, but don't try again
             * since in the other side the children will use a timeout if the
             * kernel can't buffer our write, or, the children was
             * terminated. */
             If the ACK cannot be sent, inform the user but do not retry: on the other side the child
             either uses a timeout when the kernel cannot buffer our write, or it has already terminated.
            serverLog(LL_WARNING,"Can't send ACK to AOF child: %s",
                strerror(errno));
        }
    }
    /* Remove the handler since this can be called only one time during a
     * rewrite. */
    aeDeleteFileEvent(server.el,server.aof_pipe_read_ack_from_child,AE_READABLE);
}
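
Putting the two sides together, the stop-diff handshake between the child (in rewriteAppendOnlyFile above) and the parent (in this handler) is simply:

    child : write('!') on aof_pipe_write_ack_to_parent        "please stop sending diffs"
    parent: aofChildPipeReadable() reads the '!', sets aof_stop_sending_diff = 1,
            and writes '!' back on aof_pipe_write_ack_to_child
    child : syncRead() waits for that '!' on aof_pipe_read_ack_from_parent (with a timeout)
    child : aofReadDiffFromParent() drains whatever is left in the data pipe,
            writes it to the temp file, fsyncs and renames it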

/* Create the pipes used for parent - child process IPC during rewrite.
 * We have a data pipe used to send AOF incremental diffs to the child,
 * and two other pipes used by the children to signal it finished with
 * the rewrite so no more data should be written, and another for the
 * parent to acknowledge it understood this new condition. */
Create the pipes used for parent/child IPC during the rewrite:
one data pipe carries the AOF incremental diffs to the child, one pipe lets the child signal that it has
finished the rewrite (so no more data should be written), and one lets the parent acknowledge that signal.
int aofCreatePipes(void) {
    int fds[6] = {-1, -1, -1, -1, -1, -1};
    int j;

    if (pipe(fds) == -1) goto error; /* parent -> children data. */
    if (pipe(fds+2) == -1) goto error; /* children -> parent ack. */
    if (pipe(fds+4) == -1) goto error; /* parent -> children ack. */
    /* Parent -> children data is non blocking. */
    if (anetNonBlock(NULL,fds[0]) != ANET_OK) goto error;
    if (anetNonBlock(NULL,fds[1]) != ANET_OK) goto error;
    if (aeCreateFileEvent(server.el, fds[2], AE_READABLE, aofChildPipeReadable, NULL) == AE_ERR) goto error;

    server.aof_pipe_write_data_to_child = fds[1];
    server.aof_pipe_read_data_from_parent = fds[0];
    server.aof_pipe_write_ack_to_parent = fds[3];
    server.aof_pipe_read_ack_from_child = fds[2];
    server.aof_pipe_write_ack_to_child = fds[5];
    server.aof_pipe_read_ack_from_parent = fds[4];
    server.aof_stop_sending_diff = 0;
    return C_OK;

error:
    serverLog(LL_WARNING,"Error opening /setting AOF rewrite IPC pipes: %s",
        strerror(errno));
    for (j = 0; j < 6; j++) if(fds[j] != -1) close(fds[j]); close whichever of the six descriptors were opened
    return C_ERR;
}
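
Since pipe(2) fills its two-element array with { read end, write end }, the six descriptors above map as follows (just a restatement of the assignments in the code):

    fds[0] -> aof_pipe_read_data_from_parent    child reads the incremental diff
    fds[1] -> aof_pipe_write_data_to_child      parent writes the incremental diff
    fds[2] -> aof_pipe_read_ack_from_child      parent reads the child's '!'
    fds[3] -> aof_pipe_write_ack_to_parent      child sends its '!'
    fds[4] -> aof_pipe_read_ack_from_parent     child reads the parent's '!'
    fds[5] -> aof_pipe_write_ack_to_child       parent sends its '!'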

void aofClosePipes(void) { close all the pipes and unregister the related file events
    aeDeleteFileEvent(server.el,server.aof_pipe_read_ack_from_child,AE_READABLE);
    aeDeleteFileEvent(server.el,server.aof_pipe_write_data_to_child,AE_WRITABLE);
    close(server.aof_pipe_write_data_to_child);
    close(server.aof_pipe_read_data_from_parent);
    close(server.aof_pipe_write_ack_to_parent);
    close(server.aof_pipe_read_ack_from_child);
    close(server.aof_pipe_write_ack_to_child);
    close(server.aof_pipe_read_ack_from_parent);
}

/* ----------------------------------------------------------------------------
 * AOF background rewrite
 * ------------------------------------------------------------------------- */
Background AOF rewrite
/* This is how rewriting of the append only file in background works:
 * 1) The user calls BGREWRITEAOF
 * 2) Redis calls this function, that forks():
 *    2a) the child rewrite the append only file in a temp file.
 *    2b) the parent accumulates differences in server.aof_rewrite_buf.
 * 3) When the child finished '2a' exists. ("exists" is a typo in the upstream comment; it means "exits")
 * 4) The parent will trap the exit code, if it's OK, will append the
 *    data accumulated into server.aof_rewrite_buf into the temp file, and
 *    finally will rename(2) the temp file in the actual file name.
 *    The the new file is reopened as the new append only file. Profit!
4) The parent traps the exit code; if it is OK, it appends the data accumulated in server.aof_rewrite_buf
to the temp file, finally rename(2)s the temp file onto the real AOF name, and the new file is reopened
as the new append only file. Done!
 */
int rewriteAppendOnlyFileBackground(void) {
    pid_t childpid;

    if (hasActiveChildProcess()) return C_ERR;
    if (aofCreatePipes() != C_OK) return C_ERR;
    openChildInfoPipe();
    if ((childpid = redisFork()) == 0) {  child process
        char tmpfile[256];

        /* Child */
        redisSetProcTitle("redis-aof-rewrite");
        redisSetCpuAffinity(server.aof_rewrite_cpulist);
        snprintf(tmpfile,256,"temp-rewriteaof-bg-%d.aof", (int) getpid());
        if (rewriteAppendOnlyFile(tmpfile) == C_OK) {
            sendChildCOWInfo(CHILD_INFO_TYPE_AOF, "AOF rewrite");
            exitFromChild(0);
        } else {
            exitFromChild(1);
        }
    } else { parent process
        /* Parent */
        if (childpid == -1) { fork failed
            closeChildInfoPipe();
            serverLog(LL_WARNING,
                "Can't rewrite append only file in background: fork: %s",
                strerror(errno));
            aofClosePipes();
            return C_ERR;
        }
        serverLog(LL_NOTICE,
            "Background append only file rewriting started by pid %d",childpid);
        server.aof_rewrite_scheduled = 0;
        server.aof_rewrite_time_start = time(NULL);
        server.aof_child_pid = childpid;
        /* We set appendseldb to -1 in order to force the next call to the
         * feedAppendOnlyFile() to issue a SELECT command, so the differences
         * accumulated by the parent into server.aof_rewrite_buf will start
         * with a SELECT statement and it will be safe to merge. */
Setting aof_selected_db to -1 forces the next feedAppendOnlyFile() call to emit a SELECT first, so the
diff the parent accumulates into server.aof_rewrite_buf starts with a SELECT and can be merged safely.
        server.aof_selected_db = -1;
        replicationScriptCacheFlush();
        return C_OK;
    }
    return C_OK; /* unreached */
}
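
Note the chain of temp file names involved (pid is the child's pid): rewriteAppendOnlyFile() first writes to its own temp name and then rename(2)s it onto the name the child passed in here, which backgroundRewriteDoneHandler() later renames onto the configured AOF file:

    temp-rewriteaof-<pid>.aof  ->  temp-rewriteaof-bg-<pid>.aof  ->  server.aof_filename
    (inside rewriteAppendOnlyFile)   (the tmpfile built by the child)   (done by backgroundRewriteDoneHandler)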

void bgrewriteaofCommand(client *c) {
    if (server.aof_child_pid != -1) {
        addReplyError(c,"Background append only file rewriting already in progress");
    } else if (hasActiveChildProcess()) {
        server.aof_rewrite_scheduled = 1;
        addReplyStatus(c,"Background append only file rewriting scheduled");
    } else if (rewriteAppendOnlyFileBackground() == C_OK) {
        addReplyStatus(c,"Background append only file rewriting started");
    } else {
        addReplyError(c,"Can't execute an AOF background rewriting. "
                        "Please check the server logs for more information.");
    }
}

void aofRemoveTempFile(pid_t childpid) {
    char tmpfile[256];

    snprintf(tmpfile,256,"temp-rewriteaof-bg-%d.aof", (int) childpid);
    unlink(tmpfile);

    snprintf(tmpfile,256,"temp-rewriteaof-%d.aof", (int) childpid);
    unlink(tmpfile);
}

/* Update the server.aof_current_size field explicitly using stat(2)
 * to check the size of the file. This is useful after a rewrite or after
 * a restart, normally the size is updated just adding the write length
 * to the current length, that is much faster. */
Explicitly update server.aof_current_size by stat()-ing the file. Useful after a rewrite or a restart;
during normal operation the size is kept up to date simply by adding each write's length to the current
value, which is much faster.
void aofUpdateCurrentSize(void) {
    struct redis_stat sb;
    mstime_t latency;

    latencyStartMonitor(latency);
    if (redis_fstat(server.aof_fd,&sb) == -1) { fstat on the AOF file failed
        serverLog(LL_WARNING,"Unable to obtain the AOF file length. stat: %s",
            strerror(errno));
    } else {
        server.aof_current_size = sb.st_size;
    }
    latencyEndMonitor(latency);
    latencyAddSampleIfNeeded("aof-fstat",latency);
}

/* A background append only file rewriting (BGREWRITEAOF) terminated its work.
 * Handle this. */
A background AOF rewrite (BGREWRITEAOF) terminated; handle the result.
void backgroundRewriteDoneHandler(int exitcode, int bysignal) {
    if (!bysignal && exitcode == 0) {
        int newfd, oldfd;
        char tmpfile[256];
        long long now = ustime();
        mstime_t latency;

        serverLog(LL_NOTICE,
            "Background AOF rewrite terminated with success");

        /* Flush the differences accumulated by the parent to the
         * rewritten AOF. */
        latencyStartMonitor(latency);
        snprintf(tmpfile,256,"temp-rewriteaof-bg-%d.aof",
            (int)server.aof_child_pid);
        newfd = open(tmpfile,O_WRONLY|O_APPEND); open the child's temp file in append mode
        if (newfd == -1) {
            serverLog(LL_WARNING,
                "Unable to open the temporary AOF produced by the child: %s", strerror(errno));
            goto cleanup;
        }

        if (aofRewriteBufferWrite(newfd) == -1) { flushing the rewrite buffer to the file failed
            serverLog(LL_WARNING,
                "Error trying to flush the parent diff to the rewritten AOF: %s", strerror(errno));
            close(newfd);
            goto cleanup;
        }
        latencyEndMonitor(latency);
        latencyAddSampleIfNeeded("aof-rewrite-diff-write",latency);

        serverLog(LL_NOTICE,
            "Residual parent diff successfully flushed to the rewritten AOF (%.2f MB)", (double) aofRewriteBufferSize() / (1024*1024));

        /* The only remaining thing to do is to rename the temporary file to
         * the configured file and switch the file descriptor used to do AOF
         * writes. We don't want close(2) or rename(2) calls to block the
         * server on old file deletion.
The only thing left is renaming the temp file onto the configured file name and switching the fd used for
AOF writes; we do not want a close(2) or rename(2) call to block the server while the old file is unlinked.
         *
         * There are two possible scenarios:
         *
         * 1) AOF is DISABLED and this was a one time rewrite. The temporary
         * file will be renamed to the configured file. When this file already
         * exists, it will be unlinked, which may block the server.
(1) AOF is disabled and this was a one-off rewrite: the temp file is renamed onto the configured name;
if that file already exists it gets unlinked, which may block the server.
         * 2) AOF is ENABLED and the rewritten AOF will immediately start
         * receiving writes. After the temporary file is renamed to the
         * configured file, the original AOF file descriptor will be closed.
         * Since this will be the last reference to that file, closing it
         * causes the underlying file to be unlinked, which may block the
         * server.
(2) AOF is enabled and the rewritten AOF immediately starts receiving writes: after the temp file is renamed
onto the configured name, the old AOF descriptor is closed; since it is the last reference to that file,
closing it unlinks the underlying file, which may block the server.
         * To mitigate the blocking effect of the unlink operation (either
         * caused by rename(2) in scenario 1, or by close(2) in scenario 2), we
         * use a background thread to take care of this. First, we
         * make scenario 1 identical to scenario 2 by opening the target file
         * when it exists. The unlink operation after the rename(2) will then
         * be executed upon calling close(2) for its descriptor. Everything to
         * guarantee atomicity for this switch has already happened by then, so
         * we don't care what the outcome or duration of that close operation
         * is, as long as the file descriptor is released again. */
To avoid blocking on that unlink (caused by rename(2) in scenario 1, or by close(2) in scenario 2), a
background thread takes care of it. Scenario 1 is first made identical to scenario 2 by opening the target
file when it exists, so the unlink implied by the rename(2) actually happens when that descriptor is later
close(2)d. By then everything needed to make the switch atomic has already happened, so we do not care how
long that close takes or what it returns, as long as the descriptor is eventually released.
        if (server.aof_fd == -1) {
            /* AOF disabled */

            /* Don't care if this fails: oldfd will be -1 and we handle that.
             * One notable case of -1 return is if the old file does
             * not exist. */
It does not matter if this open fails: oldfd will be -1 and that case is handled; one notable way to get -1 is when the old file simply does not exist.
            oldfd = open(server.aof_filename,O_RDONLY|O_NONBLOCK);
        } else {
            /* AOF enabled */
            oldfd = -1; /* We'll set this to the current AOF filedes later. */
        }

        /* Rename the temporary file. This will not unlink the target file if
         * it exists, because we reference it with "oldfd". */
Rename the temporary file; even if the target file exists, its data is not destroyed yet, because we still reference it through oldfd.
        latencyStartMonitor(latency);
        if (rename(tmpfile,server.aof_filename) == -1) {
            serverLog(LL_WARNING,
                "Error trying to rename the temporary AOF file %s into %s: %s",
                tmpfile,
                server.aof_filename,
                strerror(errno));
            close(newfd);
            if (oldfd != -1) close(oldfd);
            goto cleanup;
        }
        latencyEndMonitor(latency);
        latencyAddSampleIfNeeded("aof-rename",latency);

        if (server.aof_fd == -1) {
            /* AOF disabled, we don't need to set the AOF file descriptor
             * to this new file, so we can close it. */
AOF is disabled, so there is no need to point the AOF descriptor at the new file; just close it.
            close(newfd);
        } else {
            /* AOF enabled, replace the old fd with the new one. */
            oldfd = server.aof_fd;
            server.aof_fd = newfd;
            if (server.aof_fsync == AOF_FSYNC_ALWAYS)
                redis_fsync(newfd);
            else if (server.aof_fsync == AOF_FSYNC_EVERYSEC)
                aof_background_fsync(newfd);
            server.aof_selected_db = -1; /* Make sure SELECT is re-issued */
            aofUpdateCurrentSize();
            server.aof_rewrite_base_size = server.aof_current_size;
            server.aof_fsync_offset = server.aof_current_size;

            /* Clear regular AOF buffer since its contents was just written to
             * the new AOF from the background rewrite buffer. */
Clear the regular AOF buffer: its contents have just been written into the new AOF via the background rewrite buffer.
            sdsfree(server.aof_buf);
            server.aof_buf = sdsempty();
        }

        server.aof_lastbgrewrite_status = C_OK;

        serverLog(LL_NOTICE, "Background AOF rewrite finished successfully");
        /* Change state from WAIT_REWRITE to ON if needed */
        if (server.aof_state == AOF_WAIT_REWRITE)
            server.aof_state = AOF_ON;

        /* Asynchronously close the overwritten AOF. */
        if (oldfd != -1) bioCreateBackgroundJob(BIO_CLOSE_FILE,(void*)(long)oldfd,NULL,NULL);

        serverLog(LL_VERBOSE,
            "Background AOF rewrite signal handler took %lldus", ustime()-now);
    } else if (!bysignal && exitcode != 0) {
        server.aof_lastbgrewrite_status = C_ERR;

        serverLog(LL_WARNING,
            "Background AOF rewrite terminated with error");
    } else {
        /* SIGUSR1 is whitelisted, so we have a way to kill a child without
         * tirggering an error condition. */
SIGUSR1 is whitelisted, so there is a way to kill the child without it counting as an error.
        if (bysignal != SIGUSR1)
            server.aof_lastbgrewrite_status = C_ERR;

        serverLog(LL_WARNING,
            "Background AOF rewrite terminated by signal %d", bysignal);
    }

cleanup:
    aofClosePipes();
    aofRewriteBufferReset();
    aofRemoveTempFile(server.aof_child_pid);
    server.aof_child_pid = -1;
    server.aof_rewrite_time_last = time(NULL)-server.aof_rewrite_time_start;
    server.aof_rewrite_time_start = -1;
    /* Schedule a new rewrite if we are waiting for it to switch the AOF ON. */
If we were waiting for this rewrite in order to switch AOF on, schedule a new rewrite.
    if (server.aof_state == AOF_WAIT_REWRITE)
        server.aof_rewrite_scheduled = 1;
}

***********************************************************************************************

 

posted on 2021-10-25 19:59 by 子虚乌有