Redis 6.0.5 networking.c reading notes: talking to clients

#include "server.h"
#include "atomicvar.h"
#include <sys/socket.h>
#include <sys/uio.h>
#include <math.h>
#include <ctype.h>

static void setProtocolError(const char *errstr, client *c);
int postponeClientRead(client *c);
int ProcessingEventsWhileBlocked = 0; /* See processEventsWhileBlocked(). */

/* Return the size consumed from the allocator, for the specified SDS string,
 * including internal fragmentation. This function is used in order to compute
 * the client output buffer size. */
size_t sdsZmallocSize(sds s) {
    void *sh = sdsAllocPtr(s); // pointer to the start of the underlying allocation (the SDS header)
    return zmalloc_size(sh);   // size of that allocation as reported by the allocator
}

/* Return the amount of memory used by the sds string at object->ptr
 * for a string object. */
size_t getStringObjectSdsUsedMemory(robj *o) {
    serverAssertWithInfo(NULL,o,o->type == OBJ_STRING); // must be a string object
    switch(o->encoding) {
    case OBJ_ENCODING_RAW: return sdsZmallocSize(o->ptr); // size of the sds string itself
    case OBJ_ENCODING_EMBSTR: return zmalloc_size(o)-sizeof(robj); // whole object minus the robj header leaves the string part
    default: return 0; /* Just integer encoding for now. */ // e.g. "123": the value lives in the pointer, no sds memory
    }
    }
}
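
As a quick illustration of the three encodings (a sketch, not from networking.c; it assumes the object.c constructors and the usual 44-byte embstr limit):

    const char *long_str = "this string is definitely longer than forty-four bytes";
    robj *raw = createStringObject(long_str, strlen(long_str)); // > 44 bytes: OBJ_ENCODING_RAW
    robj *emb = createStringObject("short", 5); // <= 44 bytes: OBJ_ENCODING_EMBSTR
    robj *num = createStringObjectFromLongLong(12345); // OBJ_ENCODING_INT, value lives in the pointer

    size_t used = getStringObjectSdsUsedMemory(raw); // sdsZmallocSize(raw->ptr)
    used += getStringObjectSdsUsedMemory(emb); // zmalloc_size(emb) - sizeof(robj)
    used += getStringObjectSdsUsedMemory(num); // 0: no sds allocation behind an integer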

/* This structure is used in order to represent the output buffer of a client,
 * which is actually a linked list of blocks like that, that is: client->reply. */
typedef struct clientReplyBlock {
    size_t size, used;
    char buf[];
} clientReplyBlock;

/* Client.reply list dup and free methods. */
The dup method:
void *dupClientReplyValue(void *o) {
    clientReplyBlock *old = o; // the old block
    clientReplyBlock *buf = zmalloc(sizeof(clientReplyBlock) + old->size); // allocate header + payload
    memcpy(buf, o, sizeof(clientReplyBlock) + old->size); // copy the old block into the new one
    return buf;
}
The free method:
void freeClientReplyValue(void *o) {
    zfree(o); // one zfree suffices: the flexible buf[] payload is allocated inline with the header
}

Are two string objects equal?
int listMatchObjects(void *a, void *b) {
    return equalStringObjects(a,b);
}

/* This function links the client to the global linked list of clients.
 * unlinkClient() does the opposite, among other things. */
void linkClient(client *c) {
    listAddNodeTail(server.clients,c); // append this client to the global client list
    /* Note that we remember the linked list node where the client is stored,
     * this way removing the client in unlinkClient() will not require
     * a linear scan, but just a constant time operation. */
    c->client_list_node = listLast(server.clients); // remember our own list node
    uint64_t id = htonu64(c->id); // the unique client ID, converted to big endian so rax keys sort by ID
    raxInsert(server.clients_index,(unsigned char*)&id,sizeof(id),c,NULL); // index the client by ID in the radix tree
}
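
The reverse lookup goes through the same rax. Roughly (a sketch from memory; the real function is lookupClientByID() later in this file):

    client *lookupClientByID(uint64_t id) {
        id = htonu64(id); // same big-endian key as in linkClient()
        client *c = raxFind(server.clients_index,(unsigned char*)&id,sizeof(id));
        return (c == raxNotFound) ? NULL : c;
    }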


typedef struct client {
    uint64_t id;            /* Client incremental unique ID. */
    connection *conn;       // the underlying connection
    int resp;               /* RESP protocol version. Can be 2 or 3. */ // RESP = REdis Serialization Protocol
    redisDb *db;            /* Pointer to currently SELECTed DB. */
    robj *name;             /* As set by CLIENT SETNAME. */
    sds querybuf;           /* Buffer we use to accumulate client queries. */
    size_t qb_pos;          /* The position we have read in querybuf. */
    sds pending_querybuf;   /* If this client is flagged as master, this buffer
                               represents the yet not applied portion of the
                               replication stream that we are receiving from
                               the master. */
    size_t querybuf_peak;   /* Recent (100ms or more) peak of querybuf size. */
    int argc;               /* Num of arguments of current command. */
    robj **argv;            /* Arguments of current command. */
    struct redisCommand *cmd, *lastcmd;  /* Last command executed. */ // cmd is the current command
    user *user;             /* User associated with this connection. If the
                               user is set to NULL the connection can do
                               anything (admin). */
    int reqtype;            /* Request protocol type: PROTO_REQ_* */
    int multibulklen;       /* Number of multi bulk arguments left to read. */
    long bulklen;           /* Length of bulk argument in multi bulk request. */
    list *reply;            /* List of reply objects to send to the client. */
    unsigned long long reply_bytes; /* Tot bytes of objects in reply list. */
    size_t sentlen;         /* Amount of bytes already sent in the current
                               buffer or object being sent. */
    time_t ctime;           /* Client creation time. */
    time_t lastinteraction; /* Time of the last interaction, used for timeout */
    time_t obuf_soft_limit_reached_time; // when the output buffer soft limit was first reached
    uint64_t flags;         /* Client flags: CLIENT_* macros. */
    int authenticated;      /* Needed when the default user requires auth. */
    int replstate;          /* Replication state if this is a slave. */
    int repl_put_online_on_ack; /* Install slave write handler on first ACK. */
    int repldbfd;           /* Replication DB file descriptor. */
    off_t repldboff;        /* Replication DB file offset. */
    off_t repldbsize;       /* Replication DB file size. */
    sds replpreamble;       /* Replication DB preamble. */
    long long read_reploff; /* Read replication offset if this is a master. */
    long long reploff;      /* Applied replication offset if this is a master. */
    long long repl_ack_off; /* Replication ack offset, if this is a slave. */
    long long repl_ack_time;/* Replication ack time, if this is a slave. */
    long long psync_initial_offset; /* FULLRESYNC reply offset other slaves
                                       copying this slave output buffer
                                       should use. */
    char replid[CONFIG_RUN_ID_SIZE+1]; /* Master replication ID (if master). */ // CONFIG_RUN_ID_SIZE is 40
    int slave_listening_port; /* As configured with: SLAVECONF listening-port */

    #define NET_IP_STR_LEN 46 /* INET6_ADDRSTRLEN is 46, but we need to be sure */
    char slave_ip[NET_IP_STR_LEN]; /* Optionally given by REPLCONF ip-address */
    int slave_capa;         /* Slave capabilities: SLAVE_CAPA_* bitwise OR. */
    multiState mstate;      /* MULTI/EXEC state */
    int btype;              /* Type of blocking op if CLIENT_BLOCKED. */
    blockingState bpop;     /* blocking state */
    long long woff;         /* Last write global replication offset. */
    list *watched_keys;     /* Keys WATCHED for MULTI/EXEC CAS */
    dict *pubsub_channels;  /* channels a client is interested in (SUBSCRIBE) */
    list *pubsub_patterns;  /* patterns a client is interested in (SUBSCRIBE) */
    sds peerid;             /* Cached peer ID. */
    listNode *client_list_node; /* list node in client list */
    RedisModuleUserChangedFunc auth_callback; /* Module callback to execute
                                               * when the authenticated user
                                               * changes. */
    void *auth_callback_privdata; /* Private data that is passed when the auth
                                   * changed callback is executed. Opaque for
                                   * Redis Core. */
    void *auth_module;      /* The module that owns the callback, which is used
                             * to disconnect the client if the module is
                             * unloaded for cleanup. Opaque for Redis Core.*/

    /* If this client is in tracking mode and this field is non zero,
     * invalidation messages for keys fetched by this client will be sent to
     * the specified client ID. */
    uint64_t client_tracking_redirection;
    rax *client_tracking_prefixes; /* A dictionary of prefixes we are already
                                      subscribed to in BCAST mode, in the
                                      context of client side caching. */
    /* In clientsCronTrackClientsMemUsage() we track the memory usage of
     * each client and add it to the sum of all the clients of a given type,
     * however we need to remember what was the old contribution of each
     * client, and in which category the client was, in order to remove it
     * before adding it the new value. */
    uint64_t client_cron_last_memory_usage;
    int      client_cron_last_memory_type;
    /* Response buffer */
    int bufpos; // bytes currently used in buf
    char buf[PROTO_REPLY_CHUNK_BYTES]; // PROTO_REPLY_CHUNK_BYTES is (16*1024): a 16k static output buffer
} client;


Create a client from a connection:
client *createClient(connection *conn) {
    client *c = zmalloc(sizeof(client));

    /* passing NULL as conn it is possible to create a non connected client.
     * This is useful since all the commands needs to be executed
     * in the context of a client. When commands are executed in other
     * contexts (for instance a Lua script) we need a non connected client. */
    if (conn) {
        connNonBlock(conn); // set the socket non-blocking
        connEnableTcpNoDelay(conn); // disable Nagle: send without delay
        if (server.tcpkeepalive) // TCP keepalive configured?
            connKeepAlive(conn,server.tcpkeepalive); // enable liveness probing on the connection
        connSetReadHandler(conn, readQueryFromClient); // read callback, fired when the client has data for us
        connSetPrivateData(conn, c); // associate the client with the connection
    }

    selectDb(c,0); // default to DB 0
    uint64_t client_id = ++server.next_client_id; // pre-increment: take a fresh unique ID
    c->id = client_id;
    c->resp = 2; // protocol version, RESP2 by default
    c->conn = conn;
    c->name = NULL; 
    c->bufpos = 0;
    c->qb_pos = 0;
    c->querybuf = sdsempty();
    c->pending_querybuf = sdsempty();
    c->querybuf_peak = 0;
    c->reqtype = 0;
    c->argc = 0;
    c->argv = NULL;
    c->cmd = c->lastcmd = NULL;
    c->user = DefaultUser;
    c->multibulklen = 0;
    c->bulklen = -1;
    c->sentlen = 0;
    c->flags = 0;
    c->ctime = c->lastinteraction = server.unixtime;
    /* If the default user does not require authentication, the user is
     * directly authenticated. */
    c->authenticated = (c->user->flags & USER_FLAG_NOPASS) &&
                       !(c->user->flags & USER_FLAG_DISABLED);
    c->replstate = REPL_STATE_NONE;
    c->repl_put_online_on_ack = 0;
    c->reploff = 0;
    c->read_reploff = 0;
    c->repl_ack_off = 0;
    c->repl_ack_time = 0;
    c->slave_listening_port = 0;
    c->slave_ip[0] = '\0';
    c->slave_capa = SLAVE_CAPA_NONE;
    c->reply = listCreate();
    c->reply_bytes = 0;
    c->obuf_soft_limit_reached_time = 0;
    listSetFreeMethod(c->reply,freeClientReplyValue);
    listSetDupMethod(c->reply,dupClientReplyValue);
    c->btype = BLOCKED_NONE;
    c->bpop.timeout = 0;
    c->bpop.keys = dictCreate(&objectKeyHeapPointerValueDictType,NULL);
    c->bpop.target = NULL;
    c->bpop.xread_group = NULL;
    c->bpop.xread_consumer = NULL;
    c->bpop.xread_group_noack = 0;
    c->bpop.numreplicas = 0;
    c->bpop.reploffset = 0;
    c->woff = 0;
    c->watched_keys = listCreate();
    c->pubsub_channels = dictCreate(&objectKeyPointerValueDictType,NULL);
    c->pubsub_patterns = listCreate();
    c->peerid = NULL;
    c->client_list_node = NULL;
    c->client_tracking_redirection = 0;
    c->client_tracking_prefixes = NULL;
    c->client_cron_last_memory_usage = 0;
    c->client_cron_last_memory_type = CLIENT_TYPE_NORMAL;
    c->auth_callback = NULL;
    c->auth_callback_privdata = NULL;
    c->auth_module = NULL;
    listSetFreeMethod(c->pubsub_patterns,decrRefCountVoid);
    listSetMatchMethod(c->pubsub_patterns,listMatchObjects);
    if (conn) linkClient(c);
    initClientMultiState(c); // initialize the MULTI/EXEC state
    return c;
}

/* This function puts the client in the queue of clients that should write
 * their output buffers to the socket. Note that it does not *yet* install
 * the write handler, to start clients are put in a queue of clients that need
 * to write, so we try to do that before returning in the event loop (see the
 * handleClientsWithPendingWrites() function).
 * If we fail and there is more data to write, compared to what the socket
 * buffers can hold, then we'll really install the handler. */
void clientInstallWriteHandler(client *c) {
    /* Schedule the client to write the output buffers to the socket only
     * if not already done and, for slaves, if the slave can actually receive
     * writes at this stage. */
    if (!(c->flags & CLIENT_PENDING_WRITE) && // not already flagged as having pending writes
        (c->replstate == REPL_STATE_NONE ||   // not involved in replication, or...
         (c->replstate == SLAVE_STATE_ONLINE && !c->repl_put_online_on_ack))) // an online replica that doesn't wait for its first ACK
    {
        /* Here instead of installing the write handler, we just flag the
         * client and put it into a list of clients that have something
         * to write to the socket. This way before re-entering the event
         * loop, we can try to directly write to the client sockets avoiding
         * a system call. We'll only really install the write handler if
         * we'll not be able to write the whole reply at once. */
        c->flags |= CLIENT_PENDING_WRITE;
        listAddNodeHead(server.clients_pending_write,c); // queue for the write-or-install-handler pass
    }
}

/* This function is called every time we are going to transmit new data
 * to the client. The behavior is the following:
 *
 * If the client should receive new data (normal clients will) the function
 * returns C_OK, and make sure to install the write handler in our event
 * loop so that when the socket is writable new data gets written.
 *
 * If the client should not receive new data, because it is a fake client
 * (used to load AOF in memory), a master or because the setup of the write
 * handler failed, the function returns C_ERR.
 *
 * The function may return C_OK without actually installing the write
 * event handler in the following cases:
 *
 * 1) The event handler should already be installed since the output buffer
 *    already contains something.
 * 2) The client is a slave but not yet online, so we want to just accumulate
 *    writes in the buffer but not actually sending them yet.
 *
 * Typically gets called every time a reply is built, before adding more
 * data to the clients output buffers. If the function returns C_ERR no
 * data should be appended to the output buffers. */
int prepareClientToWrite(client *c) {
    /* If it's the Lua client we always return ok without installing any
     * handler since there is no socket at all. */
    if (c->flags & (CLIENT_LUA|CLIENT_MODULE)) return C_OK; // Lua or module client: no socket at all

    /* CLIENT REPLY OFF / SKIP handling: don't send replies. */
    if (c->flags & (CLIENT_REPLY_OFF|CLIENT_REPLY_SKIP)) return C_ERR;

    /* Masters don't receive replies, unless CLIENT_MASTER_FORCE_REPLY flag
     * is set. */
    if ((c->flags & CLIENT_MASTER) &&
        !(c->flags & CLIENT_MASTER_FORCE_REPLY)) return C_ERR;

    if (!c->conn) return C_ERR; /* Fake client for AOF loading. */

    /* Schedule the client to write the output buffers to the socket, unless
     * it should already be setup to do so (it has already pending data). */
    if (!clientHasPendingReplies(c)) clientInstallWriteHandler(c); // nothing pending yet: schedule this client for writing

    /* Authorize the caller to queue in the output buffer of this client. */
    return C_OK;
}
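
For context, this is roughly how a command handler ends up here (a simplified sketch; the real pingCommand() in server.c also deals with subscribe mode): every addReply*() call runs prepareClientToWrite() first, so commands never touch the buffers directly.

    void pingCommand(client *c) {
        addReply(c, shared.pong); // "+PONG\r\n"; prepareClientToWrite() queues the client for writing
    }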

/* -----------------------------------------------------------------------------
 * Low level functions to add more data to output buffers.
 * -------------------------------------------------------------------------- */
int _addReplyToBuffer(client *c, const char *s, size_t len) {
    size_t available = sizeof(c->buf)-c->bufpos; // total buffer size minus bytes already used

    if (c->flags & CLIENT_CLOSE_AFTER_REPLY) return C_OK; // client will be closed after the reply: drop the data

    /* If there already are entries in the reply list, we cannot
     * add anything more to the static buffer. */
    if (listLength(c->reply) > 0) return C_ERR;

    /* Check that the buffer has enough space available for this string. */
    if (len > available) return C_ERR;

    memcpy(c->buf+c->bufpos,s,len); // enough room: copy into the static buffer
    c->bufpos+=len;
    return C_OK;
}

Append raw protocol data to the reply list:
void _addReplyProtoToList(client *c, const char *s, size_t len) {
    if (c->flags & CLIENT_CLOSE_AFTER_REPLY) return; // client is going away, no point buffering more

    listNode *ln = listLast(c->reply); // last node of the reply list
    clientReplyBlock *tail = ln? listNodeValue(ln): NULL; // its block, if any

    /* Note that 'tail' may be NULL even if we have a tail node, because when
     * addReplyDeferredLen() is used, it sets a dummy node to NULL just
     * to fill it later, when the size of the bulk length is set. */
    /* Append to tail string when possible. */
    if (tail) {
        /* Copy the part we can fit into the tail, and leave the rest for a
         * new node */
        size_t avail = tail->size - tail->used;
        size_t copy = avail >= len? len: avail;
        memcpy(tail->buf + tail->used, s, copy);
        tail->used += copy;
        s += copy;
        len -= copy;
    }
    if (len) { // bytes remain: create a new node
        /* Create a new node, make sure it is allocated to at
         * least PROTO_REPLY_CHUNK_BYTES */
        // PROTO_REPLY_CHUNK_BYTES is (16*1024), i.e. a 16k chunk
        size_t size = len < PROTO_REPLY_CHUNK_BYTES? PROTO_REPLY_CHUNK_BYTES: len;
        tail = zmalloc(size + sizeof(clientReplyBlock)); // payload + block header
        /* take over the allocation's internal fragmentation */ 接管分配的内部碎片
        tail->size = zmalloc_usable(tail) - sizeof(clientReplyBlock); // record the real usable size
        tail->used = len;
        memcpy(tail->buf, s, len); // copy the remaining bytes
        listAddNodeTail(c->reply, tail); // append the new tail node
        c->reply_bytes += tail->size; // account for the new block
    }
    asyncCloseClientOnOutputBufferLimitReached(c); // may schedule an async close if output buffer limits are exceeded
}

/* -----------------------------------------------------------------------------
 * Higher level functions to queue data on the client output buffer.
 * The following functions are the ones that commands implementations will call.
 * -------------------------------------------------------------------------- */
/* Add the object 'obj' string representation to the client output buffer. */
void addReply(client *c, robj *obj) {
    if (prepareClientToWrite(c) != C_OK) return; // client can't accept replies

    if (sdsEncodedObject(obj)) { // RAW or EMBSTR encoding
        if (_addReplyToBuffer(c,obj->ptr,sdslen(obj->ptr)) != C_OK) // static buffer full or in use:
            _addReplyProtoToList(c,obj->ptr,sdslen(obj->ptr)); // fall back to the reply list
    } else if (obj->encoding == OBJ_ENCODING_INT) { // integer encoding
        /* For integer encoded strings we just convert it into a string
         * using our optimized function, and attach the resulting string
         * to the output buffer. */
        char buf[32];
        size_t len = ll2string(buf,sizeof(buf),(long)obj->ptr); // convert the integer to a string
        if (_addReplyToBuffer(c,buf,len) != C_OK)
            _addReplyProtoToList(c,buf,len);
    } else {
        serverPanic("Wrong obj->encoding in addReply()");
    }
}
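
What actually lands in the buffers is plain RESP bytes. For illustration (payload assumed), a string object "hello" framed by addReplyBulk() below becomes:

    $5\r\nhello\r\n    <- length header, payload, trailing CRLF

while shared objects such as shared.pong already hold a complete frame ("+PONG\r\n") and pass through addReply() verbatim.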

/* Add the SDS 's' string to the client output buffer, as a side effect
 * the SDS string is freed. */
void addReplySds(client *c, sds s) {
    if (prepareClientToWrite(c) != C_OK) {
        /* The caller expects the sds to be free'd. */
        sdsfree(s);
        return;
    }
    if (_addReplyToBuffer(c,s,sdslen(s)) != C_OK) // the static buffer didn't take it:
        _addReplyProtoToList(c,s,sdslen(s)); // append to the reply list instead
    sdsfree(s); // we own the sds: free it
}

/* This low level function just adds whatever protocol you send it to the
 * client buffer, trying the static buffer initially, and using the string
 * of objects if not possible.
 *
 * It is efficient because does not create an SDS object nor a Redis object
 * if not needed. The object will only be created by calling
 * _addReplyProtoToList() if we fail to extend the existing tail object
 * in the list of objects. */
void addReplyProto(client *c, const char *s, size_t len) {
    if (prepareClientToWrite(c) != C_OK) return;
    if (_addReplyToBuffer(c,s,len) != C_OK)
        _addReplyProtoToList(c,s,len);
}

/* Low level function called by the addReplyError...() functions.
 * It emits the protocol for a Redis error, in the form:
 *
 * -ERRORCODE Error Message<CR><LF>
 *
 * If the error code is already passed in the string 's', the error
 * code provided is used, otherwise the string "-ERR " for the generic
 * error code is automatically added. */
void addReplyErrorLength(client *c, const char *s, size_t len) {
    /* If the string already starts with "-..." then the error code
     * is provided by the caller. Otherwise we use "-ERR". */
    if (!len || s[0] != '-') addReplyProto(c,"-ERR ",5); // empty or no leading '-': prepend the generic code
    addReplyProto(c,s,len);
    addReplyProto(c,"\r\n",2);

    /* Sometimes it could be normal that a slave replies to a master with
     * an error and this function gets called. Actually the error will never
     * be sent because addReply*() against master clients has no effect...
     * A notable example is:
     *
     *    EVAL 'redis.call("incr",KEYS[1]); redis.call("nonexisting")' 1 x
     *
     * Where the master must propagate the first change even if the second
     * will produce an error. However it is useful to log such events since
     * they are rare and may hint at errors in a script or a bug in Redis. */
    int ctype = getClientType(c);
    if (ctype == CLIENT_TYPE_MASTER || ctype == CLIENT_TYPE_SLAVE || c->id == CLIENT_ID_AOF) { // master, replica, or the AOF-loading fake client
        char *to, *from;

        if (c->id == CLIENT_ID_AOF) {
            to = "AOF-loading-client";
            from = "server";
        } else if (ctype == CLIENT_TYPE_MASTER) {
            to = "master";
            from = "replica";
        } else {
            to = "replica";
            from = "master";
        }

        char *cmdname = c->lastcmd ? c->lastcmd->name : "<unknown>"; // name of the last executed command
        serverLog(LL_WARNING,"== CRITICAL == This %s is sending an error "
                             "to its %s: '%s' after processing the command "
                             "'%s'", from, to, s, cmdname);
        if (ctype == CLIENT_TYPE_MASTER && server.repl_backlog &&
            server.repl_backlog_histlen > 0)
        {
            showLatestBacklog(); // dump the latest replication backlog to the log
        }
        server.stat_unexpected_error_replies++; // count the unexpected error reply
    }
}
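
For example (messages assumed for illustration):

    addReplyError(c, "value is not an integer or out of range");
    // wire: -ERR value is not an integer or out of range\r\n
    addReplyError(c, "-WRONGTYPE Operation against a key holding the wrong kind of value");
    // wire: -WRONGTYPE ...\r\n (the caller supplied the code, so no "-ERR " is prepended)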

void addReplyError(client *c, const char *err) {
    addReplyErrorLength(c,err,strlen(err));
}

Formatted error reply:
void addReplyErrorFormat(client *c, const char *fmt, ...) {
    size_t l, j;
    va_list ap;
    va_start(ap,fmt);
    sds s = sdscatvprintf(sdsempty(),fmt,ap);
    va_end(ap);
    /* Make sure there are no newlines in the string, otherwise invalid protocol
     * is emitted. */
    l = sdslen(s);
    for (j = 0; j < l; j++) {
        if (s[j] == '\r' || s[j] == '\n') s[j] = ' '; // replace CR/LF with spaces
    }
    addReplyErrorLength(c,s,sdslen(s));
    sdsfree(s);
}

void addReplyStatusLength(client *c, const char *s, size_t len) {
    addReplyProto(c,"+",1); // status replies start with '+'
    addReplyProto(c,s,len);
    addReplyProto(c,"\r\n",2); // and end with CRLF
}

Status reply:
void addReplyStatus(client *c, const char *status) {
    addReplyStatusLength(c,status,strlen(status));
}

Formatted status reply:
void addReplyStatusFormat(client *c, const char *fmt, ...) {
    va_list ap;
    va_start(ap,fmt);
    sds s = sdscatvprintf(sdsempty(),fmt,ap); // render the varargs
    va_end(ap);
    addReplyStatusLength(c,s,sdslen(s));
    sdsfree(s);
}

/* Sometimes we are forced to create a new reply node, and we can't append to
 * the previous one, when that happens, we wanna try to trim the unused space
 * at the end of the last reply node which we won't use anymore. */
(This trimming is a memory-saving measure.)
void trimReplyUnusedTailSpace(client *c) {
    listNode *ln = listLast(c->reply); // the last reply node
    clientReplyBlock *tail = ln? listNodeValue(ln): NULL; // its block, if any

    /* Note that 'tail' may be NULL even if we have a tail node, because when
     * addReplyDeferredLen() is used */
    if (!tail) return;

    /* We only try to trim the space is relatively high (more than a 1/4 of the
     * allocation), otherwise there's a high chance realloc will NOP.
     * Also, to avoid large memmove which happens as part of realloc, we only do
     * that if the used part is small.  */
    if (tail->size - tail->used > tail->size / 4 && // more than 1/4 of the allocation is unused
        tail->used < PROTO_REPLY_CHUNK_BYTES) // and the used part is small (under 16k)
    {
        size_t old_size = tail->size;
        tail = zrealloc(tail, tail->used + sizeof(clientReplyBlock)); // shrink to what is actually used
        /* take over the allocation's internal fragmentation (at least for
         * memory usage tracking) */
        tail->size = zmalloc_usable(tail) - sizeof(clientReplyBlock); // the size actually allocated
        c->reply_bytes = c->reply_bytes + tail->size - old_size; // account for the size change
        listNodeValue(ln) = tail; // the node now points at the reallocated block
    }
}

/* Adds an empty object to the reply list that will contain the multi bulk
 * length, which is not known when this function is called. */
void *addReplyDeferredLen(client *c) {
    /* Note that we install the write event here even if the object is not
     * ready to be sent, since we are sure that before returning to the
     * event loop setDeferredAggregateLen() will be called. */
    if (prepareClientToWrite(c) != C_OK) return NULL;
    trimReplyUnusedTailSpace(c); // reclaim wasted tail space first
    listAddNodeTail(c->reply,NULL); /* NULL is our placeholder. */
    return listLast(c->reply);
}
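
The typical calling pattern (a sketch; KEYS in db.c builds its reply this way because the match count isn't known up front):

    void *replylen = addReplyDeferredLen(c); // placeholder node
    long numkeys = 0;
    // ... for each matching key: addReplyBulk(c,keyobj); numkeys++; ...
    setDeferredArrayLen(c,replylen,numkeys); // backpatch "*<numkeys>\r\n"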

/* Populate the length object and try gluing it to the next chunk. */
void setDeferredAggregateLen(client *c, void *node, long length, char prefix) {
    listNode *ln = (listNode*)node;
    clientReplyBlock *next;
    char lenstr[128];
    size_t lenstr_len = sprintf(lenstr, "%c%ld\r\n", prefix, length);

    /* Abort when *node is NULL: when the client should not accept writes
     * we return NULL in addReplyDeferredLen() */
    if (node == NULL) return;
    serverAssert(!listNodeValue(ln)); // must still be the NULL placeholder

    /* Normally we fill this dummy NULL node, added by addReplyDeferredLen(),
     * with a new buffer structure containing the protocol needed to specify
     * the length of the array following. However sometimes when there is
     * little memory to move, we may instead remove this NULL node, and prefix
     * our protocol in the node immediately after to it, in order to save a
     * write(2) syscall later. Conditions needed to do it:
     * - The next node is non-NULL,
     * - It has enough room already allocated
     * - And not too large (avoid large memmove) */
    if (ln->next != NULL && (next = listNodeValue(ln->next)) && // the next node exists and has a value
        next->size - next->used >= lenstr_len && // with enough spare room for the header
        next->used < PROTO_REPLY_CHUNK_BYTES * 4) { // and not too large (under 64k used, to avoid a big memmove)
        memmove(next->buf + lenstr_len, next->buf, next->used); // make room at the front
        memcpy(next->buf, lenstr, lenstr_len); // write the length header
        next->used += lenstr_len;
        listDelNode(c->reply,ln); // drop the now-useless placeholder node
    } else {
        /* Create a new node */
        clientReplyBlock *buf = zmalloc(lenstr_len + sizeof(clientReplyBlock));
        /* Take over the allocation's internal fragmentation */
        buf->size = zmalloc_usable(buf) - sizeof(clientReplyBlock); // total usable size
        buf->used = lenstr_len;
        memcpy(buf->buf, lenstr, lenstr_len); // copy the length header
        listNodeValue(ln) = buf; // fill the placeholder
        c->reply_bytes += buf->size;
    }
    asyncCloseClientOnOutputBufferLimitReached(c); // may schedule an async close if output buffer limits are exceeded
}

Set a deferred array length:
void setDeferredArrayLen(client *c, void *node, long length) {
    setDeferredAggregateLen(c,node,length,'*');
}
The prefix depends on the protocol version:
void setDeferredMapLen(client *c, void *node, long length) {
    int prefix = c->resp == 2 ? '*' : '%';
    if (c->resp == 2) length *= 2;
    setDeferredAggregateLen(c,node,length,prefix);
}

The prefix depends on the protocol version:
void setDeferredSetLen(client *c, void *node, long length) {
    int prefix = c->resp == 2 ? '*' : '~';
    setDeferredAggregateLen(c,node,length,prefix);
}
The prefix depends on the protocol version:
void setDeferredAttributeLen(client *c, void *node, long length) {
    int prefix = c->resp == 2 ? '*' : '|';
    if (c->resp == 2) length *= 2;
    setDeferredAggregateLen(c,node,length,prefix);
}
The prefix depends on the protocol version:
void setDeferredPushLen(client *c, void *node, long length) {
    int prefix = c->resp == 2 ? '*' : '>';
    setDeferredAggregateLen(c,node,length,prefix);
}

/* Add a double as a bulk reply */
void addReplyDouble(client *c, double d) {
    if (isinf(d)) { // infinities
        /* Libc in odd systems (Hi Solaris!) will format infinite in a
         * different way, so better to handle it in an explicit way. */
        if (c->resp == 2) { // RESP2
            addReplyBulkCString(c, d > 0 ? "inf" : "-inf");
        } else {
            addReplyProto(c, d > 0 ? ",inf\r\n" : ",-inf\r\n",
                              d > 0 ? 6 : 7);
        }
    } else { // the common case
        char dbuf[MAX_LONG_DOUBLE_CHARS+3],
             sbuf[MAX_LONG_DOUBLE_CHARS+32];
        int dlen, slen;
        if (c->resp == 2) {
            dlen = snprintf(dbuf,sizeof(dbuf),"%.17g",d); // 17 significant digits
            slen = snprintf(sbuf,sizeof(sbuf),"$%d\r\n%s\r\n",dlen,dbuf); // wrap as a bulk string
            addReplyProto(c,sbuf,slen);
        } else {
            dlen = snprintf(dbuf,sizeof(dbuf),",%.17g\r\n",d); // RESP3 native double type
            addReplyProto(c,dbuf,dlen);
        }
    }
}
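
Concretely (value assumed for illustration), addReplyDouble(c, 0.5) emits:

    $3\r\n0.5\r\n    on RESP2 (a bulk string)
    ,0.5\r\n         on RESP3 (the native double type)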

/* Add a long double as a bulk reply, but uses a human readable formatting
 * of the double instead of exposing the crude behavior of doubles to the
 * dear user. */
void addReplyHumanLongDouble(client *c, long double d) {
    if (c->resp == 2) {
        robj *o = createStringObjectFromLongDouble(d,1); // human-friendly formatting
        addReplyBulk(c,o);
        decrRefCount(o);
    } else {
        char buf[MAX_LONG_DOUBLE_CHARS];
        int len = ld2string(buf,sizeof(buf),d,LD_STR_HUMAN);
        addReplyProto(c,",",1);
        addReplyProto(c,buf,len);
        addReplyProto(c,"\r\n",2);
    }
}

/* Add a long long as integer reply or bulk len / multi bulk count.
 * Basically this is used to output <prefix><long long><crlf>. */
void addReplyLongLongWithPrefix(client *c, long long ll, char prefix) {
    char buf[128];
    int len;

    /* Things like $3\r\n or *2\r\n are emitted very often by the protocol
     * so we have a few shared objects to use if the integer is small
     * like it is most of the times. */
#define OBJ_SHARED_BULKHDR_LEN 32
    if (prefix == '*' && ll < OBJ_SHARED_BULKHDR_LEN && ll >= 0) { // small count: use the shared header object
        addReply(c,shared.mbulkhdr[ll]);
        return;
    } else if (prefix == '$' && ll < OBJ_SHARED_BULKHDR_LEN && ll >= 0) {
        addReply(c,shared.bulkhdr[ll]);
        return;
    }
    /* All other cases: build the header by hand. */
    buf[0] = prefix;
    len = ll2string(buf+1,sizeof(buf)-1,ll);
    buf[len+1] = '\r';
    buf[len+2] = '\n';
    addReplyProto(c,buf,len+3); 
}
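
For example (calls assumed for illustration):

    addReplyLongLongWithPrefix(c, 3, '$'); // shared.bulkhdr[3]: "$3\r\n"
    addReplyLongLongWithPrefix(c, 100, '*'); // built on the stack: "*100\r\n"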

Add a long long integer reply:
void addReplyLongLong(client *c, long long ll) {
    if (ll == 0) 
        addReply(c,shared.czero);
    else if (ll == 1)
        addReply(c,shared.cone);
    else
        addReplyLongLongWithPrefix(c,ll,':');
}
Add an aggregate (multi bulk) length:
void addReplyAggregateLen(client *c, long length, int prefix) {
    if (prefix == '*' && length < OBJ_SHARED_BULKHDR_LEN) // small count: shared object
        addReply(c,shared.mbulkhdr[length]);
    else
        addReplyLongLongWithPrefix(c,length,prefix); // other cases (note this repeats the small-count check internally!)
}

void addReplyArrayLen(client *c, long length) {
    addReplyAggregateLen(c,length,'*');
}

void addReplyMapLen(client *c, long length) {
    int prefix = c->resp == 2 ? '*' : '%';
    if (c->resp == 2) length *= 2;
    addReplyAggregateLen(c,length,prefix);
}

void addReplySetLen(client *c, long length) {
    int prefix = c->resp == 2 ? '*' : '~';
    addReplyAggregateLen(c,length,prefix);
}

void addReplyAttributeLen(client *c, long length) {
    int prefix = c->resp == 2 ? '*' : '|';
    if (c->resp == 2) length *= 2;
    addReplyAggregateLen(c,length,prefix);
}

void addReplyPushLen(client *c, long length) {
    int prefix = c->resp == 2 ? '*' : '>';
    addReplyAggregateLen(c,length,prefix);
}
Null reply:
void addReplyNull(client *c) {
    if (c->resp == 2) {
        addReplyProto(c,"$-1\r\n",5);
    } else {
        addReplyProto(c,"_\r\n",3);
    }
}
Boolean reply:
void addReplyBool(client *c, int b) {
    if (c->resp == 2) {
        addReply(c, b ? shared.cone : shared.czero);
    } else {
        addReplyProto(c, b ? "#t\r\n" : "#f\r\n",4);
    }
}

/* A null array is a concept that no longer exists in RESP3. However
 * RESP2 had it, so API-wise we have this call, that will emit the correct
 * RESP2 protocol, however for RESP3 the reply will always be just the
 * Null type "_\r\n". */
void addReplyNullArray(client *c) {
    if (c->resp == 2) {
        addReplyProto(c,"*-1\r\n",5);
    } else {
        addReplyProto(c,"_\r\n",3);
    }
}

/* Create the length prefix of a bulk reply, example: $2234 */
void addReplyBulkLen(client *c, robj *obj) {
    size_t len = stringObjectLen(obj); // length of the object's string representation

    if (len < OBJ_SHARED_BULKHDR_LEN) // small length: use the shared header object
        addReply(c,shared.bulkhdr[len]);
    else
        addReplyLongLongWithPrefix(c,len,'$'); 
}

/* Add a Redis Object as a bulk reply */
void addReplyBulk(client *c, robj *obj) {
    addReplyBulkLen(c,obj);
    addReply(c,obj);
    addReply(c,shared.crlf);
}

/* Add a C buffer as bulk reply */
void addReplyBulkCBuffer(client *c, const void *p, size_t len) {
    addReplyLongLongWithPrefix(c,len,'$');
    addReplyProto(c,p,len);
    addReply(c,shared.crlf);
}

/* Add sds to reply (takes ownership of sds and frees it) */
void addReplyBulkSds(client *c, sds s)  {
    addReplyLongLongWithPrefix(c,sdslen(s),'$');
    addReplySds(c,s);
    addReply(c,shared.crlf);
}

/* Add a C null term string as bulk reply */
void addReplyBulkCString(client *c, const char *s) {
    if (s == NULL) {
        addReplyNull(c);
    } else {
        addReplyBulkCBuffer(c,s,strlen(s));
    }
}

/* Add a long long as a bulk reply */
void addReplyBulkLongLong(client *c, long long ll) {
    char buf[64];
    int len;

    len = ll2string(buf,64,ll); // convert to string
    addReplyBulkCBuffer(c,buf,len);
}

/* Reply with a verbatim type having the specified extension.
 *
 * The 'ext' is the "extension" of the file, actually just a three
 * character type that describes the format of the verbatim string.
 * For instance "txt" means it should be interpreted as a text only
 * file by the receiver, "md " as markdown, and so forth. Only the
 * three first characters of the extension are used, and if the
 * provided one is shorter than that, the remaining is filled with
 * spaces. */
void addReplyVerbatim(client *c, const char *s, size_t len, const char *ext) {
    if (c->resp == 2) { // RESP2 has no verbatim type: degrade to a bulk string
        addReplyBulkCBuffer(c,s,len);
    } else {
        char buf[32];
        size_t preflen = snprintf(buf,sizeof(buf),"=%zu\r\nxxx:",len+4);
        char *p = buf+preflen-4;
        for (int i = 0; i < 3; i++) { // overwrite the "xxx" placeholder with the extension
            if (*ext == '\0') {
                p[i] = ' ';
            } else {
                p[i] = *ext++;
            }
        }
        addReplyProto(c,buf,preflen);
        addReplyProto(c,s,len);
        addReplyProto(c,"\r\n",2);
    }
}
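
For instance (payload assumed), addReplyVerbatim(c, "Some string", 11, "txt") yields on RESP3:

    =15\r\ntxt:Some string\r\n

i.e. "=<len>\r\n<ext>:<payload>\r\n" where <len> counts the "txt:" prefix too (11 + 4 = 15). On RESP2 the same call degrades to a plain bulk string.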

/* Add an array of C strings as status replies with a heading.
 * This function is typically invoked by from commands that support
 * subcommands in response to the 'help' subcommand. The help array
 * is terminated by NULL sentinel. */
void addReplyHelp(client *c, const char **help) {
    sds cmd = sdsnew((char*) c->argv[0]->ptr);
    void *blenp = addReplyDeferredLen(c); // placeholder for the yet-unknown line count
    int blen = 0;

    sdstoupper(cmd); // uppercase the command name
    addReplyStatusFormat(c,
        "%s <subcommand> arg arg ... arg. Subcommands are:",cmd);
    sdsfree(cmd);

    while (help[blen]) addReplyStatus(c,help[blen++]); // emit each help line

    blen++;  /* Account for the header line(s). */
    setDeferredArrayLen(c,blenp,blen); // backpatch the total line count
}
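
Callers pass a NULL-terminated array of lines, e.g. (a sketch modeled on commands like OBJECT; the strings are assumed):

    const char *help[] = {
    "ENCODING <key> -- Return the kind of internal representation used to store the value.",
    "REFCOUNT <key> -- Return the number of references of the value associated with <key>.",
    NULL
    };
    addReplyHelp(c, help);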

/* Add a suggestive error reply.
 * This function is typically invoked by from commands that support
 * subcommands in response to an unknown subcommand or argument error. */
void addReplySubcommandSyntaxError(client *c) {
    sds cmd = sdsnew((char*) c->argv[0]->ptr);
    sdstoupper(cmd);
    addReplyErrorFormat(c,
        "Unknown subcommand or wrong number of arguments for '%s'. Try %s HELP.",
        (char*)c->argv[1]->ptr,cmd);
    sdsfree(cmd);
}

/* Append 'src' client output buffers into 'dst' client output buffers. 
 * This function clears the output buffers of 'src' */
void AddReplyFromClient(client *dst, client *src) {
    if (prepareClientToWrite(dst) != C_OK) // destination can't accept writes
        return;
    addReplyProto(dst,src->buf, src->bufpos); // append src's static buffer to dst
    if (listLength(src->reply)) // src has a reply list too:
        listJoin(dst->reply,src->reply); // splice it onto the end of dst's list
    dst->reply_bytes += src->reply_bytes; // account for the transferred bytes
    src->reply_bytes = 0; // src is now empty
    src->bufpos = 0;
}

/* Copy 'src' client output buffers into 'dst' client output buffers.
 * The function takes care of freeing the old output buffers of the
 * destination client. */
void copyClientOutputBuffer(client *dst, client *src) {
    listRelease(dst->reply); // drop dst's old reply list
    dst->sentlen = 0;
    dst->reply = listDup(src->reply); // duplicate src's reply list
    memcpy(dst->buf,src->buf,src->bufpos); // copy the static buffer contents
    dst->bufpos = src->bufpos;
    dst->reply_bytes = src->reply_bytes;
}

/* Return true if the specified client has pending reply buffers to write to
 * the socket. */
int clientHasPendingReplies(client *c) {
    return c->bufpos || listLength(c->reply);
}

void clientAcceptHandler(connection *conn) {
    client *c = connGetPrivateData(conn); // the client associated with this connection

    if (connGetState(conn) != CONN_STATE_CONNECTED) { // the connection never reached the connected state
        serverLog(LL_WARNING,
                "Error accepting a client connection: %s",
                connGetLastError(conn));
        freeClientAsync(c); // schedule the client for release
        return;
    }

    /* If the server is running in protected mode (the default) and there
     * is no password set, nor a specific interface is bound, we don't accept
     * requests from non loopback interfaces. Instead we try to explain the
     * user what to do to fix it if needed. */
    if (server.protected_mode && // protected mode is on
        server.bindaddr_count == 0 && // no bind address configured
        DefaultUser->flags & USER_FLAG_NOPASS && // no password required
        !(c->flags & CLIENT_UNIX_SOCKET)) // and not a unix socket client
    {
        char cip[NET_IP_STR_LEN+1] = { 0 };
        connPeerToString(conn, cip, sizeof(cip)-1, NULL); // fetch the peer IP

        if (strcmp(cip,"127.0.0.1") && strcmp(cip,"::1")) { // NOT a loopback address: reject
            char *err =
                "-DENIED Redis is running in protected mode because protected "
                "mode is enabled, no bind address was specified, no "
                "authentication password is requested to clients. In this mode "
                "connections are only accepted from the loopback interface. "
                "If you want to connect from external computers to Redis you "
                "may adopt one of the following solutions: "
                "1) Just disable protected mode sending the command "
                "'CONFIG SET protected-mode no' from the loopback interface "
                "by connecting to Redis from the same host the server is "
                "running, however MAKE SURE Redis is not publicly accessible "
                "from internet if you do so. Use CONFIG REWRITE to make this "
                "change permanent. "
                "2) Alternatively you can just disable the protected mode by "
                "editing the Redis configuration file, and setting the protected "
                "mode option to 'no', and then restarting the server. "
                "3) If you started the server manually just for testing, restart "
                "it with the '--protected-mode no' option. "
                "4) Setup a bind address or an authentication password. "
                "NOTE: You only need to do one of the above things in order for "
                "the server to start accepting connections from the outside.\r\n";
            if (connWrite(c->conn,err,strlen(err)) == -1) { // best-effort write of the notice to the connection
                /* Nothing to do, Just to avoid the warning... */
            }
            server.stat_rejected_conn++; // count the rejected connection
            freeClientAsync(c); // free the client asynchronously
            return;
        }
    }

    server.stat_numconnections++; // count the accepted connection
    moduleFireServerEvent(REDISMODULE_EVENT_CLIENT_CHANGE,
                          REDISMODULE_SUBEVENT_CLIENT_CHANGE_CONNECTED,
                          c); // fire the module client-connected event
}

#define MAX_ACCEPTS_PER_CALL 1000 /* max accepts per call */
static void acceptCommonHandler(connection *conn, int flags, char *ip) {
    client *c;
    UNUSED(ip);

    /* Admission control will happen before a client is created and connAccept()
     * called, because we don't want to even start transport-level negotiation
     * if rejected.
     */
    if (listLength(server.clients) >= server.maxclients) { // max number of clients reached
        char *err = "-ERR max number of clients reached\r\n";

        /* That's a best effort error message, don't check write errors.
         * Note that for TLS connections, no handshake was done yet so nothing is written
         * and the connection will just drop.
         */
        if (connWrite(conn,err,strlen(err)) == -1) {
            /* Nothing to do, Just to avoid the warning... */
        }
        server.stat_rejected_conn++; // count the rejection
        connClose(conn); // drop the connection
        return;
    }

    /* Create connection and client */
    if ((c = createClient(conn)) == NULL) { // client creation failed
        char conninfo[100];
        serverLog(LL_WARNING,
            "Error registering fd event for the new client: %s (conn: %s)",
            connGetLastError(conn),
            connGetInfo(conn, conninfo, sizeof(conninfo)));
        connClose(conn); /* May be already closed, just ignore errors */
        return;
    }

    /* Last chance to keep flags */
    c->flags |= flags;

    /* Initiate accept.
     *
     * Note that connAccept() is free to do two things here:
     * 1. Call clientAcceptHandler() immediately;
     * 2. Schedule a future call to clientAcceptHandler().
     *
     * Because of that, we must do nothing else afterwards.
     */
    if (connAccept(conn, clientAcceptHandler) == C_ERR) { // accept failed
        char conninfo[100];
        if (connGetState(conn) == CONN_STATE_ERROR)
            serverLog(LL_WARNING,
                    "Error accepting a client connection: %s (conn: %s)",
                    connGetLastError(conn), connGetInfo(conn, conninfo, sizeof(conninfo)));
        freeClient(connGetPrivateData(conn)); // free the client stored on the connection
        return;
    }
}

The TCP accept handler:
void acceptTcpHandler(aeEventLoop *el, int fd, void *privdata, int mask) {
    int cport, cfd, max = MAX_ACCEPTS_PER_CALL;
    char cip[NET_IP_STR_LEN];
    UNUSED(el);
    UNUSED(mask);
    UNUSED(privdata);

    while(max--) {
        cfd = anetTcpAccept(server.neterr, fd, cip, sizeof(cip), &cport); // accept the incoming connection
        if (cfd == ANET_ERR) { // accept failed
            if (errno != EWOULDBLOCK)
                serverLog(LL_WARNING,
                    "Accepting client connection: %s", server.neterr);
            return;
        }
        serverLog(LL_VERBOSE,"Accepted %s:%d", cip, cport); // log the peer address
        acceptCommonHandler(connCreateAcceptedSocket(cfd),0,cip); // wrap the fd and create the client
    }
}
The TLS accept handler:
void acceptTLSHandler(aeEventLoop *el, int fd, void *privdata, int mask) {
    int cport, cfd, max = MAX_ACCEPTS_PER_CALL;
    char cip[NET_IP_STR_LEN];
    UNUSED(el);
    UNUSED(mask);
    UNUSED(privdata);

    while(max--) {
        cfd = anetTcpAccept(server.neterr, fd, cip, sizeof(cip), &cport); // accept the incoming connection
        if (cfd == ANET_ERR) {
            if (errno != EWOULDBLOCK)
                serverLog(LL_WARNING,
                    "Accepting client connection: %s", server.neterr);
            return;
        }
        serverLog(LL_VERBOSE,"Accepted %s:%d", cip, cport);
        acceptCommonHandler(connCreateAcceptedTLS(cfd, server.tls_auth_clients),0,cip);
    }
}
The Unix socket accept handler:
void acceptUnixHandler(aeEventLoop *el, int fd, void *privdata, int mask) {
    int cfd, max = MAX_ACCEPTS_PER_CALL;
    UNUSED(el);
    UNUSED(mask);
    UNUSED(privdata);

    while(max--) {
        cfd = anetUnixAccept(server.neterr, fd);
        if (cfd == ANET_ERR) {
            if (errno != EWOULDBLOCK)
                serverLog(LL_WARNING,
                    "Accepting client connection: %s", server.neterr);
            return;
        }
        serverLog(LL_VERBOSE,"Accepted connection to %s", server.unixsocket);
        acceptCommonHandler(connCreateAcceptedSocket(cfd),CLIENT_UNIX_SOCKET,NULL);
    }
}
Free the client's command arguments:
static void freeClientArgv(client *c) {
    int j;
    for (j = 0; j < c->argc; j++)
        decrRefCount(c->argv[j]);
    c->argc = 0;
    c->cmd = NULL;
}

/* Close all the slaves connections. This is useful in chained replication
 * when we resync with our own master and want to force all our slaves to
 * resync with us as well. */
void disconnectSlaves(void) {
    listIter li;
    listNode *ln;
    listRewind(server.slaves,&li); // init the iterator
    while((ln = listNext(&li))) { // walk the replica list
        freeClient((client*)ln->value); // free each replica client
    }
}

/* Remove the specified client from global lists where the client could
 * be referenced, not including the Pub/Sub channels.
 * This is used by freeClient() and replicationCacheMaster(). */
void unlinkClient(client *c) {
    listNode *ln;

    /* If this is marked as current client unset it. */
    if (server.current_client == c) server.current_client = NULL;

    /* Certain operations must be done only if the client has an active connection.
     * If the client was already unlinked or if it's a "fake client" the
     * conn is already set to NULL. */
    if (c->conn) {
        /* Remove from the list of active clients. */
        if (c->client_list_node) {
            uint64_t id = htonu64(c->id);
            raxRemove(server.clients_index,(unsigned char*)&id,sizeof(id),NULL); // drop the client's ID from the radix tree index
            listDelNode(server.clients,c->client_list_node); // remove from the global client list
            c->client_list_node = NULL;
        }
        }

        /* Check if this is a replica waiting for diskless replication (rdb pipe),
         * in which case it needs to be cleaned from that list */
        if (c->flags & CLIENT_SLAVE && // a replica
            c->replstate == SLAVE_STATE_WAIT_BGSAVE_END && // waiting for the RDB transfer to end
            server.rdb_pipe_conns) // while diskless-replication pipes are in use
        {
            int i;
            for (i=0; i < server.rdb_pipe_numconns; i++) {
                if (server.rdb_pipe_conns[i] == c->conn) {
                    rdbPipeWriteHandlerConnRemoved(c->conn); // remove it from the pipe list
                    server.rdb_pipe_conns[i] = NULL;
                    break;
                }
            }
        }
        connClose(c->conn); // close the connection
        c->conn = NULL;
    }

    /* Remove from the list of pending writes if needed. */
    if (c->flags & CLIENT_PENDING_WRITE) {
        ln = listSearchKey(server.clients_pending_write,c); // find ourselves in the pending-write list
        serverAssert(ln != NULL);
        listDelNode(server.clients_pending_write,ln);
        c->flags &= ~CLIENT_PENDING_WRITE; // clear the flag
    }
    }

    /* Remove from the list of pending reads if needed. */
    if (c->flags & CLIENT_PENDING_READ) {
        ln = listSearchKey(server.clients_pending_read,c);
        serverAssert(ln != NULL);
        listDelNode(server.clients_pending_read,ln);
        c->flags &= ~CLIENT_PENDING_READ;
    }

    /* When client was just unblocked because of a blocking operation,
     * remove it from the list of unblocked clients. */
    if (c->flags & CLIENT_UNBLOCKED) {
        ln = listSearchKey(server.unblocked_clients,c);
        serverAssert(ln != NULL);
        listDelNode(server.unblocked_clients,ln);
        c->flags &= ~CLIENT_UNBLOCKED;
    }

    /* Clear the tracking status. */
    if (c->flags & CLIENT_TRACKING) disableTracking(c);
}
Free a client:
void freeClient(client *c) {
    listNode *ln;

    /* If a client is protected, yet we need to free it right now, make sure
     * to at least use asynchronous freeing. */
    if (c->flags & CLIENT_PROTECTED) { // protected client:
        freeClientAsync(c); // fall back to asynchronous freeing
        return;
    }

    /* For connected clients, call the disconnection event of modules hooks. */
    if (c->conn) {
        moduleFireServerEvent(REDISMODULE_EVENT_CLIENT_CHANGE,
                              REDISMODULE_SUBEVENT_CLIENT_CHANGE_DISCONNECTED,
                              c);
    }

    /* Notify module system that this client auth status changed. */
    moduleNotifyUserChanged(c);

    /* If this client was scheduled for async freeing we need to remove it
     * from the queue. Note that we need to do this here, because later
     * we may call replicationCacheMaster() and the client should already
     * be removed from the list of clients to free. */
    if (c->flags & CLIENT_CLOSE_ASAP) { // scheduled for async close:
        ln = listSearchKey(server.clients_to_close,c); // find it in the to-close list
        serverAssert(ln != NULL);
        listDelNode(server.clients_to_close,ln); // and remove it, we're freeing it now
    }

    /* If it is our master that's being disconnected we should make sure
     * to cache the state to try a partial resynchronization later.
     *
     * Note that before doing this we make sure that the client is not in
     * some unexpected state, by checking its flags. */
    if (server.master && c->flags & CLIENT_MASTER) {  
        serverLog(LL_WARNING,"Connection with master lost.");
        if (!(c->flags & (CLIENT_PROTOCOL_ERROR|CLIENT_BLOCKED))) {
            c->flags &= ~(CLIENT_CLOSE_ASAP|CLIENT_CLOSE_AFTER_REPLY);
            replicationCacheMaster(c); // cache the master client for a later partial resync
            return;
        }
    }

    /* Log link disconnection with slave */
    if (getClientType(c) == CLIENT_TYPE_SLAVE) {
        serverLog(LL_WARNING,"Connection with replica %s lost.",
            replicationGetSlaveName(c)); // logs the replica's ip:port
    }

    /* Free the query buffer */
    sdsfree(c->querybuf);
    sdsfree(c->pending_querybuf);
    c->querybuf = NULL;

    /* Deallocate structures used to block on blocking ops. */
    if (c->flags & CLIENT_BLOCKED) unblockClient(c);
    dictRelease(c->bpop.keys); // release the blocking-keys dict

    /* UNWATCH all the keys */
    unwatchAllKeys(c);
    listRelease(c->watched_keys);

    /* Unsubscribe from all the pubsub channels */
    pubsubUnsubscribeAllChannels(c,0); // channels
    pubsubUnsubscribeAllPatterns(c,0); // patterns
    dictRelease(c->pubsub_channels); 
    listRelease(c->pubsub_patterns);

    /* Free data structures. */
    listRelease(c->reply);
    freeClientArgv(c); // free the command arguments

    /* Unlink the client: this will close the socket, remove the I/O
     * handlers, and remove references of the client from different
     * places where active clients may be referenced. */
    unlinkClient(c);

    /* Master/slave cleanup Case 1:
     * we lost the connection with a slave. */
    if (c->flags & CLIENT_SLAVE) {
        if (c->replstate == SLAVE_STATE_SEND_BULK) {
            if (c->repldbfd != -1) close(c->repldbfd);
            if (c->replpreamble) sdsfree(c->replpreamble);
        }
        list *l = (c->flags & CLIENT_MONITOR) ? server.monitors : server.slaves; // monitor or replica list
        ln = listSearchKey(l,c);
        serverAssert(ln != NULL);
        listDelNode(l,ln);
        /* We need to remember the time when we started to have zero
         * attached slaves, as after some time we'll free the replication
         * backlog. */
        if (getClientType(c) == CLIENT_TYPE_SLAVE && listLength(server.slaves) == 0)
            server.repl_no_slaves_since = server.unixtime;
        refreshGoodSlavesCount(); /* refresh the good-replicas count (a replica lagging too far behind does not count as good) */
        /* Fire the replica change modules event. */
        if (c->replstate == SLAVE_STATE_ONLINE) /* only if it was online, streaming commands */
            moduleFireServerEvent(REDISMODULE_EVENT_REPLICA_CHANGE,
                                  REDISMODULE_SUBEVENT_REPLICA_CHANGE_OFFLINE,
                                  NULL); /* fire the replica-offline event */
    }

    /* Master/slave cleanup Case 2:
     * we lost the connection with the master. */
    if (c->flags & CLIENT_MASTER) replicationHandleMasterDisconnection();

    /* Remove the contribution that this client gave to our
     * incrementally computed memory usage. */
    server.stat_clients_type_memory[c->client_cron_last_memory_type] -=
        c->client_cron_last_memory_usage;

    /* Release other dynamically allocated client structure fields,
     * and finally release the client structure itself. */
    if (c->name) decrRefCount(c->name);
    zfree(c->argv);
    freeClientMultiState(c);
    sdsfree(c->peerid);
    zfree(c);
}

/* Schedule a client to free it at a safe time in the serverCron() function.
 * This function is useful when we need to terminate a client but we are in
 * a context where calling freeClient() is not possible, because the client
 * should be valid for the continuation of the flow of the program. */
void freeClientAsync(client *c) {
    /* We need to handle concurrent access to the server.clients_to_close list
     * only in the freeClientAsync() function, since it's the only function that
     * may access the list while Redis uses I/O threads. All the other accesses
     * are in the context of the main thread while the other threads are
     * idle. */
    if (c->flags & CLIENT_CLOSE_ASAP || c->flags & CLIENT_LUA) return; /* already scheduled, or a Lua fake client */
    c->flags |= CLIENT_CLOSE_ASAP; /* mark it: close as soon as possible */
    if (server.io_threads_num == 1) { 
        /* no need to bother with locking if there's just one thread (the main thread) */
        listAddNodeTail(server.clients_to_close,c); /* queue it for deferred freeing */
        return;
    }
    /* With I/O threads enabled, the list must be protected by a mutex. */
    static pthread_mutex_t async_free_queue_mutex = PTHREAD_MUTEX_INITIALIZER;
    pthread_mutex_lock(&async_free_queue_mutex);
    listAddNodeTail(server.clients_to_close,c);
    pthread_mutex_unlock(&async_free_queue_mutex);
}

/* Free the clients marked as CLOSE_ASAP, return the number of clients
 * freed. */
int freeClientsInAsyncFreeQueue(void) {
    int freed = 0;
    listIter li;
    listNode *ln;

    listRewind(server.clients_to_close,&li); /* init the list iterator */
    while ((ln = listNext(&li)) != NULL) { /* walk the clients waiting to be freed */
        client *c = listNodeValue(ln); /* the client stored in this node */

        if (c->flags & CLIENT_PROTECTED) continue; /* protected: skip it for now */

        c->flags &= ~CLIENT_CLOSE_ASAP; /* clear the flag */
        freeClient(c); /* free the client */
        listDelNode(server.clients_to_close,ln); /* remove the node from the list */
        freed++; /* one more freed */
    }
    return freed;
}
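One detail worth noting in the loop above: the node being freed is deleted only after listNext() has already moved the iterator past it, which is what makes deleting during iteration safe. A standalone sketch of the same advance-before-delete idea on a plain singly linked list (hypothetical struct node, not Redis's adlist):

#include <stdio.h>
#include <stdlib.h>

/* A tiny singly linked list, standing in for Redis's adlist. */
struct node { int v; struct node *next; };

int main(void) {
    /* Build 1 -> 2 -> 3 -> 4. */
    struct node *head = NULL;
    for (int v = 4; v >= 1; v--) {
        struct node *n = malloc(sizeof(*n));
        n->v = v; n->next = head; head = n;
    }

    /* Delete even nodes while walking: save 'next' before freeing the
     * current node, just like listNext() advances before listDelNode()
     * runs in the loop above. */
    struct node **link = &head;
    for (struct node *cur = head, *next; cur; cur = next) {
        next = cur->next; /* advance first, so deletion can't invalidate us */
        if (cur->v % 2 == 0) {
            *link = next;
            free(cur);
        } else {
            link = &cur->next;
        }
    }

    for (struct node *n = head; n; n = n->next) printf("%d ", n->v); /* prints: 1 3 */
    printf("\n");
    return 0;
}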

/* Return a client by ID, or NULL if the client ID is not in the set
 * of registered clients. Note that "fake clients", created with -1 as FD,
 * are not registered clients. */
client *lookupClientByID(uint64_t id) {
    id = htonu64(id); /* convert to network (big endian) order, the same encoding used for the index keys */
    client *c = raxFind(server.clients_index,(unsigned char*)&id,sizeof(id)); /* look it up in the radix tree */
    return (c == raxNotFound) ? NULL : c;
}
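Encoding the id in big-endian (network) byte order before using it as a rax key makes the byte-wise comparison the radix tree performs agree with the numeric ordering of the ids. A minimal standalone sketch of that property, with a hypothetical helper u64_to_be() standing in for htonu64() (not part of networking.c):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical helper: store a uint64_t in big-endian byte order,
 * the same effect htonu64() has on a little-endian host. */
static void u64_to_be(uint64_t v, unsigned char out[8]) {
    for (int i = 7; i >= 0; i--) {
        out[i] = (unsigned char)(v & 0xff);
        v >>= 8;
    }
}

int main(void) {
    unsigned char a[8], b[8];
    u64_to_be(5, a);
    u64_to_be(300, b);
    /* With big-endian keys, memcmp() order equals numeric order,
     * which is exactly what a byte-oriented radix tree needs. */
    printf("memcmp: 5 %s 300\n", memcmp(a, b, 8) < 0 ? "<" : ">=");
    return 0;
}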

/* Write data in output buffers to client. Return C_OK if the client
 * is still valid after the call, C_ERR if it was freed because of some
 * error.  If handler_installed is set, it will attempt to clear the
 * write event.
 * This function is called by threads, but always with handler_installed
 * set to 0. So when handler_installed is set to 0 the function must be
 * thread safe. */
int writeToClient(client *c, int handler_installed) {
    ssize_t nwritten = 0, totwritten = 0;
    size_t objlen;
    clientReplyBlock *o;

    while(clientHasPendingReplies(c)) { /* the client still has data to send */
        if (c->bufpos > 0) { /* bytes pending in the static buffer */
            nwritten = connWrite(c->conn,c->buf+c->sentlen,c->bufpos-c->sentlen); /* write to the connection */
            if (nwritten <= 0) break; /* write failed or would block: stop */
            c->sentlen += nwritten; /* account for the bytes actually sent */
            totwritten += nwritten;

            /* If the buffer was sent, set bufpos to zero to continue with
             * the remainder of the reply. */
            if ((int)c->sentlen == c->bufpos) {
                c->bufpos = 0;
                c->sentlen = 0;
            }
        } else { /* otherwise drain the reply list */
            o = listNodeValue(listFirst(c->reply));
            objlen = o->used;

            if (objlen == 0) { /* empty block: just release it */
                c->reply_bytes -= o->size;
                listDelNode(c->reply,listFirst(c->reply));
                continue;
            }
            /* Non-empty block: write its payload. */
            nwritten = connWrite(c->conn, o->buf + c->sentlen, objlen - c->sentlen);
            if (nwritten <= 0) break;
            c->sentlen += nwritten;
            totwritten += nwritten;

            /* If we fully sent the object on head go to the next one */
            if (c->sentlen == objlen) {
                c->reply_bytes -= o->size;
                listDelNode(c->reply,listFirst(c->reply));
                c->sentlen = 0;
                /* If there are no longer objects in the list, we expect
                 * the count of reply bytes to be exactly zero. */
                if (listLength(c->reply) == 0)
                    serverAssert(c->reply_bytes == 0);
            }
        }
        /* Note that we avoid to send more than NET_MAX_WRITES_PER_EVENT
         * bytes, in a single threaded server it's a good idea to serve
         * other clients as well, even if a very large request comes from
         * super fast link that is always able to accept data (in real world
         * scenario think about 'KEYS *' against the loopback interface);
         * otherwise one huge reply could starve every other client.
         *
         * However if we are over the maxmemory limit we ignore that and
         * just deliver as much data as it is possible to deliver.
         *
         * Moreover, we also send as much as possible if the client is
         * a slave or a monitor (otherwise, on high-speed traffic, the
         * replication/output buffer will grow indefinitely) */
        if (totwritten > NET_MAX_WRITES_PER_EVENT && /* past the per-event write limit */
            (server.maxmemory == 0 || /* no memory limit configured... */
             zmalloc_used_memory() < server.maxmemory) && /* ...or still below it */
            !(c->flags & CLIENT_SLAVE)) break; /* and not a replica/monitor: yield to other clients */
    }
    server.stat_net_output_bytes += totwritten; /* account the bytes written in this call */
    if (nwritten == -1) { /* the last write failed */
        if (connGetState(c->conn) == CONN_STATE_CONNECTED) { /* still connected: just retry later */
            nwritten = 0;
        } else {
            serverLog(LL_VERBOSE,
                "Error writing to client: %s", connGetLastError(c->conn));
            freeClientAsync(c);
            return C_ERR;
        }
    }
    if (totwritten > 0) {
        /* For clients representing masters we don't count sending data
         * as an interaction, since we always send REPLCONF ACK commands
         * that take some time to just fill the socket output buffer.
         * We just rely on data / pings received for timeout detection. */
        if (!(c->flags & CLIENT_MASTER)) c->lastinteraction = server.unixtime; /* update the last interaction time */
    }
    if (!clientHasPendingReplies(c)) { /* nothing left to send */
        c->sentlen = 0;
        /* Note that writeToClient() is called in a threaded way, but
         * aeDeleteFileEvent() is not thread safe: however writeToClient()
         * is always called with handler_installed set to 0 from threads
         * so we are fine. */
        if (handler_installed) connSetWriteHandler(c->conn, NULL);

        /* Close connection after entire reply has been sent. */
        if (c->flags & CLIENT_CLOSE_AFTER_REPLY) {
            freeClientAsync(c);
            return C_ERR;
        }
    }
    return C_OK;
}

/* Write event handler. Just send data to the client. */
void sendReplyToClient(connection *conn) {
    client *c = connGetPrivateData(conn); /* the client bound to this connection */
    writeToClient(c,1); /* flush its output buffers */
}

/* This function is called just before entering the event loop, in the hope
 * we can just write the replies to the client output buffer without any
 * need to use a syscall in order to install the writable event handler,
 * get it called, and so forth. */
int handleClientsWithPendingWrites(void) {
    listIter li;
    listNode *ln;
    int processed = listLength(server.clients_pending_write); /* number of clients with pending writes */

    listRewind(server.clients_pending_write,&li); /* init the iterator */
    while((ln = listNext(&li))) {
        client *c = listNodeValue(ln);
        c->flags &= ~CLIENT_PENDING_WRITE; /* clear the pending-write flag */
        listDelNode(server.clients_pending_write,ln);

        /* If a client is protected, don't do anything,
         * that may trigger write error or recreate handler. */
        if (c->flags & CLIENT_PROTECTED) continue;

        /* Try to write buffers to the client socket. */
        if (writeToClient(c,0) == C_ERR) continue;

        /* If after the synchronous writes above we still have data to
         * output to the client, we need to install the writable handler. */
        if (clientHasPendingReplies(c)) { /* still data pending for output */
            int ae_barrier = 0;
            /* For the fsync=always policy, we want that a given FD is never
             * served for reading and writing in the same event loop iteration,
             * so that in the middle of receiving the query, and serving it
             * to the client, we'll call beforeSleep() that will do the
             * actual fsync of AOF to disk. the write barrier ensures that. */
            if (server.aof_state == AOF_ON &&
                server.aof_fsync == AOF_FSYNC_ALWAYS) /* AOF is on with fsync=always */
            {
                ae_barrier = 1;
            }
            if (connSetWriteHandlerWithBarrier(c->conn, sendReplyToClient, ae_barrier) == C_ERR) { /* install the write handler */
                freeClientAsync(c); /* failed: free the client asynchronously */
            }
        }
    }
    return processed;
}

/* resetClient prepare the client to process the next command */
void resetClient(client *c) {
    redisCommandProc *prevcmd = c->cmd ? c->cmd->proc : NULL; /* the command just executed */

    freeClientArgv(c); /* free the argument vector */
    c->reqtype = 0;
    c->multibulklen = 0;
    c->bulklen = -1;

    /* We clear the ASKING flag as well if we are not inside a MULTI, and
     * if what we just executed is not the ASKING command itself. */
    if (!(c->flags & CLIENT_MULTI) && prevcmd != askingCommand)
        c->flags &= ~CLIENT_ASKING;

    /* We do the same for the CACHING command as well. It also affects
     * the next command or transaction executed, in a way very similar
     * to ASKING. */
    if (!(c->flags & CLIENT_MULTI) && prevcmd != clientCommand)
        c->flags &= ~CLIENT_TRACKING_CACHING;

    /* Remove the CLIENT_REPLY_SKIP flag if any so that the reply
     * to the next command will be sent, but set the flag if the command
     * we just processed was "CLIENT REPLY SKIP". */
    c->flags &= ~CLIENT_REPLY_SKIP;
    if (c->flags & CLIENT_REPLY_SKIP_NEXT) {
        c->flags |= CLIENT_REPLY_SKIP;
        c->flags &= ~CLIENT_REPLY_SKIP_NEXT;
    }
}
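The last three lines implement a small two-step handoff: CLIENT REPLY SKIP sets CLIENT_REPLY_SKIP_NEXT, and the next resetClient() call converts it into CLIENT_REPLY_SKIP, so exactly one reply (the following command's) is suppressed. A standalone sketch of the same flag rotation (the flag values here are illustrative, not Redis's):

#include <stdio.h>

#define REPLY_SKIP      (1<<0) /* suppress the reply of the current command */
#define REPLY_SKIP_NEXT (1<<1) /* suppress the reply of the next command */

/* Mimics the tail of resetClient(): rotate SKIP_NEXT into SKIP. */
static void reset_flags(int *flags) {
    *flags &= ~REPLY_SKIP;
    if (*flags & REPLY_SKIP_NEXT) {
        *flags |= REPLY_SKIP;
        *flags &= ~REPLY_SKIP_NEXT;
    }
}

int main(void) {
    int flags = REPLY_SKIP_NEXT; /* as if "CLIENT REPLY SKIP" was just processed */
    reset_flags(&flags); /* before the next command: its reply is skipped */
    printf("next command skipped: %d\n", !!(flags & REPLY_SKIP));
    reset_flags(&flags); /* before the command after that: replies resume */
    printf("following command skipped: %d\n", !!(flags & REPLY_SKIP));
    return 0;
}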

/* This function is used when we want to re-enter the event loop but there
 * is the risk that the client we are dealing with will be freed in some
 * way. This happens for instance in:
 *
 * * DEBUG RELOAD and similar.
 * * When a Lua script is in -BUSY state.
 *
 * So the function will protect the client by doing two things:
 *
 * 1) It removes the file events. This way it is not possible that an
 *    error is signaled on the socket, freeing the client.
 * 2) Moreover it makes sure that if the client is freed in a different code
 *    path, it is not really released, but only marked for later release. */
void protectClient(client *c) {
    c->flags |= CLIENT_PROTECTED; /* enter protected mode */
    connSetReadHandler(c->conn,NULL); /* remove the read handler */
    connSetWriteHandler(c->conn,NULL); /* remove the write handler */
}

/* This will undo the client protection done by protectClient() */
void unprotectClient(client *c) {
    if (c->flags & CLIENT_PROTECTED) {
        c->flags &= ~CLIENT_PROTECTED;
        connSetReadHandler(c->conn,readQueryFromClient); /* reinstall the read handler */
        if (clientHasPendingReplies(c)) clientInstallWriteHandler(c); /* and the write handler if output is pending */
    }
}

/* Like processMultibulkBuffer(), but for the inline protocol instead of RESP,
 * this function consumes the client query buffer and creates a command ready
 * to be executed inside the client structure. Returns C_OK if the command
 * is ready to be executed, or C_ERR if there is still protocol to read to
 * have a well formed command. The function also returns C_ERR when there is
 * a protocol error: in such a case the client structure is setup to reply
 * with the error and close the connection. */
int processInlineBuffer(client *c) {
    char *newline;
    int argc, j, linefeed_chars = 1;
    sds *argv, aux;
    size_t querylen;

    /* Search for end of line */
    newline = strchr(c->querybuf+c->qb_pos,'\n');

    /* Nothing to do without a \r\n */
    if (newline == NULL) {
        if (sdslen(c->querybuf)-c->qb_pos > PROTO_INLINE_MAX_SIZE) { /* a single line over the 64k PROTO_INLINE_MAX_SIZE limit */
            addReplyError(c,"Protocol error: too big inline request");
            setProtocolError("too big inline request",c);
        }
        return C_ERR;
    }

    /* Handle the \r\n case. */
    if (newline && newline != c->querybuf+c->qb_pos && *(newline-1) == '\r')
        newline--, linefeed_chars++;

    /* Split the input buffer up to the \r\n */
    querylen = newline-(c->querybuf+c->qb_pos);
    aux = sdsnewlen(c->querybuf+c->qb_pos,querylen);
    argv = sdssplitargs(aux,&argc);
    sdsfree(aux);
    if (argv == NULL) {
        addReplyError(c,"Protocol error: unbalanced quotes in request");
        setProtocolError("unbalanced quotes in inline request",c);
        return C_ERR;
    }

    /* Newline from slaves can be used to refresh the last ACK time.
     * This is useful for a slave to ping back while loading a big
     * RDB file. */
    if (querylen == 0 && getClientType(c) == CLIENT_TYPE_SLAVE)
        c->repl_ack_time = server.unixtime;

    /* Masters should never send us inline protocol to run actual
     * commands. If this happens, it is likely due to a bug in Redis where
     * we got some desynchronization in the protocol, for example
     * because of a PSYNC gone bad.
     *
     * However there is an exception: masters may send us just a newline
     * to keep the connection active. */
    if (querylen != 0 && c->flags & CLIENT_MASTER) {
        serverLog(LL_WARNING,"WARNING: Receiving inline protocol from master, master stream corruption? Closing the master connection and discarding the cached master.");
        setProtocolError("Master using the inline protocol. Desync?",c);
        return C_ERR;
    }

    /* Move querybuffer position to the next query in the buffer. */
    c->qb_pos += querylen+linefeed_chars;

    /* Setup argv array on client structure */
    if (argc) {
        if (c->argv) zfree(c->argv);
        c->argv = zmalloc(sizeof(robj*)*argc);
    }

    /* Create redis objects for all arguments. */
    for (c->argc = 0, j = 0; j < argc; j++) {
        c->argv[c->argc] = createObject(OBJ_STRING,argv[j]);
        c->argc++;
    }
    zfree(argv); /* free the array itself; the sds strings are now owned by the argv objects */
    return C_OK;
}
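For reference, an inline request is just a space-separated command line terminated by a newline, e.g. the bytes "SET key value\r\n". A minimal standalone sketch of the splitting step, with plain strtok() whitespace splitting standing in for sdssplitargs() (which additionally handles quoting):

#include <stdio.h>
#include <string.h>

int main(void) {
    char line[] = "SET key value"; /* inline request with the \r\n already stripped */
    char *argv[16];
    int argc = 0;

    /* Stand-in for sdssplitargs(): split on spaces only, no quote handling. */
    for (char *tok = strtok(line, " "); tok && argc < 16; tok = strtok(NULL, " "))
        argv[argc++] = tok;

    for (int j = 0; j < argc; j++)
        printf("argv[%d] = %s\n", j, argv[j]);
    return 0;
}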

/* Helper function. Record protocol error details in server log,
 * and set the client as CLIENT_CLOSE_AFTER_REPLY and
 * CLIENT_PROTOCOL_ERROR. */
#define PROTO_DUMP_LEN 128
static void setProtocolError(const char *errstr, client *c) {
    if (server.verbosity <= LL_VERBOSE || c->flags & CLIENT_MASTER) {
        sds client = catClientInfoString(sdsempty(),c); /* render the client info string */

        /* Sample some protocol to give an idea about what was inside. */
        char buf[256];
        if (sdslen(c->querybuf)-c->qb_pos < PROTO_DUMP_LEN) {
            snprintf(buf,sizeof(buf),"Query buffer during protocol error: '%s'", c->querybuf+c->qb_pos);
        } else {
            snprintf(buf,sizeof(buf),"Query buffer during protocol error: '%.*s' (... more %zu bytes ...) '%.*s'", PROTO_DUMP_LEN/2, c->querybuf+c->qb_pos, sdslen(c->querybuf)-c->qb_pos-PROTO_DUMP_LEN, PROTO_DUMP_LEN/2, c->querybuf+sdslen(c->querybuf)-PROTO_DUMP_LEN/2);
        }

        /* Remove non printable chars. */
        char *p = buf;
        while (*p != '\0') {
            if (!isprint(*p)) *p = '.';
            p++;
        }

        /* Log all the client and protocol info. */
        int loglevel = (c->flags & CLIENT_MASTER) ? LL_WARNING :
                                                    LL_VERBOSE;
        serverLog(loglevel,
            "Protocol error (%s) from client: %s. %s", errstr, client, buf);
        sdsfree(client);
    }
    c->flags |= (CLIENT_CLOSE_AFTER_REPLY|CLIENT_PROTOCOL_ERROR);
}

/* Process the query buffer for client 'c', setting up the client argument
 * vector for command execution. Returns C_OK if after running the function
 * the client has a well-formed ready to be processed command, otherwise
 * C_ERR if there is still to read more buffer to get the full command.
 * The function also returns C_ERR when there is a protocol error: in such a
 * case the client structure is setup to reply with the error and close
 * the connection.
 * This function is called if processInputBuffer() detects that the next
 * command is in RESP format, so the first byte in the command is found
 * to be '*'. Otherwise for inline commands processInlineBuffer() is called. */
int processMultibulkBuffer(client *c) {
    char *newline = NULL;
    int ok;
    long long ll;

    if (c->multibulklen == 0) {
        /* The client should have been reset */
        serverAssertWithInfo(c,NULL,c->argc == 0);

        /* Multi bulk length cannot be read without a \r\n */
        newline = strchr(c->querybuf+c->qb_pos,'\r');
        if (newline == NULL) { /* no \r yet: the header line is incomplete */
            if (sdslen(c->querybuf)-c->qb_pos > PROTO_INLINE_MAX_SIZE) { /* header line too long */
                addReplyError(c,"Protocol error: too big mbulk count string");
                setProtocolError("too big mbulk count string",c);
            }
            return C_ERR;
        }

        /* Buffer should also contain \n: if the \r sits on the very last byte
         * of the buffer there is no room left for the \n, so wait for more data. */
        if (newline-(c->querybuf+c->qb_pos) > (ssize_t)(sdslen(c->querybuf)-c->qb_pos-2))
            return C_ERR;

        /* We know for sure there is a whole line since newline != NULL,
         * so go ahead and find out the multi bulk length. */
        serverAssertWithInfo(c,NULL,c->querybuf[c->qb_pos] == '*');
        ok = string2ll(c->querybuf+1+c->qb_pos,newline-(c->querybuf+1+c->qb_pos),&ll); /* parse the count that follows the '*' */
        if (!ok || ll > 1024*1024) { /* parse failed or the count is too large */
            addReplyError(c,"Protocol error: invalid multibulk length");
            setProtocolError("invalid mbulk count",c);
            return C_ERR;
        }

        c->qb_pos = (newline-c->querybuf)+2; /* consume the header: +2 skips the \r\n */

        if (ll <= 0) return C_OK; /* zero or negative count: nothing to read */

        c->multibulklen = ll;

        /* Setup argv array on client structure */
        if (c->argv) zfree(c->argv);
        c->argv = zmalloc(sizeof(robj*)*c->multibulklen); /* allocate the argument vector */
    }

    serverAssertWithInfo(c,NULL,c->multibulklen > 0); /* there must be arguments left to read */
    while(c->multibulklen) {
        /* Read bulk length if unknown */
        if (c->bulklen == -1) {
            newline = strchr(c->querybuf+c->qb_pos,'\r'); /* look for the end of the $<len> line */
            if (newline == NULL) { /* no full line yet */
                if (sdslen(c->querybuf)-c->qb_pos > PROTO_INLINE_MAX_SIZE) {
                    addReplyError(c,
                        "Protocol error: too big bulk count string");
                    setProtocolError("too big bulk count string",c);
                    return C_ERR;
                }
                break;
            }

            /* Buffer should also contain \n */
            if (newline-(c->querybuf+c->qb_pos) > (ssize_t)(sdslen(c->querybuf)-c->qb_pos-2))
                break;

            if (c->querybuf[c->qb_pos] != '$') { /* each argument must start with '$' */
                addReplyErrorFormat(c,
                    "Protocol error: expected '$', got '%c'",
                    c->querybuf[c->qb_pos]);
                setProtocolError("expected $ but got something else",c);
                return C_ERR;
            }

            ok = string2ll(c->querybuf+c->qb_pos+1,newline-(c->querybuf+c->qb_pos+1),&ll);
            if (!ok || ll < 0 || ll > server.proto_max_bulk_len) { /* parse failed, or the length is out of range */
                addReplyError(c,"Protocol error: invalid bulk length");
                setProtocolError("invalid bulk length",c);
                return C_ERR;
            }

            c->qb_pos = newline-c->querybuf+2; /* consume the $<len>\r\n header */
            if (ll >= PROTO_MBULK_BIG_ARG) {
                /* If we are going to read a large object from network
                 * try to make it likely that it will start at c->querybuf
                 * boundary so that we can optimize object creation
                 * avoiding a large copy of data.
                 * But only when the data we have not parsed is less than
                 * or equal to ll+2. If the data length is greater than
                 * ll+2, trimming querybuf is just a waste of time, because
                 * at this time the querybuf contains not only our bulk. */
                if (sdslen(c->querybuf)-c->qb_pos <= (size_t)ll+2) {
                    sdsrange(c->querybuf,c->qb_pos,-1); /* trim everything already consumed */
                    c->qb_pos = 0;
                    /* Hint the sds library about the amount of bytes this string is
                     * going to contain. */
                    c->querybuf = sdsMakeRoomFor(c->querybuf,ll+2);
                }
            }
            c->bulklen = ll;
        }

        /* Read bulk argument */
        if (sdslen(c->querybuf)-c->qb_pos < (size_t)(c->bulklen+2)) {
            /* Not enough data (+2 == trailing \r\n) */
            break;
        } else {
            /* Optimization: if the buffer contains JUST our bulk element
             * instead of creating a new object by *copying* the sds we
             * just use the current sds string. */
            if (c->qb_pos == 0 &&
                c->bulklen >= PROTO_MBULK_BIG_ARG &&
                sdslen(c->querybuf) == (size_t)(c->bulklen+2))
            {
                c->argv[c->argc++] = createObject(OBJ_STRING,c->querybuf);
                sdsIncrLen(c->querybuf,-2); /* remove CRLF */
                /* Assume that if we saw a fat argument we'll see another one
                 * likely... */
                c->querybuf = sdsnewlen(SDS_NOINIT,c->bulklen+2);
                sdsclear(c->querybuf);
            } else {
                c->argv[c->argc++] =
                    createStringObject(c->querybuf+c->qb_pos,c->bulklen);
                c->qb_pos += c->bulklen+2;
            }
            c->bulklen = -1;
            c->multibulklen--;
        }
    }

    /* We're done when c->multibulklen == 0 */
    if (c->multibulklen == 0) return C_OK;

    /* Still not ready to process the command */
    return C_ERR;
}
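As a concrete reference, SET key value arrives on the wire as "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n". A minimal standalone sketch of the same two-level parse (one count line, then a $<len> header per argument), without any of the incremental-buffering and sanity checks above:

#include <stdio.h>
#include <stdlib.h>

int main(void) {
    char buf[] = "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n";
    char *p = buf;

    if (*p++ != '*') return 1;         /* a multibulk request starts with '*' */
    long argc = strtol(p, &p, 10);     /* number of arguments */
    p += 2;                            /* skip the \r\n after the count */

    for (long j = 0; j < argc; j++) {
        if (*p++ != '$') return 1;     /* each argument starts with '$' */
        long len = strtol(p, &p, 10);  /* declared payload length */
        p += 2;                        /* skip the \r\n after the length */
        printf("argv[%ld] = %.*s\n", j, (int)len, p);
        p += len + 2;                  /* skip the payload and its \r\n */
    }
    return 0;
}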

/* Perform necessary tasks after a command was executed:
 *
 * 1. The client is reset unless there are reasons to avoid doing it.
 * 2. In the case of master clients, the replication offset is updated.
 * 3. Propagate commands we got from our master to replicas down the line. */
void commandProcessed(client *c) {
    long long prev_offset = c->reploff;
    if (c->flags & CLIENT_MASTER && !(c->flags & CLIENT_MULTI)) { /* master client, not inside MULTI */
        /* Update the applied replication offset of our master. */
        c->reploff = c->read_reploff - sdslen(c->querybuf) + c->qb_pos;
    }

    /* Don't reset the client structure for clients blocked in a
     * module blocking command, so that the reply callback will
     * still be able to access the client argv and argc field.
     * The client will be reset in unblockClientFromModule(). */
    if (!(c->flags & CLIENT_BLOCKED) || /* not blocked on a module command: reset the client */
        c->btype != BLOCKED_MODULE)
    {
        resetClient(c);
    }

    /* If the client is a master we need to compute the difference
     * between the applied offset before and after processing the buffer,
     * to understand how much of the replication stream was actually
     * applied to the master state: this quantity, and its corresponding
     * part of the replication stream, will be propagated to the
     * sub-replicas and to the replication backlog. */
    if (c->flags & CLIENT_MASTER) {
        long long applied = c->reploff - prev_offset; /* bytes of the stream applied by this command */
        if (applied) {
            replicationFeedSlavesFromMasterStream(server.slaves,
                    c->pending_querybuf, applied); /* propagate that part of the stream to our replicas */
            sdsrange(c->pending_querybuf,applied,-1); /* and drop it from the pending buffer */
        }
    }
}
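A quick worked example of the offset bookkeeping, with made-up numbers: if read_reploff is 1000 while querybuf still holds 30 bytes of which qb_pos = 10 are consumed, then reploff = 1000 - 30 + 10 = 980; and if prev_offset was 950, applied = 30, so exactly those 30 bytes at the front of pending_querybuf are fed to the replicas and then trimmed off.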

/* This function calls processCommand(), but also performs a few sub tasks
 * for the client that are useful in that context:
 *
 * 1. It sets the current client to the client 'c'.
 * 2. calls commandProcessed() if the command was handled.
 *
 * The function returns C_ERR in case the client was freed as a side effect
 * of processing the command, otherwise C_OK is returned. */
int processCommandAndResetClient(client *c) {
    int deadclient = 0;
    server.current_client = c;
    if (processCommand(c) == C_OK) { /* the command was processed */
        commandProcessed(c); /* do the post-command housekeeping */
    }
    if (server.current_client == NULL) deadclient = 1; /* the client was freed while processing */
    server.current_client = NULL;
    /* freeMemoryIfNeeded may flush slave output buffers. This may
     * result into a slave, that may be the active client, to be
     * freed. */
    return deadclient ? C_ERR : C_OK;
}

/* This function is called every time, in the client structure 'c', there is
 * more query buffer to process, because we read more data from the socket
 * or because a client was blocked and later reactivated, so there could be
 * pending query buffer, already representing a full command, to process. */
void processInputBuffer(client *c) {
    /* Keep processing while there is something in the input buffer */
    while(c->qb_pos < sdslen(c->querybuf)) {
        /* Return if clients are paused. */
        if (!(c->flags & CLIENT_SLAVE) && clientsArePaused()) break;

        /* Immediately abort if the client is in the middle of something. */
        if (c->flags & CLIENT_BLOCKED) break;

        /* Don't process more buffers from clients that have already pending
         * commands to execute in c->argv. */
        if (c->flags & CLIENT_PENDING_COMMAND) break; /* a parsed command is already pending */

        /* Don't process input from the master while there is a busy script
         * condition on the slave. We want just to accumulate the replication
         * stream (instead of replying -BUSY like we do with other clients) and
         * later resume the processing. */
        if (server.lua_timedout && c->flags & CLIENT_MASTER) break;

        /* CLIENT_CLOSE_AFTER_REPLY closes the connection once the reply is
         * written to the client. Make sure to not let the reply grow after
         * this flag has been set (i.e. don't process more commands).
         *
         * The same applies for clients we want to terminate ASAP. */
        if (c->flags & (CLIENT_CLOSE_AFTER_REPLY|CLIENT_CLOSE_ASAP)) break;

        /* Determine request type when unknown. */
        if (!c->reqtype) {
            if (c->querybuf[c->qb_pos] == '*') {
                c->reqtype = PROTO_REQ_MULTIBULK; /* RESP multibulk request */
            } else {
                c->reqtype = PROTO_REQ_INLINE; /* inline request */
            }
        }

        if (c->reqtype == PROTO_REQ_INLINE) { /* inline protocol */
            if (processInlineBuffer(c) != C_OK) break;
            /* If the Gopher mode and we got zero or one argument, process
             * the request in Gopher mode. */
            if (server.gopher_enabled &&
                ((c->argc == 1 && ((char*)(c->argv[0]->ptr))[0] == '/') ||
                  c->argc == 0))
            {
                processGopherRequest(c); /* serve the Gopher request */
                resetClient(c);
                c->flags |= CLIENT_CLOSE_AFTER_REPLY;
                break;
            }
        } else if (c->reqtype == PROTO_REQ_MULTIBULK) { /* RESP multibulk */
            if (processMultibulkBuffer(c) != C_OK) break;
        } else {
            serverPanic("Unknown request type");
        }

        /* Multibulk processing could see a <= 0 length. */
        if (c->argc == 0) {
            resetClient(c);
        } else {
            /* If we are in the context of an I/O thread, we can't really
             * execute the command here. All we can do is to flag the client
             * as one that needs to process the command. */
            if (c->flags & CLIENT_PENDING_READ) {
                c->flags |= CLIENT_PENDING_COMMAND;
                break;
            }

            /* We are finally ready to execute the command. */
            if (processCommandAndResetClient(c) == C_ERR) { /* the client was freed while executing */
                /* If the client is no longer valid, we avoid exiting this
                 * loop and trimming the client buffer later. So we return
                 * ASAP in that case. */
                return;
            }
        }
    }

    /* Trim to pos */
    if (c->qb_pos) {
        sdsrange(c->querybuf,c->qb_pos,-1); /* drop everything consumed so far */
        c->qb_pos = 0; /* and reset the read position */
    }
}

/* Read the query from the client connection. */
void readQueryFromClient(connection *conn) {
    client *c = connGetPrivateData(conn); /* the client bound to this connection */
    int nread, readlen;
    size_t qblen;

    /* Check if we want to read from the client later when exiting from
     * the event loop. This is the case if threaded I/O is enabled. */
    if (postponeClientRead(c)) return; /* defer the read to the I/O threads */

    readlen = PROTO_IOBUF_LEN; /* default read size */
    /* If this is a multi bulk request, and we are processing a bulk reply
     * that is large enough, try to maximize the probability that the query
     * buffer contains exactly the SDS string representing the object, even
     * at the risk of requiring more read(2) calls. This way the function
     * processMultiBulkBuffer() can avoid copying buffers to create the
     * Redis Object representing the argument. */
    if (c->reqtype == PROTO_REQ_MULTIBULK && c->multibulklen && c->bulklen != -1
        && c->bulklen >= PROTO_MBULK_BIG_ARG)
    {
        ssize_t remaining = (size_t)(c->bulklen+2)-sdslen(c->querybuf);

        /* Note that the 'remaining' variable may be zero in some edge case,
         * for example once we resume a blocked client after CLIENT PAUSE. */
        if (remaining > 0 && remaining < readlen) readlen = remaining;
    }

    qblen = sdslen(c->querybuf);
    if (c->querybuf_peak < qblen) c->querybuf_peak = qblen; /* track the buffer's peak size */
    c->querybuf = sdsMakeRoomFor(c->querybuf, readlen); /* make room for the read */
    nread = connRead(c->conn, c->querybuf+qblen, readlen); /* read from the connection */
    if (nread == -1) { /* the read failed */
        if (connGetState(conn) == CONN_STATE_CONNECTED) { /* still connected: just retry later */
            return;
        } else {
            serverLog(LL_VERBOSE, "Reading from client: %s",connGetLastError(c->conn));
            freeClientAsync(c);
            return;
        }
    } else if (nread == 0) { /* the peer closed the connection */
        serverLog(LL_VERBOSE, "Client closed connection");
        freeClientAsync(c);
        return;
    } else if (c->flags & CLIENT_MASTER) { /* reading from our master */
        /* Append the query buffer to the pending (not applied) buffer
         * of the master. We'll use this buffer later in order to have a
         * copy of the string applied by the last command executed. */
        c->pending_querybuf = sdscatlen(c->pending_querybuf,
                                        c->querybuf+qblen,nread);
    }

    sdsIncrLen(c->querybuf,nread); /* commit the bytes just read into the sds length */
    c->lastinteraction = server.unixtime; /* update the last interaction time */
    if (c->flags & CLIENT_MASTER) c->read_reploff += nread; /* advance the master read offset */
    server.stat_net_input_bytes += nread;
    if (sdslen(c->querybuf) > server.client_max_querybuf_len) { /* query buffer over the configured limit */
        sds ci = catClientInfoString(sdsempty(),c), bytes = sdsempty();

        bytes = sdscatrepr(bytes,c->querybuf,64);
        serverLog(LL_WARNING,"Closing client that reached max query buffer length: %s (qbuf initial bytes: %s)", ci, bytes);
        sdsfree(ci);
        sdsfree(bytes);
        freeClientAsync(c);
        return;
    }

    /* There is more data in the client input buffer, continue parsing it
     * in case to check if there is a full command to execute. */
    processInputBuffer(c);
}
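A quick worked example of the readlen capping above, with made-up numbers: if a client is in the middle of a 1 MB bulk argument (bulklen = 1048576) and querybuf already holds 1040000 bytes of it, then remaining = 1048576 + 2 - 1040000 = 8578, so readlen is capped at exactly the missing bytes and the argument ends right at the buffer boundary, which can let processMultibulkBuffer() take its no-copy path.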

/* Return the longest output list and the biggest input buffer among all clients. */
void getClientsMaxBuffers(unsigned long *longest_output_list,
                          unsigned long *biggest_input_buffer) {
    client *c;
    listNode *ln;
    listIter li;
    unsigned long lol = 0, bib = 0;

    listRewind(server.clients,&li);
    while ((ln = listNext(&li)) != NULL) { /* walk all clients */
        c = listNodeValue(ln);

        if (listLength(c->reply) > lol) lol = listLength(c->reply); /* longest output list */
        if (sdslen(c->querybuf) > bib) bib = sdslen(c->querybuf); /* biggest input buffer */
    }
    *longest_output_list = lol;
    *biggest_input_buffer = bib;
}

/* A Redis "Peer ID" is a colon separated ip:port pair.
 * For IPv4 it's in the form x.y.z.k:port, example: "127.0.0.1:1234".
 * For IPv6 addresses we use [] around the IP part, like in "[::1]:1234".
 * For Unix sockets we use path:0, like in "/tmp/redis:0".
 *
 * A Peer ID always fits inside a buffer of NET_PEER_ID_LEN bytes, including
 * the null term.
 *
 * On failure the function still populates 'peerid' with the "?:0" string
 * in case you want to relax error checking or need to display something
 * anyway (see anetPeerToString implementation for more info). */
void genClientPeerId(client *client, char *peerid,
                            size_t peerid_len) {
    if (client->flags & CLIENT_UNIX_SOCKET) {
        /* Unix socket client. */
        snprintf(peerid,peerid_len,"%s:0",server.unixsocket);
    } else {
        /* TCP client. */
        connFormatPeer(client->conn,peerid,peerid_len);
    }
}

/* This function returns the client peer id, by creating and caching it
 * if client->peerid is NULL, otherwise returning the cached value.
 * The Peer ID never changes during the life of the client, however it
 * is expensive to compute. */
char *getClientPeerId(client *c) {
    char peerid[NET_PEER_ID_LEN];

    if (c->peerid == NULL) {
        genClientPeerId(c,peerid,sizeof(peerid));
        c->peerid = sdsnew(peerid);
    }
    return c->peerid;
}

/* Concatenate a string representing the state of a client in an human
 * readable format, into the sds string 's'. */
sds catClientInfoString(sds s, client *client) {
    char flags[16], events[3], conninfo[CONN_INFO_LEN], *p;

    p = flags;
    if (client->flags & CLIENT_SLAVE) { /* replica link */
        if (client->flags & CLIENT_MONITOR)
            *p++ = 'O'; /* monitor */
        else
            *p++ = 'S'; /* slave */
    }
    if (client->flags & CLIENT_MASTER) *p++ = 'M'; /* master */
    if (client->flags & CLIENT_PUBSUB) *p++ = 'P'; /* pub/sub */
    if (client->flags & CLIENT_MULTI) *p++ = 'x'; /* inside a MULTI transaction */
    if (client->flags & CLIENT_BLOCKED) *p++ = 'b'; /* blocked */
    if (client->flags & CLIENT_TRACKING) *p++ = 't'; /* key tracking on */
    if (client->flags & CLIENT_TRACKING_BROKEN_REDIR) *p++ = 'R';
    if (client->flags & CLIENT_DIRTY_CAS) *p++ = 'd';
    if (client->flags & CLIENT_CLOSE_AFTER_REPLY) *p++ = 'c';
    if (client->flags & CLIENT_UNBLOCKED) *p++ = 'u';
    if (client->flags & CLIENT_CLOSE_ASAP) *p++ = 'A';
    if (client->flags & CLIENT_UNIX_SOCKET) *p++ = 'U';
    if (client->flags & CLIENT_READONLY) *p++ = 'r';
    if (p == flags) *p++ = 'N';
    *p++ = '\0';

    p = events;
    if (client->conn) {
        if (connHasReadHandler(client->conn)) *p++ = 'r';
        if (connHasWriteHandler(client->conn)) *p++ = 'w';
    }
    *p = '\0';
    return sdscatfmt(s,
        "id=%U addr=%s %s name=%s age=%I idle=%I flags=%s db=%i sub=%i psub=%i multi=%i qbuf=%U qbuf-free=%U obl=%U oll=%U omem=%U events=%s cmd=%s user=%s",
        (unsigned long long) client->id,
        getClientPeerId(client),
        connGetInfo(client->conn, conninfo, sizeof(conninfo)),
        client->name ? (char*)client->name->ptr : "",
        (long long)(server.unixtime - client->ctime),
        (long long)(server.unixtime - client->lastinteraction),
        flags,
        client->db->id,
        (int) dictSize(client->pubsub_channels),
        (int) listLength(client->pubsub_patterns),
        (client->flags & CLIENT_MULTI) ? client->mstate.count : -1,
        (unsigned long long) sdslen(client->querybuf),
        (unsigned long long) sdsavail(client->querybuf),
        (unsigned long long) client->bufpos,
        (unsigned long long) listLength(client->reply),
        (unsigned long long) getClientOutputBufferMemoryUsage(client),
        events,
        client->lastcmd ? client->lastcmd->name : "NULL",
        client->user ? client->user->name : "(superuser)");
}
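For reference, one line of the resulting CLIENT LIST output looks roughly like this (illustrative values, not captured from a real server):

id=3 addr=127.0.0.1:53422 fd=8 name= age=12 idle=0 flags=N db=0 sub=0 psub=0 multi=-1 qbuf=26 qbuf-free=32742 obl=0 oll=0 omem=0 events=r cmd=client user=default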
/* Collect the info string of all the clients (optionally filtered by type). */
sds getAllClientsInfoString(int type) {
    listNode *ln;
    listIter li;
    client *client;
    sds o = sdsnewlen(SDS_NOINIT,200*listLength(server.clients));
    sdsclear(o);
    listRewind(server.clients,&li);
    while ((ln = listNext(&li)) != NULL) { /* walk all clients */
        client = listNodeValue(ln);
        if (type != -1 && getClientType(client) != type) continue;
        o = catClientInfoString(o,client); /* append this client's info */
        o = sdscatlen(o,"\n",1); /* one client per line */
    }
    return o;
}

/* This function implements CLIENT SETNAME, including replying to the
 * user with an error if the charset is wrong (in that case C_ERR is
 * returned). If the function succeeded C_OK is returned, and it's up
 * to the caller to send a reply if needed.
 *
 * Setting an empty string as name has the effect of unsetting the
 * currently set name: the client will remain unnamed.
 *
 * This function is also used to implement the HELLO SETNAME option. */
int clientSetNameOrReply(client *c, robj *name) {
    int len = sdslen(name->ptr); /* length of the proposed name */
    char *p = name->ptr;

    /* Setting the client name to an empty string actually removes
     * the current name. */
    if (len == 0) {
        if (c->name) decrRefCount(c->name);
        c->name = NULL;
        return C_OK;
    }

    /* Otherwise check if the charset is ok. We need to do this otherwise
     * CLIENT LIST format will break. You should always be able to
     * split by space to get the different fields. */
    for (int j = 0; j < len; j++) {
        if (p[j] < '!' || p[j] > '~') { /* ASCII is assumed. */
            addReplyError(c,
                "Client names cannot contain spaces, "
                "newlines or special characters.");
            return C_ERR;
        }
    }
    if (c->name) decrRefCount(c->name);
    c->name = name;
    incrRefCount(name);
    return C_OK;
}
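The accepted range '!' (0x21) through '~' (0x7E) is exactly the printable ASCII set minus the space character, which is what keeps the space-separated CLIENT LIST output parseable.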

/* The CLIENT command: dispatches to the subcommands listed by CLIENT HELP. */
void clientCommand(client *c) {
    listNode *ln;
    listIter li;

    if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"help")) { /* CLIENT HELP: show usage */
        const char *help[] = {
"ID                     -- Return the ID of the current connection.",
"GETNAME                -- Return the name of the current connection.",
"KILL <ip:port>         -- Kill connection made from <ip:port>.",
"KILL <option> <value> [option value ...] -- Kill connections. Options are:",
"     ADDR <ip:port>                      -- Kill connection made from <ip:port>",
"     TYPE (normal|master|replica|pubsub) -- Kill connections by type.",
"     USER <username>   -- Kill connections authenticated with such user.",
"     SKIPME (yes|no)   -- Skip killing current connection (default: yes).",
"LIST [options ...]     -- Return information about client connections. Options:",
"     TYPE (normal|master|replica|pubsub) -- Return clients of specified type.",
"PAUSE <timeout>        -- Suspend all Redis clients for <timeout> milliseconds.",
"REPLY (on|off|skip)    -- Control the replies sent to the current connection.",
"SETNAME <name>         -- Assign the name <name> to the current connection.",
"UNBLOCK <clientid> [TIMEOUT|ERROR] -- Unblock the specified blocked client.",
"TRACKING (on|off) [REDIRECT <id>] [BCAST] [PREFIX first] [PREFIX second] [OPTIN] [OPTOUT]... -- Enable client keys tracking for client side caching.",
"GETREDIR               -- Return the client ID we are redirecting to when tracking is enabled.",
NULL
        };
        addReplyHelp(c, help);
    } else if (!strcasecmp(c->argv[1]->ptr,"id") && c->argc == 2) {
        /* CLIENT ID */
        addReplyLongLong(c,c->id);
    } else if (!strcasecmp(c->argv[1]->ptr,"list")) {
        /* CLIENT LIST */
        int type = -1;
        if (c->argc == 4 && !strcasecmp(c->argv[2]->ptr,"type")) { /* with 4 args a type filter is given */
            type = getClientTypeByName(c->argv[3]->ptr);
            if (type == -1) {
                addReplyErrorFormat(c,"Unknown client type '%s'",
                    (char*) c->argv[3]->ptr);
                return;
             }
        } else if (c->argc != 2) {
            addReply(c,shared.syntaxerr);
            return;
        }
        sds o = getAllClientsInfoString(type); /* collect all matching clients */
        addReplyVerbatim(c,o,sdslen(o),"txt"); /* reply as verbatim text */
        sdsfree(o);
    } else if (!strcasecmp(c->argv[1]->ptr,"reply") && c->argc == 3) {
        /* CLIENT REPLY ON|OFF|SKIP */
        if (!strcasecmp(c->argv[2]->ptr,"on")) { /* re-enable replies */
            c->flags &= ~(CLIENT_REPLY_SKIP|CLIENT_REPLY_OFF);
            addReply(c,shared.ok);
        } else if (!strcasecmp(c->argv[2]->ptr,"off")) {
            c->flags |= CLIENT_REPLY_OFF; /* turn replies off */
        } else if (!strcasecmp(c->argv[2]->ptr,"skip")) {
            if (!(c->flags & CLIENT_REPLY_OFF))
                c->flags |= CLIENT_REPLY_SKIP_NEXT; /* skip just the next reply */
        } else {
            addReply(c,shared.syntaxerr);
            return;
        }
    } else if (!strcasecmp(c->argv[1]->ptr,"kill")) {
        /* CLIENT KILL <ip:port>
         * CLIENT KILL <option> [value] ... <option> [value] */
        char *addr = NULL;
        user *user = NULL;
        int type = -1;
        uint64_t id = 0;
        int skipme = 1;
        int killed = 0, close_this_client = 0;

        if (c->argc == 3) {
            /* Old style syntax: CLIENT KILL <addr> */
            addr = c->argv[2]->ptr;
            skipme = 0; /* With the old form, you can kill yourself. */
        } else if (c->argc > 3) {
            int i = 2; /* Next option index. */

            /* New style syntax: parse options. */
            while(i < c->argc) {
                int moreargs = c->argc > i+1;

                if (!strcasecmp(c->argv[i]->ptr,"id") && moreargs) { /* match by client id */
                    long long tmp;

                    if (getLongLongFromObjectOrReply(c,c->argv[i+1],&tmp,NULL)
                        != C_OK) return;
                    id = tmp;
                } else if (!strcasecmp(c->argv[i]->ptr,"type") && moreargs) { /* match by client type */
                    type = getClientTypeByName(c->argv[i+1]->ptr);
                    if (type == -1) {
                        addReplyErrorFormat(c,"Unknown client type '%s'",
                            (char*) c->argv[i+1]->ptr);
                        return;
                    }
                } else if (!strcasecmp(c->argv[i]->ptr,"addr") && moreargs) { /* match by address */
                    addr = c->argv[i+1]->ptr;
                } else if (!strcasecmp(c->argv[i]->ptr,"user") && moreargs) { /* match by ACL user */
                    user = ACLGetUserByName(c->argv[i+1]->ptr,
                                            sdslen(c->argv[i+1]->ptr));
                    if (user == NULL) {
                        addReplyErrorFormat(c,"No such user '%s'",
                            (char*) c->argv[i+1]->ptr);
                        return;
                    }
                } else if (!strcasecmp(c->argv[i]->ptr,"skipme") && moreargs) { /* whether to skip the calling client */
                    if (!strcasecmp(c->argv[i+1]->ptr,"yes")) {
                        skipme = 1;
                    } else if (!strcasecmp(c->argv[i+1]->ptr,"no")) {
                        skipme = 0;
                    } else {
                        addReply(c,shared.syntaxerr);
                        return;
                    }
                } else {
                    addReply(c,shared.syntaxerr);
                    return;
                }
                i += 2;
            }
        } else {
            addReply(c,shared.syntaxerr);
            return;
        }

        /* Iterate clients killing all the matching clients. */
        listRewind(server.clients,&li);
        while ((ln = listNext(&li)) != NULL) {
            client *client = listNodeValue(ln);
            if (addr && strcmp(getClientPeerId(client),addr) != 0) continue; /* address filter */
            if (type != -1 && getClientType(client) != type) continue; /* type filter */
            if (id != 0 && client->id != id) continue; /* id filter */
            if (user && client->user != user) continue; /* user filter */
            if (c == client && skipme) continue; /* skip ourselves if requested */

            /* Kill it. */
            if (c == client) { /* killing ourselves: defer the actual close */
                close_this_client = 1;
            } else {
                freeClient(client);
            }
            killed++;
        }

        /* Reply according to old/new format. */
        if (c->argc == 3) {
            if (killed == 0) /* no client matched */
                addReplyError(c,"No such client");
            else
                addReply(c,shared.ok);
        } else {
            addReplyLongLong(c,killed);
        }

        /* If this client has to be closed, flag it as CLOSE_AFTER_REPLY
         * only after we queued the reply to its output buffers. */
        if (close_this_client) c->flags |= CLIENT_CLOSE_AFTER_REPLY;
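        /* Usage note (illustrative address): the new form
         *   CLIENT KILL ADDR 127.0.0.1:53422 SKIPME no
         * replies with the number of connections killed, while the old form
         *   CLIENT KILL 127.0.0.1:53422
         * replies +OK on success or "No such client" otherwise, as the
         * branch above shows. */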
    } else if (!strcasecmp(c->argv[1]->ptr,"unblock") && (c->argc == 3 ||
                                                          c->argc == 4))
    {
        /* CLIENT UNBLOCK <id> [timeout|error] */
        long long id;
        int unblock_error = 0;

        if (c->argc == 4) {
            if (!strcasecmp(c->argv[3]->ptr,"timeout")) { /* unblock as if the timeout fired */
                unblock_error = 0;
            } else if (!strcasecmp(c->argv[3]->ptr,"error")) { /* force-unblock with an error */
                unblock_error = 1;
            } else {
                addReplyError(c,
                    "CLIENT UNBLOCK reason should be TIMEOUT or ERROR");
                return;
            }
        }
        if (getLongLongFromObjectOrReply(c,c->argv[2],&id,NULL) /* parse the target id */
            != C_OK) return;
        struct client *target = lookupClientByID(id);
        if (target && target->flags & CLIENT_BLOCKED) { /* the target exists and is blocked */
            if (unblock_error)
                addReplyError(target,
                    "-UNBLOCKED client unblocked via CLIENT UNBLOCK");
            else
                replyToBlockedClientTimedOut(target);
            unblockClient(target); /* actually unblock it */
            addReply(c,shared.cone);
        } else {
            addReply(c,shared.czero);
        }
    } else if (!strcasecmp(c->argv[1]->ptr,"setname") && c->argc == 3) {
        /* CLIENT SETNAME */
        if (clientSetNameOrReply(c,c->argv[2]) == C_OK)
            addReply(c,shared.ok);
    } else if (!strcasecmp(c->argv[1]->ptr,"getname") && c->argc == 2) {
        /* CLIENT GETNAME */
        if (c->name)
            addReplyBulk(c,c->name);
        else
            addReplyNull(c);
    } else if (!strcasecmp(c->argv[1]->ptr,"pause") && c->argc == 3) {
        /* CLIENT PAUSE */
        long long duration;

        if (getTimeoutFromObjectOrReply(c,c->argv[2],&duration,
                UNIT_MILLISECONDS) != C_OK) return;
        pauseClients(duration); /* suspend all clients for that many milliseconds */
        addReply(c,shared.ok);
    } else if (!strcasecmp(c->argv[1]->ptr,"tracking") && c->argc >= 3) {
        /* CLIENT TRACKING (on|off) [REDIRECT <id>] [BCAST] [PREFIX first]
         *                          [PREFIX second] [OPTIN] [OPTOUT] ... */
        long long redir = 0;
        uint64_t options = 0;
        robj **prefix = NULL;
        size_t numprefix = 0;

        /* Parse the options. */
        for (int j = 3; j < c->argc; j++) {
            int moreargs = (c->argc-1) - j;

            if (!strcasecmp(c->argv[j]->ptr,"redirect") && moreargs) { /* REDIRECT <client-id> */
                j++;
                if (redir != 0) {
                    addReplyError(c,"A client can only redirect to a single "
                                    "other client");
                    zfree(prefix);
                    return;
                }

                if (getLongLongFromObjectOrReply(c,c->argv[j],&redir,NULL) !=
                    C_OK)
                {
                    zfree(prefix);
                    return;
                }
                /* We will require the client with the specified ID to exist
                 * right now, even if it is possible that it gets disconnected
                 * later. Still a valid sanity check. */
                if (lookupClientByID(redir) == NULL) {
                    addReplyError(c,"The client ID you want redirect to "
                                    "does not exist");
                    zfree(prefix);
                    return;
                }
            } else if (!strcasecmp(c->argv[j]->ptr,"bcast")) { /* broadcasting mode */
                options |= CLIENT_TRACKING_BCAST;
            } else if (!strcasecmp(c->argv[j]->ptr,"optin")) {
                options |= CLIENT_TRACKING_OPTIN;
            } else if (!strcasecmp(c->argv[j]->ptr,"optout")) {
                options |= CLIENT_TRACKING_OPTOUT;
            } else if (!strcasecmp(c->argv[j]->ptr,"noloop")) {
                options |= CLIENT_TRACKING_NOLOOP;
            } else if (!strcasecmp(c->argv[j]->ptr,"prefix") && moreargs) {
                j++;
                prefix = zrealloc(prefix,sizeof(robj*)*(numprefix+1)); /* grow the prefix array by one slot */
                prefix[numprefix++] = c->argv[j];
            } else {
                zfree(prefix);
                addReply(c,shared.syntaxerr);
                return;
            }
        }

        /* Options are ok: enable or disable the tracking for this client. */
        if (!strcasecmp(c->argv[2]->ptr,"on")) {
            /* Before enabling tracking, make sure options are compatible
             * among each other and with the current state of the client. */
            if (!(options & CLIENT_TRACKING_BCAST) && numprefix) { /* PREFIX requires BCAST mode */
                addReplyError(c,
                    "PREFIX option requires BCAST mode to be enabled");
                zfree(prefix);
                return;
            }

            if (c->flags & CLIENT_TRACKING) {
                int oldbcast = !!(c->flags & CLIENT_TRACKING_BCAST); /* !! normalizes to 0/1: was BCAST on? */
                int newbcast = !!(options & CLIENT_TRACKING_BCAST); /* is BCAST requested now? */
                if (oldbcast != newbcast) { /* BCAST mode can't be flipped while tracking is on */
                    addReplyError(c,
                    "You can't switch BCAST mode on/off before disabling "
                    "tracking for this client, and then re-enabling it with "
                    "a different mode.");
                    zfree(prefix);
                    return;
                }
            }

            if (options & CLIENT_TRACKING_BCAST &&
                options & (CLIENT_TRACKING_OPTIN|CLIENT_TRACKING_OPTOUT)) /* BCAST excludes OPTIN/OPTOUT */
            {
                addReplyError(c,
                "OPTIN and OPTOUT are not compatible with BCAST");
                zfree(prefix);
                return;
            }

            if (options & CLIENT_TRACKING_OPTIN && options & CLIENT_TRACKING_OPTOUT) /* OPTIN and OPTOUT are mutually exclusive */
            {
                addReplyError(c,
                "You can't specify both OPTIN mode and OPTOUT mode");
                zfree(prefix);
                return;
            }

            if ((options & CLIENT_TRACKING_OPTIN && c->flags & CLIENT_TRACKING_OPTOUT) ||  can't switch between OPTIN and OPTOUT while tracking is on
                (options & CLIENT_TRACKING_OPTOUT && c->flags & CLIENT_TRACKING_OPTIN))
            {
                addReplyError(c,
                "You can't switch OPTIN/OPTOUT mode before disabling "
                "tracking for this client, and then re-enabling it with "
                "a different mode.");
                zfree(prefix);
                return;
            }

            enableTracking(c,redir,options,prefix,numprefix); enable tracking
        } else if (!strcasecmp(c->argv[2]->ptr,"off")) { disable tracking
            disableTracking(c);
        } else {
            zfree(prefix);
            addReply(c,shared.syntaxerr);
            return;
        }
        zfree(prefix);
        addReply(c,shared.ok);
    } else if (!strcasecmp(c->argv[1]->ptr,"caching") && c->argc >= 3) { 缓存模式
        if (!(c->flags & CLIENT_TRACKING)) {
            addReplyError(c,"CLIENT CACHING can be called only when the "
                            "client is in tracking mode with OPTIN or "
                            "OPTOUT mode enabled");
            return;
        }

        char *opt = c->argv[2]->ptr;
        if (!strcasecmp(opt,"yes")) {
            if (c->flags & CLIENT_TRACKING_OPTIN) {
                c->flags |= CLIENT_TRACKING_CACHING;
            } else {
                addReplyError(c,"CLIENT CACHING YES is only valid when tracking is enabled in OPTIN mode.");
                return;
            }
        } else if (!strcasecmp(opt,"no")) {
            if (c->flags & CLIENT_TRACKING_OPTOUT) {
                c->flags |= CLIENT_TRACKING_CACHING;
            } else {
                addReplyError(c,"CLIENT CACHING NO is only valid when tracking is enabled in OPTOUT mode.");
                return;
            }
        } else {
            addReply(c,shared.syntaxerr);
            return;
        }

        /* Common reply for when we succeeded. */
        addReply(c,shared.ok);
    } else if (!strcasecmp(c->argv[1]->ptr,"getredir") && c->argc == 2) {
        /* CLIENT GETREDIR */
        if (c->flags & CLIENT_TRACKING) {
            addReplyLongLong(c,c->client_tracking_redirection);
        } else {
            addReplyLongLong(c,-1);
        }
    } else {
        addReplyErrorFormat(c, "Unknown subcommand or wrong number of arguments for '%s'. Try CLIENT HELP", (char*)c->argv[1]->ptr);
    }
}
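
Putting the option checks together, here is a hypothetical redis-cli session (the prefixes are made up; the error texts are the ones produced by the code above, with the "ERR " prefix that addReplyError adds to messages that don't supply their own error code):

    127.0.0.1:6379> CLIENT TRACKING on PREFIX user:
    (error) ERR PREFIX option requires BCAST mode to be enabled
    127.0.0.1:6379> CLIENT TRACKING on BCAST OPTIN
    (error) ERR OPTIN and OPTOUT are not compatible with BCAST
    127.0.0.1:6379> CLIENT TRACKING on BCAST PREFIX user: PREFIX session:
    OK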

/* HELLO <protocol-version> [AUTH <user> <password>] [SETNAME <name>] */
void helloCommand(client *c) {
    long long ver;

    if (getLongLongFromObject(c->argv[1],&ver) != C_OK ||  the protocol version
        ver < 2 || ver > 3)
    {
        addReplyError(c,"-NOPROTO unsupported protocol version");
        return;
    }

    for (int j = 2; j < c->argc; j++) {
        int moreargs = (c->argc-1) - j;
        const char *opt = c->argv[j]->ptr;
        if (!strcasecmp(opt,"AUTH") && moreargs >= 2) {
            if (ACLAuthenticateUser(c, c->argv[j+1], c->argv[j+2]) == C_ERR) { authenticate the user
                addReplyError(c,"-WRONGPASS invalid username-password pair");
                return;
            }
            j += 2;
        } else if (!strcasecmp(opt,"SETNAME") && moreargs) { 设置新名
            if (clientSetNameOrReply(c, c->argv[j+1]) == C_ERR) return;
            j++;
        } else {
            addReplyErrorFormat(c,"Syntax error in HELLO option '%s'",opt);
            return;
        }
    }

    /* At this point we need to be authenticated to continue. */
    if (!c->authenticated) {
        addReplyError(c,"-NOAUTH HELLO must be called with the client already "
                        "authenticated, otherwise the HELLO AUTH <user> <pass> "
                        "option can be used to authenticate the client and "
                        "select the RESP protocol version at the same time");
        return;
    }

    /* Let's switch to the specified RESP mode. */
    c->resp = ver;
    addReplyMapLen(c,6 + !server.sentinel_mode);

    addReplyBulkCString(c,"server");
    addReplyBulkCString(c,"redis");

    addReplyBulkCString(c,"version");
    addReplyBulkCString(c,REDIS_VERSION);

    addReplyBulkCString(c,"proto");
    addReplyLongLong(c,ver);

    addReplyBulkCString(c,"id");
    addReplyLongLong(c,c->id);

    addReplyBulkCString(c,"mode");
    if (server.sentinel_mode) addReplyBulkCString(c,"sentinel");
    else if (server.cluster_enabled) addReplyBulkCString(c,"cluster");
    else addReplyBulkCString(c,"standalone");

    if (!server.sentinel_mode) {
        addReplyBulkCString(c,"role");
        addReplyBulkCString(c,server.masterhost ? "replica" : "master");
    }

    addReplyBulkCString(c,"modules");
    addReplyLoadedModules(c);
}
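
For reference, a HELLO 3 handshake against a hypothetical standalone 6.0.5 instance produces a seven-entry map built exactly as above (the id of 42 is made up):

    127.0.0.1:6379> HELLO 3
    1# "server" => "redis"
    2# "version" => "6.0.5"
    3# "proto" => (integer) 3
    4# "id" => (integer) 42
    5# "mode" => "standalone"
    6# "role" => "master"
    7# "modules" => (empty array)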

/* This callback is bound to POST and "Host:" command names. Those are not
 * really commands, but are used in security attacks in order to talk to
 * Redis instances via HTTP, with a technique called "cross protocol scripting"
 * which exploits the fact that services like Redis will discard invalid
 * HTTP headers and will process what follows.
 * As a protection against this attack, Redis will terminate the connection
 * when a POST or "Host:" header is seen, and will log the event from
 * time to time (to avoid creating a DOS as a result of too many logs). */
void securityWarningCommand(client *c) {
    static time_t logged_time;
    time_t now = time(NULL);

    if (labs(now-logged_time) > 60) {
        serverLog(LL_WARNING,"Possible SECURITY ATTACK detected. It looks like somebody is sending POST or Host: commands to Redis. This is likely due to an attacker attempting to use Cross Protocol Scripting to compromise your Redis instance. Connection aborted.");
        logged_time = now;
    }
    freeClientAsync(c);
}

/* Rewrite the command vector of the client. All the new objects ref count
 * is incremented. The old command vector is freed, and the old objects
 * ref count is decremented. */
void rewriteClientCommandVector(client *c, int argc, ...) {
    va_list ap;
    int j;
    robj **argv; /* The new argument vector */

    argv = zmalloc(sizeof(robj*)*argc); allocate the new vector
    va_start(ap,argc);
    for (j = 0; j < argc; j++) {
        robj *a;

        a = va_arg(ap, robj*);
        argv[j] = a;
        incrRefCount(a); bump the new object's refcount
    }
    /* We free the objects in the original vector at the end, so we are
     * sure that if the same objects are reused in the new vector the
     * refcount gets incremented before it gets decremented. */
    for (j = 0; j < c->argc; j++) decrRefCount(c->argv[j]);
    zfree(c->argv);
    /* Replace argv and argc with our new versions. */
    c->argv = argv;
    c->argc = argc;
    c->cmd = lookupCommandOrOriginal(c->argv[0]->ptr);
    serverAssertWithInfo(c,NULL,c->cmd != NULL);
    va_end(ap);
}
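
A typical caller is a clock-dependent command that must not replicate verbatim. The sketch below is illustrative (not the verbatim Redis code) of the pattern expireGenericCommand uses to propagate EXPIRE as an absolute PEXPIREAT, assuming `when` holds the absolute expire time in milliseconds:

    robj *pexpireat = createStringObject("PEXPIREAT", 9);
    robj *when_obj = createStringObjectFromLongLong(when);
    rewriteClientCommandVector(c, 3, pexpireat, c->argv[1], when_obj);
    decrRefCount(pexpireat); /* the rewritten argv holds its own references */
    decrRefCount(when_obj);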

/* Completely replace the client command vector with the provided one. */
void replaceClientCommandVector(client *c, int argc, robj **argv) {
    freeClientArgv(c);
    zfree(c->argv);
    c->argv = argv;
    c->argc = argc;
    c->cmd = lookupCommandOrOriginal(c->argv[0]->ptr);
    serverAssertWithInfo(c,NULL,c->cmd != NULL);
}

/* Rewrite a single item in the command vector.
 * The new val ref count is incremented, and the old decremented.
 *
 * It is possible to specify an argument over the current size of the
 * argument vector: in this case the array of objects gets reallocated
 * and c->argc set to the max value. However it's up to the caller to
 * 1. Make sure there are no "holes" and all the arguments are set.
 * 2. If the original argument vector was longer than the one we
 *    want to end with, it's up to the caller to set c->argc and
 *    free the no longer used objects on c->argv. */
void rewriteClientCommandArgument(client *c, int i, robj *newval) {
    robj *oldval;

    if (i >= c->argc) {
        c->argv = zrealloc(c->argv,sizeof(robj*)*(i+1)); index beyond the current argc: grow the vector
        c->argc = i+1;
        c->argv[i] = NULL;
    }
    oldval = c->argv[i];
    c->argv[i] = newval;
    incrRefCount(newval);
    if (oldval) decrRefCount(oldval); drop the old value's reference

    /* If this is the command name make sure to fix c->cmd. */
    if (i == 0) {
        c->cmd = lookupCommandOrOriginal(c->argv[0]->ptr);
        serverAssertWithInfo(c,NULL,c->cmd != NULL);
    }
}
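
A well-known user of this function is INCRBYFLOAT, which replicates as a SET of the computed result so replicas cannot drift through float rounding. A sketch of that pattern, where `new` is assumed to be the string object holding the result:

    robj *aux = createStringObject("SET", 3);
    rewriteClientCommandArgument(c, 0, aux);  /* argv[0]: INCRBYFLOAT -> SET */
    decrRefCount(aux);
    rewriteClientCommandArgument(c, 2, new);  /* argv[2]: increment -> result */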

/* This function returns the number of bytes that Redis is
 * using to store the reply still not read by the client.
 * Note: this function is very fast so can be called as many time as
 * the caller wishes. The main usage of this function currently is
 * enforcing the client output length limits. */
unsigned long getClientOutputBufferMemoryUsage(client *c) {
    unsigned long list_item_size = sizeof(listNode) + sizeof(clientReplyBlock);
    return c->reply_bytes + (list_item_size*listLength(c->reply));
}
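
For example, a client with reply_bytes == 1000000 and 10 blocks on the reply list is charged 1000000 + 10 * (sizeof(listNode) + sizeof(clientReplyBlock)) bytes, so the bookkeeping overhead of the list itself counts toward the limit as well.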

/* Get the class of a client, used in order to enforce limits to different
 * classes of clients.
 *
 * The function will return one of the following:
 * CLIENT_TYPE_NORMAL -> Normal client
 * CLIENT_TYPE_SLAVE  -> Slave
 * CLIENT_TYPE_PUBSUB -> Client subscribed to Pub/Sub channels
 * CLIENT_TYPE_MASTER -> The client representing our replication master.
 */
int getClientType(client *c) {
    if (c->flags & CLIENT_MASTER) return CLIENT_TYPE_MASTER;
    /* Even though MONITOR clients are marked as replicas, we
     * want the expose them as normal clients. */
    if ((c->flags & CLIENT_SLAVE) && !(c->flags & CLIENT_MONITOR))
        return CLIENT_TYPE_SLAVE;
    if (c->flags & CLIENT_PUBSUB) return CLIENT_TYPE_PUBSUB;
    return CLIENT_TYPE_NORMAL;
}
Get the client type from its name:
int getClientTypeByName(char *name) {
    if (!strcasecmp(name,"normal")) return CLIENT_TYPE_NORMAL;
    else if (!strcasecmp(name,"slave")) return CLIENT_TYPE_SLAVE;
    else if (!strcasecmp(name,"replica")) return CLIENT_TYPE_SLAVE;
    else if (!strcasecmp(name,"pubsub")) return CLIENT_TYPE_PUBSUB;
    else if (!strcasecmp(name,"master")) return CLIENT_TYPE_MASTER;
    else return -1;
}
Get the textual name of a client type:
char *getClientTypeName(int class) {
    switch(class) {
    case CLIENT_TYPE_NORMAL: return "normal";
    case CLIENT_TYPE_SLAVE:  return "slave";
    case CLIENT_TYPE_PUBSUB: return "pubsub";
    case CLIENT_TYPE_MASTER: return "master";
    default:                       return NULL;
    }
}

/* The function checks if the client reached output buffer soft or hard
 * limit, and also update the state needed to check the soft limit as
 * a side effect.
 * Return value: non-zero if the client reached the soft or the hard limit.
 *               Otherwise zero is returned. */
int checkClientOutputBufferLimits(client *c) {
    int soft = 0, hard = 0, class;
    unsigned long used_mem = getClientOutputBufferMemoryUsage(c); current output buffer usage

    class = getClientType(c); the client's class
    /* For the purpose of output buffer limiting, masters are handled
     * like normal clients. */
    if (class == CLIENT_TYPE_MASTER) class = CLIENT_TYPE_NORMAL;

    if (server.client_obuf_limits[class].hard_limit_bytes &&   hard limit
        used_mem >= server.client_obuf_limits[class].hard_limit_bytes)
        hard = 1;
    if (server.client_obuf_limits[class].soft_limit_bytes &&   soft limit
        used_mem >= server.client_obuf_limits[class].soft_limit_bytes)
        soft = 1;

    /* We need to check if the soft limit is reached continuously for the
     * specified amount of seconds. */
    if (soft) {
        if (c->obuf_soft_limit_reached_time == 0) {
            c->obuf_soft_limit_reached_time = server.unixtime; remember when the soft limit was first hit
            soft = 0; /* First time we see the soft limit reached */
        } else {
            time_t elapsed = server.unixtime - c->obuf_soft_limit_reached_time;  time spent over the soft limit

            if (elapsed <=
                server.client_obuf_limits[class].soft_limit_seconds) {
                soft = 0; /* The client still did not reached the max number of
                             seconds for the soft limit to be considered
                             reached. */
            }
        }
    } else {
        c->obuf_soft_limit_reached_time = 0;
    }
    return soft || hard;
}
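
The limits consulted above come from redis.conf. For reference, the stock Redis 6.0 defaults are shown below (per class: hard limit, soft limit, soft seconds; 0 disables a limit, which is why normal clients are unlimited by default):

    client-output-buffer-limit normal 0 0 0
    client-output-buffer-limit replica 256mb 64mb 60
    client-output-buffer-limit pubsub 32mb 8mb 60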

/* Asynchronously close a client if soft or hard limit is reached on the
 * output buffer size. The caller can check if the client will be closed
 * checking if the client CLIENT_CLOSE_ASAP flag is set.
 * Note: we need to close the client asynchronously because this function is
 * called from contexts where the client can't be freed safely, i.e. from the
 * lower level functions pushing data inside the client output buffers. */
void asyncCloseClientOnOutputBufferLimitReached(client *c) {
    if (!c->conn) return; /* It is unsafe to free fake clients. */
    serverAssert(c->reply_bytes < SIZE_MAX-(1024*64));
    if (c->reply_bytes == 0 || c->flags & CLIENT_CLOSE_ASAP) return; nothing to send, or already scheduled to close
    if (checkClientOutputBufferLimits(c)) {
        sds client = catClientInfoString(sdsempty(),c);

        freeClientAsync(c);
        serverLog(LL_WARNING,"Client %s scheduled to be closed ASAP for overcoming of output buffer limits.", client);
        sdsfree(client);
    }
}

/* Helper function used by freeMemoryIfNeeded() in order to flush slaves
 * output buffers without returning control to the event loop.
 * This is also called by SHUTDOWN for a best-effort attempt to send
 * slaves the latest writes. */
void flushSlavesOutputBuffers(void) {
    listIter li;
    listNode *ln;

    listRewind(server.slaves,&li);
    while((ln = listNext(&li))) { iterate over the replicas
        client *slave = listNodeValue(ln);
        int can_receive_writes = connHasWriteHandler(slave->conn) ||
                                 (slave->flags & CLIENT_PENDING_WRITE);

        /* We don't want to send the pending data to the replica in a few
         * cases:
         *
         * 1. For some reason there is neither the write handler installed
         *    nor the client is flagged as to have pending writes: for some
         *    reason this replica may not be set to receive data. This is
         *    just for the sake of defensive programming.
         *
         * 2. The put_online_on_ack flag is true. To know why we don't want
         *    to send data to the replica in this case, please grep for the
         *    flag for this flag.
         *
         * 3. Obviously if the slave is not ONLINE.
         */
        if (slave->replstate == SLAVE_STATE_ONLINE &&  the replica is ONLINE
            can_receive_writes && it can receive writes
            !slave->repl_put_online_on_ack &&  it is not waiting for its first ACK before going online
            clientHasPendingReplies(slave))  and it has pending replies to send
        {
            writeToClient(slave,0);
        }
    }
}

/* Pause clients up to the specified unixtime (in ms). While clients
 * are paused no command is processed from clients, so the data set can't
 * change during that time.
 * However while this function pauses normal and Pub/Sub clients, slaves are
 * still served, so this function can be used on server upgrades where it is
 * required that slaves process the latest bytes from the replication stream
 * before being turned to masters.
 * This function is also internally used by Redis Cluster for the manual
 * failover procedure implemented by CLUSTER FAILOVER.
 * The function always succeed, even if there is already a pause in progress.
 * In such a case, the pause is extended if the duration is more than the
 * time left for the previous duration. However if the duration is smaller
 * than the time left for the previous pause, no change is made to the
 * left duration. */
void pauseClients(mstime_t end) {
    if (!server.clients_paused || end > server.clients_pause_end_time) not paused yet, or the new deadline is later than the current one
        server.clients_pause_end_time = end; update the pause deadline
    server.clients_paused = 1; set the paused flag
}
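
This is the function behind the CLIENT PAUSE command (and, as the comment notes, the manual failover of CLUSTER FAILOVER). For example:

    127.0.0.1:6379> CLIENT PAUSE 5000
    OK

suspends command processing for normal and Pub/Sub clients for up to five seconds, while replicas keep being served.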

/* Return non-zero if clients are currently paused. As a side effect the
 * function checks if the pause time was reached and clear it. */
int clientsArePaused(void) {
    if (server.clients_paused &&  the paused flag is set
        server.clients_pause_end_time < server.mstime)  and the pause deadline has passed
    {
        listNode *ln;
        listIter li;
        client *c;

        server.clients_paused = 0;

        /* Put all the clients in the unblocked clients queue in order to
         * force the re-processing of the input buffer if any. */
        listRewind(server.clients,&li);
        while ((ln = listNext(&li)) != NULL) {
            c = listNodeValue(ln);

            /* Don't touch slaves and blocked clients.
             * The latter pending requests will be processed when unblocked. */
            if (c->flags & (CLIENT_SLAVE|CLIENT_BLOCKED)) continue;
            queueClientForReprocessing(c);
        }
    }
    return server.clients_paused;
}

/* This function is called by Redis in order to process a few events from
 * time to time while blocked into some not interruptible operation.
 * This allows to reply to clients with the -LOADING error while loading the
 * data set at startup or after a full resynchronization with the master
 * and so forth.
 * It calls the event loop in order to process a few events. Specifically we
 * try to call the event loop 4 times as long as we receive acknowledge that
 * some event was processed, in order to go forward with the accept, read,
 * write, close sequence needed to serve a client.
 * The function returns the total number of events processed. */
void processEventsWhileBlocked(void) {
    int iterations = 4; /* See the function top-comment. */

    /* Note: when we are processing events while blocked (for instance during
     * busy Lua scripts), we set a global flag. When such flag is set, we
     * avoid handling the read part of clients using threaded I/O.
     * See https://github.com/antirez/redis/issues/6988 for more info. */
    ProcessingEventsWhileBlocked = 1;
    while (iterations--) {
        long long startval = server.events_processed_while_blocked;
        long long ae_events = aeProcessEvents(server.el,
            AE_FILE_EVENTS|AE_DONT_WAIT|
            AE_CALL_BEFORE_SLEEP|AE_CALL_AFTER_SLEEP);
        /* Note that server.events_processed_while_blocked will also get
         * incremeted by callbacks called by the event loop handlers. */
        server.events_processed_while_blocked += ae_events;
        long long events = server.events_processed_while_blocked - startval;
        if (!events) break;
    }
    ProcessingEventsWhileBlocked = 0;
}

/* ==========================================================================
 * Threaded I/O
 * ========================================================================== */

int tio_debug = 0;

#define IO_THREADS_MAX_NUM 128
#define IO_THREADS_OP_READ 0
#define IO_THREADS_OP_WRITE 1

pthread_t io_threads[IO_THREADS_MAX_NUM];
pthread_mutex_t io_threads_mutex[IO_THREADS_MAX_NUM];
_Atomic unsigned long io_threads_pending[IO_THREADS_MAX_NUM];
int io_threads_active;  /* Are the threads currently spinning waiting I/O? */
int io_threads_op;      /* IO_THREADS_OP_WRITE or IO_THREADS_OP_READ. */

/* This is the list of clients each thread will serve when threaded I/O is
 * used. We spawn io_threads_num-1 threads, since one is the main thread
 * itself. */
list *io_threads_list[IO_THREADS_MAX_NUM];

void *IOThreadMain(void *myid) {
    /* The ID is the thread number (from 0 to server.iothreads_num-1), and is
     * used by the thread to just manipulate a single sub-array of clients. */
    long id = (unsigned long)myid;
    char thdname[16];

    snprintf(thdname, sizeof(thdname), "io_thd_%ld", id);
    redis_set_thread_title(thdname);  set the thread name
    redisSetCpuAffinity(server.server_cpulist); pin the thread to the configured CPU list

    while(1) {
        /* Wait for start */
        for (int j = 0; j < 1000000; j++) {  busy-spin until this thread has pending work
            if (io_threads_pending[id] != 0) break;  work arrived, stop spinning
        }

        /* Give the main thread a chance to stop this thread. */
        (an idle thread blocks here, so the main thread can park it just by holding the mutex)
        if (io_threads_pending[id] == 0) {
            pthread_mutex_lock(&io_threads_mutex[id]);
            pthread_mutex_unlock(&io_threads_mutex[id]);
            continue;
        }

        serverAssert(io_threads_pending[id] != 0);

        if (tio_debug) printf("[%ld] %d to handle\n", id, (int)listLength(io_threads_list[id]));

        /* Process: note that the main thread will never touch our list
         * before we drop the pending count to 0. */
        listIter li;
        listNode *ln;
        listRewind(io_threads_list[id],&li);
        while((ln = listNext(&li))) {
            client *c = listNodeValue(ln);
            if (io_threads_op == IO_THREADS_OP_WRITE) {  write path
                writeToClient(c,0);
            } else if (io_threads_op == IO_THREADS_OP_READ) {  read path
                readQueryFromClient(c->conn);
            } else {
                serverPanic("io_threads_op value is unknown");
            }
        }
        listEmpty(io_threads_list[id]); clear this thread's client list
        io_threads_pending[id] = 0;

        if (tio_debug) printf("[%ld] Done\n", id);
    }
}
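
The busy-spin plus mutex handshake above is easy to miss on a first read. Below is a minimal standalone sketch (hypothetical code, not part of Redis; compile with -pthread) of the same park/unpark technique: the main thread keeps the worker parked by holding its mutex, and hands work over through an atomic pending counter:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t gate = PTHREAD_MUTEX_INITIALIZER;
    static atomic_ulong pending;

    static void *worker(void *arg) {
        (void)arg;
        while (1) {
            /* Spin briefly waiting for work, like IOThreadMain does. */
            for (int j = 0; j < 1000000; j++)
                if (atomic_load(&pending) != 0) break;

            if (atomic_load(&pending) == 0) {
                /* Still idle: parking point. If the main thread holds
                 * `gate`, we block here until it releases us. */
                pthread_mutex_lock(&gate);
                pthread_mutex_unlock(&gate);
                continue;
            }
            printf("worker: handling %lu jobs\n", atomic_load(&pending));
            atomic_store(&pending, 0); /* Tell the main thread we are done. */
        }
        return NULL;
    }

    int main(void) {
        pthread_t tid;
        pthread_mutex_lock(&gate);      /* The worker starts parked. */
        pthread_create(&tid, NULL, worker, NULL);

        pthread_mutex_unlock(&gate);    /* startThreadedIO() equivalent. */
        atomic_store(&pending, 3);      /* Hand over a batch of work. */
        while (atomic_load(&pending));  /* Wait, as the main thread does. */

        pthread_mutex_lock(&gate);      /* stopThreadedIO() equivalent. */
        sleep(1);
        return 0;
    }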

/* Initialize the data structures needed for threaded I/O. */
void initThreadedIO(void) {
    io_threads_active = 0; /* We start with threads not active. */

    /* Don't spawn any thread if the user selected a single thread:
     * we'll handle I/O directly from the main thread. */
    if (server.io_threads_num == 1) return; a single configured thread means the main thread does all I/O

    if (server.io_threads_num > IO_THREADS_MAX_NUM) { too many threads configured: log and abort
        serverLog(LL_WARNING,"Fatal: too many I/O threads configured. "
                             "The maximum number is %d.", IO_THREADS_MAX_NUM);
        exit(1);
    }

    /* Spawn and initialize the I/O threads. */
    for (int i = 0; i < server.io_threads_num; i++) {
        /* Things we do for all the threads including the main thread. */
        io_threads_list[i] = listCreate();
        if (i == 0) continue; /* Thread 0 is the main thread. */

        /* Things we do only for the additional threads. */
        pthread_t tid;
        pthread_mutex_init(&io_threads_mutex[i],NULL); init the per-thread mutex
        io_threads_pending[i] = 0;
        pthread_mutex_lock(&io_threads_mutex[i]); /* Thread will be stopped. */ the new thread starts parked
        if (pthread_create(&tid,NULL,IOThreadMain,(void*)(long)i) != 0) {  spawn the I/O thread
            serverLog(LL_WARNING,"Fatal: Can't initialize IO thread.");
            exit(1);
        }
        io_threads[i] = tid;
    }
}
Start the I/O threads:
void startThreadedIO(void) {
    if (tio_debug) { printf("S"); fflush(stdout); }
    if (tio_debug) printf("--- STARTING THREADED IO ---\n");
    serverAssert(io_threads_active == 0); must not already be active
    for (int j = 1; j < server.io_threads_num; j++)
        pthread_mutex_unlock(&io_threads_mutex[j]); unlock to unpark each thread
    io_threads_active = 1; mark the threads active
}
Stop the I/O threads:
void stopThreadedIO(void) {
    /* We may have still clients with pending reads when this function
     * is called: handle them before stopping the threads. */
    handleClientsWithPendingReadsUsingThreads();
    if (tio_debug) { printf("E"); fflush(stdout); }
    if (tio_debug) printf("--- STOPPING THREADED IO [R%d] [W%d] ---\n",
        (int) listLength(server.clients_pending_read),
        (int) listLength(server.clients_pending_write));
    serverAssert(io_threads_active == 1);
    for (int j = 1; j < server.io_threads_num; j++)
        pthread_mutex_lock(&io_threads_mutex[j]); grab each mutex so the threads park
    io_threads_active = 0; mark the threads inactive
}

/* This function checks if there are not enough pending clients to justify
 * taking the I/O threads active: in that case I/O threads are stopped if
 * currently active. We track the pending writes as a measure of clients
 * we need to handle in parallel, however the I/O threading is disabled
 * globally for reads as well if we have too little pending clients.
 * The function returns 0 if the I/O threading should be used because there
 * are enough active threads, otherwise 1 is returned and the I/O threads
 * could be possibly stopped (if already active) as a side effect. */
int stopThreadedIOIfNeeded(void) {
    int pending = listLength(server.clients_pending_write);

    /* Return ASAP if IO threads are disabled (single threaded mode). */ 
    if (server.io_threads_num == 1) return 1;

    if (pending < (server.io_threads_num*2)) {   too few pending writes to justify the threads
        if (io_threads_active) stopThreadedIO(); stop them if currently active
        return 1;
    } else {
        return 0;
    }
}
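
For example, with io-threads 4 the threaded write path only engages once at least 8 clients (io_threads_num * 2) have pending writes; below that threshold the caller falls back to the plain synchronous handleClientsWithPendingWrites() path.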

int handleClientsWithPendingWritesUsingThreads(void) {
    int processed = listLength(server.clients_pending_write); number of clients with pending writes
    if (processed == 0) return 0; /* Return ASAP if there are no clients. */

    /* If I/O threads are disabled or we have few clients to serve, don't
     * use I/O threads, but the boring synchronous code. */
    if (server.io_threads_num == 1 || stopThreadedIOIfNeeded()) {
        return handleClientsWithPendingWrites();
    }

    /* Start threads if needed. */
    if (!io_threads_active) startThreadedIO();

    if (tio_debug) printf("%d TOTAL WRITE pending clients\n", processed);

    /* Distribute the clients across N different lists. */
    listIter li;
    listNode *ln;
    listRewind(server.clients_pending_write,&li);
    int item_id = 0;
    while((ln = listNext(&li))) {
        client *c = listNodeValue(ln);
        c->flags &= ~CLIENT_PENDING_WRITE;
        int target_id = item_id % server.io_threads_num;
        listAddNodeTail(io_threads_list[target_id],c); round-robin the client onto a thread's list
        item_id++;
    }

    /* Give the start condition to the waiting threads, by setting the
     * start condition atomic var. */
    io_threads_op = IO_THREADS_OP_WRITE;
    for (int j = 1; j < server.io_threads_num; j++) {
        int count = listLength(io_threads_list[j]);
        io_threads_pending[j] = count;
    }

    /* Also use the main thread to process a slice of clients. */
    listRewind(io_threads_list[0],&li);
    while((ln = listNext(&li))) {
        client *c = listNodeValue(ln);
        writeToClient(c,0);
    }
    listEmpty(io_threads_list[0]); the main thread's slice is done, clear its list

    /* Wait for all the other threads to end their work. */
    while(1) {
        unsigned long pending = 0;
        for (int j = 1; j < server.io_threads_num; j++)
            pending += io_threads_pending[j];
        if (pending == 0) break;
    }
    if (tio_debug) printf("I/O WRITE All threads finshed\n");

    /* Run the list of clients again to install the write handler where
     * needed. */
    listRewind(server.clients_pending_write,&li);
    while((ln = listNext(&li))) {
        client *c = listNodeValue(ln);

        /* Install the write handler if there are pending writes in some
         * of the clients. */
        if (clientHasPendingReplies(c) &&
                connSetWriteHandler(c->conn, sendReplyToClient) == AE_ERR)
        {
            freeClientAsync(c);
        }
    }
    listEmpty(server.clients_pending_write);
    return processed;
}

/* Return 1 if we want to handle the client read later using threaded I/O.
 * This is called by the readable handler of the event loop.
 * As a side effect of calling this function the client is put in the
 * pending read clients and flagged as such. */
int postponeClientRead(client *c) {
    if (io_threads_active &&  I/O threads are active
        server.io_threads_do_reads &&   threaded reads are enabled
        !ProcessingEventsWhileBlocked &&
        !(c->flags & (CLIENT_MASTER|CLIENT_SLAVE|CLIENT_PENDING_READ)))
    {
        c->flags |= CLIENT_PENDING_READ; flag the read as pending
        listAddNodeHead(server.clients_pending_read,c); queue the client for the I/O threads
        return 1;
    } else {
        return 0;
    }
}
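
Both conditions checked above map to redis.conf directives; threaded reads are disabled by default and must be enabled explicitly:

    io-threads 4
    io-threads-do-reads yes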

/* When threaded I/O is also enabled for the reading + parsing side, the
 * readable handler will just put normal clients into a queue of clients to
 * process (instead of serving them synchronously). This function runs
 * the queue using the I/O threads, and process them in order to accumulate
 * the reads in the buffers, and also parse the first command available
 * rendering it in the client structures. */
int handleClientsWithPendingReadsUsingThreads(void) {
    if (!io_threads_active || !server.io_threads_do_reads) return 0; threads inactive or threaded reads disabled
    int processed = listLength(server.clients_pending_read); number of clients with pending reads
    if (processed == 0) return 0;

    if (tio_debug) printf("%d TOTAL READ pending clients\n", processed);

    /* Distribute the clients across N different lists. */
    listIter li;
    listNode *ln;
    listRewind(server.clients_pending_read,&li);
    int item_id = 0;
    while((ln = listNext(&li))) {
        client *c = listNodeValue(ln);
        int target_id = item_id % server.io_threads_num;
        listAddNodeTail(io_threads_list[target_id],c); round-robin onto the per-thread lists
        item_id++;
    }

    /* Give the start condition to the waiting threads, by setting the
     * start condition atomic var. */
    io_threads_op = IO_THREADS_OP_READ;
    for (int j = 1; j < server.io_threads_num; j++) {
        int count = listLength(io_threads_list[j]);
        io_threads_pending[j] = count;
    }

    /* Also use the main thread to process a slice of clients. */
    listRewind(io_threads_list[0],&li);
    while((ln = listNext(&li))) {
        client *c = listNodeValue(ln);
        readQueryFromClient(c->conn);
    }
    listEmpty(io_threads_list[0]);

    /* Wait for all the other threads to end their work. */
    while(1) {
        unsigned long pending = 0;
        for (int j = 1; j < server.io_threads_num; j++)
            pending += io_threads_pending[j];
        if (pending == 0) break;
    }
    if (tio_debug) printf("I/O READ All threads finshed\n");

    /* Run the list of clients again to process the new buffers. */
    while(listLength(server.clients_pending_read)) {
        ln = listFirst(server.clients_pending_read);
        client *c = listNodeValue(ln);
        c->flags &= ~CLIENT_PENDING_READ;
        listDelNode(server.clients_pending_read,ln);

        if (c->flags & CLIENT_PENDING_COMMAND) {
            c->flags &= ~CLIENT_PENDING_COMMAND;
            if (processCommandAndResetClient(c) == C_ERR) {
                /* If the client is no longer valid, we avoid
                 * processing the client later. So we just go
                 * to the next. */
                continue;
            }
        }
        processInputBuffer(c);
    }
    return processed;
}

 
