ebpf的sockmap的实现二

Call chain (strparser delivery into the verdict program):

    rcv_msg = smap_read_sock_strparser
      -> smap_do_verdict
        -> smap_verdict_func
          -> (*prog->bpf_func)(skb, prog->insnsi)

 

static int smap_init_sock(struct smap_psock *psock,
struct sock *sk)
{
static const struct strp_callbacks cb = {
.rcv_msg = smap_read_sock_strparser,
.parse_msg = smap_parse_func_strparser,
.read_sock_done = smap_read_sock_done,
};

return strp_init(&psock->strp, sk, &cb);
}

/* strparser rcv_msg callback: recover the owning psock from the embedded
 * strparser and run the verdict path on the parsed skb.  The whole verdict
 * runs under the RCU read lock.
 */
static void smap_read_sock_strparser(struct strparser *strp,
				     struct sk_buff *skb)
{
	rcu_read_lock();
	smap_do_verdict(container_of(strp, struct smap_psock, strp), skb);
	rcu_read_unlock();
}

 

/*
 * Run the verdict BPF program on @skb and act on its result:
 * SK_REDIRECT queues the skb on the peer psock's rx queue and kicks its
 * tx worker; every other verdict (SK_DROP included) frees the skb.
 */
static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
{
	struct sock *sk;
	int rc;

	/* Because we use per cpu values to feed input from sock redirect
	 * in BPF program to do_sk_redirect_map() call we need to ensure we
	 * are not preempted. RCU read lock is not sufficient in this case
	 * with CONFIG_PREEMPT_RCU enabled so we must be explicit here.
	 */
	preempt_disable();
	rc = smap_verdict_func(psock, skb);
	switch (rc) {
	case SK_REDIRECT:
		/* Fetch the redirect target (stashed per cpu by the BPF
		 * program) before re-enabling preemption.
		 */
		sk = do_sk_redirect_map();
		preempt_enable();
		if (likely(sk)) {
			struct smap_psock *peer = smap_psock_sk(sk);

			/* Hand the skb over only while the peer can still
			 * transmit: tx worker running, socket not dead,
			 * and writeable.
			 */
			if (likely(peer &&
			    test_bit(SMAP_TX_RUNNING, &peer->state) &&
			    !sock_flag(sk, SOCK_DEAD) &&
			    sock_writeable(sk))) {
				skb_set_owner_w(skb, sk);
				skb_queue_tail(&peer->rxqueue, skb);
				schedule_work(&peer->tx_work);
				break;
			}
		}
		/* Fall through and free skb otherwise */
	case SK_DROP:
	default:
		/* The SK_REDIRECT arm above already re-enabled preemption;
		 * every other path must do it here.
		 */
		if (rc != SK_REDIRECT)
			preempt_enable();
		kfree_skb(skb);
	}
}

 

/*
 * Invoke the attached verdict program on @skb.  Returns the program's
 * verdict, or SK_DROP when no program is attached.
 */
static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *verdict_prog = READ_ONCE(psock->bpf_verdict);
	int verdict;

	if (unlikely(!verdict_prog))
		return SK_DROP;

	/* Detach the skb from any current socket, then temporarily point
	 * skb->sk at the psock's socket for the duration of the program run.
	 */
	skb_orphan(skb);
	skb->sk = psock->sock;
	bpf_compute_data_end(skb);
	verdict = (*verdict_prog->bpf_func)(skb, verdict_prog->insnsi);
	skb->sk = NULL;

	return verdict;
}

posted on 2020-01-17 16:27  tycoon3  阅读(498)  评论(0)    收藏  举报

导航