Blame view

kernel/linux-imx6_3.14.28/net/ipv4/tcp_fastopen.c 2.43 KB
6b13f685e   Kim Minsu   Initial BSP addition
  #include <linux/err.h>
  #include <linux/init.h>
  #include <linux/kernel.h>
  #include <linux/list.h>
  #include <linux/tcp.h>
  #include <linux/rcupdate.h>
  #include <linux/rculist.h>
  #include <net/inetpeer.h>
  #include <net/tcp.h>
  
  int sysctl_tcp_fastopen __read_mostly = TFO_CLIENT_ENABLE;
  
  struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
  
  static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);
  
  void tcp_fastopen_init_key_once(bool publish)
  {
  	static u8 key[TCP_FASTOPEN_KEY_LENGTH];
  
  	/* tcp_fastopen_reset_cipher publishes the new context
  	 * atomically, so we allow this race happening here.
  	 *
  	 * All call sites of tcp_fastopen_cookie_gen also check
  	 * for a valid cookie, so this is an acceptable risk.
  	 */
  	if (net_get_random_once(key, sizeof(key)) && publish)
  		tcp_fastopen_reset_cipher(key, sizeof(key));
  }
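
/*
 * Usage sketch (illustrative, not part of the original file): data-path
 * callers such as tcp_fastopen_cookie_gen() below pass publish == true
 * so that a freshly generated key is also installed as the cipher
 * context.  A control path that installs its own key immediately
 * afterwards only needs the key generated, e.g.:
 *
 *	tcp_fastopen_init_key_once(false);
 *	tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH);
 */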
  
  static void tcp_fastopen_ctx_free(struct rcu_head *head)
  {
  	struct tcp_fastopen_context *ctx =
  	    container_of(head, struct tcp_fastopen_context, rcu);
  	crypto_free_cipher(ctx->tfm);
  	kfree(ctx);
  }
  
  int tcp_fastopen_reset_cipher(void *key, unsigned int len)
  {
  	int err;
  	struct tcp_fastopen_context *ctx, *octx;
  
  	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
  	if (!ctx)
  		return -ENOMEM;
  	ctx->tfm = crypto_alloc_cipher("aes", 0, 0);
  
  	if (IS_ERR(ctx->tfm)) {
  		err = PTR_ERR(ctx->tfm);
  error:		kfree(ctx);
  		pr_err("TCP: TFO aes cipher alloc error: %d
  ", err);
  		return err;
  	}
  	err = crypto_cipher_setkey(ctx->tfm, key, len);
  	if (err) {
  		pr_err("TCP: TFO cipher key error: %d
  ", err);
  		crypto_free_cipher(ctx->tfm);
  		goto error;
  	}
  	memcpy(ctx->key, key, len);
  
  	spin_lock(&tcp_fastopen_ctx_lock);
  
  	octx = rcu_dereference_protected(tcp_fastopen_ctx,
  				lockdep_is_held(&tcp_fastopen_ctx_lock));
  	rcu_assign_pointer(tcp_fastopen_ctx, ctx);
  	spin_unlock(&tcp_fastopen_ctx_lock);
  
  	if (octx)
  		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
  	return err;
  }
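
/*
 * Rekey sketch (illustrative assumption; this helper is not part of the
 * original file): a control-path caller replaces the TFO secret by
 * handing a fresh 16-byte key to tcp_fastopen_reset_cipher().  Readers
 * running concurrently under rcu_read_lock() keep using the old context
 * until call_rcu() above frees it after a grace period.
 */
static int tcp_fastopen_example_rekey(void)
{
	u8 new_key[TCP_FASTOPEN_KEY_LENGTH];

	/* get_random_bytes() is declared in <linux/random.h>. */
	get_random_bytes(new_key, sizeof(new_key));
	return tcp_fastopen_reset_cipher(new_key, sizeof(new_key));
}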
  
/* Computes the fastopen cookie for the IP path.
 * The path is 128 bits long (pad with zeros for IPv4).
 *
 * The caller must check foc->len to determine if a valid cookie
 * has been generated successfully.
 */
  void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
  			     struct tcp_fastopen_cookie *foc)
  {
  	__be32 path[4] = { src, dst, 0, 0 };
  	struct tcp_fastopen_context *ctx;
  
  	tcp_fastopen_init_key_once(true);
  
  	rcu_read_lock();
  	ctx = rcu_dereference(tcp_fastopen_ctx);
  	if (ctx) {
  		crypto_cipher_encrypt_one(ctx->tfm, foc->val, (__u8 *)path);
  		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
  	}
  	rcu_read_unlock();
  }
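
/*
 * Validation sketch (illustrative assumption; the real listener-side
 * check lives in the TCP input path, not in this file): a server
 * regenerates the cookie for the peer's address pair and compares it
 * with the value the client echoed in its SYN.  expect.len stays 0 if
 * no cipher context was available, so such cookies are rejected.
 */
static bool tcp_fastopen_example_cookie_ok(__be32 src, __be32 dst,
					   const struct tcp_fastopen_cookie *peer)
{
	struct tcp_fastopen_cookie expect = { .len = 0 };

	tcp_fastopen_cookie_gen(src, dst, &expect);
	return expect.len == TCP_FASTOPEN_COOKIE_SIZE &&
	       peer->len == TCP_FASTOPEN_COOKIE_SIZE &&
	       !memcmp(expect.val, peer->val, TCP_FASTOPEN_COOKIE_SIZE);
}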