/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/

#ifndef _VNIC_WQ_COPY_H_
#define _VNIC_WQ_COPY_H_

#include <linux/pci.h>

#include "vnic_wq.h"
#include "fcpio.h"

#define VNIC_WQ_COPY_MAX 1
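
/*
 * State for one copy (FCP I/O) work queue: a descriptor ring shared with
 * the adapter plus two software cursors.  to_use_index is the next slot
 * the driver will fill and post; to_clean_index is the oldest slot that
 * has been handed to the adapter and not yet reclaimed.
 */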
struct vnic_wq_copy {
	unsigned int index;
	struct vnic_dev *vdev;
	struct vnic_wq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;
	unsigned int to_use_index;
	unsigned int to_clean_index;
};
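
/*
 * Ring accounting helpers: desc_avail is the number of slots the driver
 * may still post, desc_in_use its complement.  One slot is always kept
 * unused (hence desc_count - 1), the usual way to distinguish a full
 * ring from an empty one.
 */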
static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq)
{
	return wq->ring.desc_avail;
}

static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq)
{
	return wq->ring.desc_count - 1 - wq->ring.desc_avail;
}
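
/*
 * Return a pointer to the slot at to_use_index without advancing any
 * state; the caller fills it in and then calls vnic_wq_copy_post().
 */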
static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq)
{
	struct fcpio_host_req *desc = wq->ring.descs;

	return &desc[wq->to_use_index];
}
static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq)
{
	/* advance the to-use cursor, wrapping at the end of the ring */
	if (wq->to_use_index + 1 == wq->ring.desc_count)
		wq->to_use_index = 0;
	else
		wq->to_use_index++;

	wq->ring.desc_avail--;

	/* Adding write memory barrier prevents compiler and/or CPU
	 * reordering, thus avoiding descriptor posting before
	 * descriptor is initialized. Otherwise, hardware can read
	 * stale descriptor fields.
	 */
	wmb();

	iowrite32(wq->to_use_index, &wq->ctrl->posted_index);
}
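
/*
 * Reclaim every descriptor up to and including 'index', handling
 * wrap-around.  For example, with desc_count = 64, to_clean_index = 60
 * and index = 2, the completed slots are 60..63 and 0..2, so
 * cnt = 64 - 60 + 2 + 1 = 7.
 */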
static inline void vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index)
{
	unsigned int cnt;

	if (wq->to_clean_index <= index)
		cnt = (index - wq->to_clean_index) + 1;
	else
		cnt = wq->ring.desc_count - wq->to_clean_index + index + 1;

	wq->to_clean_index = ((index + 1) % wq->ring.desc_count);
	wq->ring.desc_avail += cnt;
}
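
/*
 * Walk the ring from to_clean_index up to completed_index, calling
 * q_service (when provided) on each completed descriptor and returning
 * the slots to desc_avail.  A completed_index of (u16)-1 means "clean
 * everything": keep going until to_clean_index catches up with
 * to_use_index.
 */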
static inline void vnic_wq_copy_service(struct vnic_wq_copy *wq,
	u16 completed_index,
	void (*q_service)(struct vnic_wq_copy *wq,
	struct fcpio_host_req *wq_desc))
{
	struct fcpio_host_req *wq_desc = wq->ring.descs;
	unsigned int curr_index;

	while (1) {

		if (q_service)
			(*q_service)(wq, &wq_desc[wq->to_clean_index]);

		wq->ring.desc_avail++;

		curr_index = wq->to_clean_index;

		/* increment the to-clean index so that we start
		 * with an unprocessed index next time we enter the loop
		 */
		if (wq->to_clean_index + 1 == wq->ring.desc_count)
			wq->to_clean_index = 0;
		else
			wq->to_clean_index++;

		if (curr_index == completed_index)
			break;

		/* we have cleaned all the entries */
		if ((completed_index == (u16)-1) &&
		    (wq->to_clean_index == wq->to_use_index))
			break;
	}
}
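
/*
 * Out-of-line entry points for allocating, initializing, enabling,
 * disabling and cleaning the queue; these live in the driver proper
 * (vnic_wq_copy.c in the fnic driver), not in this header.
 */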
void vnic_wq_copy_enable(struct vnic_wq_copy *wq);
int vnic_wq_copy_disable(struct vnic_wq_copy *wq);
void vnic_wq_copy_free(struct vnic_wq_copy *wq);
int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
		       unsigned int index, unsigned int desc_count,
		       unsigned int desc_size);
void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
		       unsigned int error_interrupt_enable,
		       unsigned int error_interrupt_offset);
void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
			void (*q_clean)(struct vnic_wq_copy *wq,
					struct fcpio_host_req *wq_desc));

#endif /* _VNIC_WQ_COPY_H_ */