kernel/linux-rt-4.4.41/drivers/gpu/drm/nouveau/nouveau_sgdma.c

  #include <linux/pagemap.h>
  #include <linux/slab.h>
  
  #include "nouveau_drm.h"
  #include "nouveau_ttm.h"
  
  struct nouveau_sgdma_be {
  	/* this has to be the first field so that populate/unpopulate in
  	 * nouveau_bo.c work properly; otherwise they would have to move here
  	 */
  	struct ttm_dma_tt ttm;
  	struct nvkm_mem *node;
  };
  
  static void
  nouveau_sgdma_destroy(struct ttm_tt *ttm)
  {
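  	/* the cast is safe because struct ttm_dma_tt (whose first member is a
  	 * struct ttm_tt) is the first field of nouveau_sgdma_be, per the
  	 * struct comment above
  	 */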
  	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
  
  	if (ttm) {
  		ttm_dma_tt_fini(&nvbe->ttm);
  		kfree(nvbe);
  	}
  }
  
  static int
  nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
  {
  	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
  	struct nvkm_mem *node = mem->mm_node;
  
  	if (ttm->sg) {
  		node->sg    = ttm->sg;
  		node->pages = NULL;
  	} else {
  		node->sg    = NULL;
  		node->pages = nvbe->ttm.dma_address;
  	}
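  	/* node->size is in 4KiB GPU pages: bytes (num_pages << PAGE_SHIFT) >> 12 */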
  	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
  
  	nvkm_vm_map(&node->vma[0], node);
  	nvbe->node = node;
  	return 0;
  }
  
  static int
  nv04_sgdma_unbind(struct ttm_tt *ttm)
  {
  	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
  	nvkm_vm_unmap(&nvbe->node->vma[0]);
  	return 0;
  }
  
  static struct ttm_backend_func nv04_sgdma_backend = {
  	.bind			= nv04_sgdma_bind,
  	.unbind			= nv04_sgdma_unbind,
  	.destroy		= nouveau_sgdma_destroy
  };
  
  static int
  nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
  {
  	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
  	struct nvkm_mem *node = mem->mm_node;
  
  	/* the VM mapping itself is done in move_notify(); here we only
  	 * record the page/sg information
  	 */
  	if (ttm->sg) {
  		node->sg    = ttm->sg;
  		node->pages = NULL;
  	} else {
  		node->sg    = NULL;
  		node->pages = nvbe->ttm.dma_address;
  	}
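  	/* as in nv04_sgdma_bind(): node->size is in 4KiB GPU pages */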
  	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
  	return 0;
  }
  
  static int
  nv50_sgdma_unbind(struct ttm_tt *ttm)
  {
  	/* noop: unbound in move_notify() */
  	return 0;
  }
  
  static struct ttm_backend_func nv50_sgdma_backend = {
  	.bind			= nv50_sgdma_bind,
  	.unbind			= nv50_sgdma_unbind,
  	.destroy		= nouveau_sgdma_destroy
  };
  
  struct ttm_tt *
  nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
  			 unsigned long size, uint32_t page_flags,
  			 struct page *dummy_read_page)
  {
  	struct nouveau_drm *drm = nouveau_bdev(bdev);
  	struct nouveau_sgdma_be *nvbe;
  
  	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
  	if (!nvbe)
  		return NULL;
  
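  	/* pre-Tesla chips use the nv04 backend, which maps the pages right
  	 * in bind(); Tesla and later use the nv50 backend, which defers the
  	 * VM mapping to move_notify()
  	 */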
  	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA)
  		nvbe->ttm.ttm.func = &nv04_sgdma_backend;
  	else
  		nvbe->ttm.ttm.func = &nv50_sgdma_backend;
  
  	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page))
  		/*
  		 * A failing ttm_dma_tt_init() will call ttm_tt_destroy()
  		 * and thus our nouveau_sgdma_destroy() hook, so we don't need
  		 * to free nvbe here.
  		 */
  		return NULL;
  	return &nvbe->ttm.ttm;
  }