Blame view

kernel/linux-rt-4.4.41/drivers/gpu/drm/amd/scheduler/sched_fence.c 3.07 KB
5113f6f70   김현기   kernel add
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
  /*
   * Copyright 2015 Advanced Micro Devices, Inc.
   *
   * Permission is hereby granted, free of charge, to any person obtaining a
   * copy of this software and associated documentation files (the "Software"),
   * to deal in the Software without restriction, including without limitation
   * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   * and/or sell copies of the Software, and to permit persons to whom the
   * Software is furnished to do so, subject to the following conditions:
   *
   * The above copyright notice and this permission notice shall be included in
   * all copies or substantial portions of the Software.
   *
   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
   * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
   * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
   * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
   * OTHER DEALINGS IN THE SOFTWARE.
   *
   *
   */
  #include <linux/kthread.h>
  #include <linux/wait.h>
  #include <linux/sched.h>
  #include <drm/drmP.h>
  #include "gpu_scheduler.h"
  
  struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity, void *owner)
  {
  	struct amd_sched_fence *fence = NULL;
  	unsigned seq;
  
  	fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
  	if (fence == NULL)
  		return NULL;
  
  	INIT_LIST_HEAD(&fence->scheduled_cb);
  	fence->owner = owner;
  	fence->sched = s_entity->sched;
  	spin_lock_init(&fence->lock);
  
  	seq = atomic_inc_return(&s_entity->fence_seq);
  	fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
  		   s_entity->fence_context, seq);
  
  	return fence;
  }
  
  void amd_sched_fence_signal(struct amd_sched_fence *fence)
  {
  	int ret = fence_signal(&fence->base);
  	if (!ret)
  		FENCE_TRACE(&fence->base, "signaled from irq context
  ");
  	else
  		FENCE_TRACE(&fence->base, "was already signaled
  ");
  }
  
/*
 * amd_sched_fence_scheduled - mark the fence as scheduled on hardware
 * @s_fence: fence whose job has been picked by the scheduler
 *
 * Sets the SCHEDULED bit on the base fence and then invokes every callback
 * queued on the fence's scheduled_cb list.  Each callback node is unlinked
 * (list_del_init) *before* its function runs, because the callback may
 * re-queue or free its own node.  _safe iteration is required for the same
 * reason.
 *
 * NOTE(review): no lock is taken around the list walk here — presumably the
 * caller serializes against amd_sched_fence_add_scheduled_callback; confirm
 * against the scheduler core.
 */
void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence)
{
	struct fence_cb *cur, *tmp;

	set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags);
	list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) {
		list_del_init(&cur->node);
		cur->func(&s_fence->base, cur);
	}
}
  
  static const char *amd_sched_fence_get_driver_name(struct fence *fence)
  {
  	return "amd_sched";
  }
  
  static const char *amd_sched_fence_get_timeline_name(struct fence *f)
  {
  	struct amd_sched_fence *fence = to_amd_sched_fence(f);
  	return (const char *)fence->sched->name;
  }
  
  static bool amd_sched_fence_enable_signaling(struct fence *f)
  {
  	return true;
  }
  
  static void amd_sched_fence_release(struct fence *f)
  {
  	struct amd_sched_fence *fence = to_amd_sched_fence(f);
  	kmem_cache_free(sched_fence_slab, fence);
  }
  
/*
 * fence_ops vtable for scheduler fences.
 *
 * .signaled is NULL: there is no way to poll the fence state, so the fence
 * core relies solely on the explicit fence_signal() issued from
 * amd_sched_fence_signal().  .wait falls back to the core's default
 * sleeping wait.
 */
const struct fence_ops amd_sched_fence_ops = {
	.get_driver_name = amd_sched_fence_get_driver_name,
	.get_timeline_name = amd_sched_fence_get_timeline_name,
	.enable_signaling = amd_sched_fence_enable_signaling,
	.signaled = NULL,
	.wait = fence_default_wait,
	.release = amd_sched_fence_release,
};