linux DRM GPU scheduler 笔记
struct drm_gpu_scheduler; int drm_sched_init(struct drm_gpu_scheduler *sched, //sched: scheduler instance
const struct drm_sched_backend_ops *ops, //ops: backend operations for this scheduler
unsigned hw_submission, //hw_submission: number of hw submissions that can be in flight
unsigned hang_limit, //hang_limit: number of times to allow a job to hang before dropping it
long timeout, //timeout: timeout value in jiffies for the scheduler
const char *name) //name: name used for debugging
struct drm_sched_backend_ops {
struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
struct drm_sched_entity *s_entity);
struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
void (*timedout_job)(struct drm_sched_job *sched_job);
void (*free_job)(struct drm_sched_job *sched_job);
};
int drm_sched_entity_init(struct drm_sched_entity *entity,
enum drm_sched_priority priority,
struct drm_gpu_scheduler **sched_list,
unsigned int num_sched_list,
atomic_t *guilty)
DRM_SCHED_PRIORITY_MIN
DRM_SCHED_PRIORITY_NORMAL
DRM_SCHED_PRIORITY_HIGH
DRM_SCHED_PRIORITY_KERNEL
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
void *owner)
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
struct drm_sched_entity *entity)
1 static const struct drm_sched_backend_ops vc4_bin_sched_ops = {
2 .dependency = vc4_job_dependency,
3 .run_job = vc4_bin_job_run,
4 .timedout_job = NULL,
5 .free_job = vc4_job_free,
6 };
7
8 static const struct drm_sched_backend_ops vc4_render_sched_ops = {
9 .dependency = vc4_job_dependency,
10 .run_job = vc4_render_job_run,
11 .timedout_job = NULL,
12 .free_job = vc4_job_free,
13 };
14
15 int vc4_sched_init(struct vc4_dev *vc4)
16 {
17 int hw_jobs_limit = 1;
18 int job_hang_limit = 0;
19 int hang_limit_ms = 500;
20 int ret;
21
22 ret = drm_sched_init(&vc4->queue[VC4_BIN].sched,
23 &vc4_bin_sched_ops,
24 hw_jobs_limit,
25 job_hang_limit,
26 msecs_to_jiffies(hang_limit_ms),
27 "vc4_bin");
28 if (ret) {
29 dev_err(vc4->base.dev, "Failed to create bin scheduler: %d.", ret);
30 return ret;
31 }
32
33 ret = drm_sched_init(&vc4->queue[VC4_RENDER].sched,
34 &vc4_render_sched_ops,
35 hw_jobs_limit,
36 job_hang_limit,
37 msecs_to_jiffies(hang_limit_ms),
38 "vc4_render");
39 if (ret) {
40 dev_err(vc4->base.dev, "Failed to create render scheduler: %d.", ret);
41 vc4_sched_fini(vc4);
42 return ret;
43 }
44
45 return ret;
46 }
1 static int vc4_open(struct drm_device *dev, struct drm_file *file)
2 {
3 struct vc4_dev *vc4 = to_vc4_dev(dev);
4 struct vc4_file *vc4file;
5 struct drm_gpu_scheduler *sched;
6 int i;
7
8 vc4file = kzalloc(sizeof(*vc4file), GFP_KERNEL);
9 if (!vc4file)
10 return -ENOMEM;
11
12 vc4_perfmon_open_file(vc4file);
13
14 for (i = 0; i < VC4_MAX_QUEUES; i++) {
15 sched = &vc4->queue[i].sched;
16 drm_sched_entity_init(&vc4file->sched_entity[i],
17 DRM_SCHED_PRIORITY_NORMAL,
18 &sched, 1,
19 NULL);
20 }
21
22 file->driver_priv = vc4file;
23
24 return 0;
25 }
1 static void vc4_job_free(struct kref *ref)
2 {
3 struct vc4_job *job = container_of(ref, struct vc4_job, refcount);
4 struct vc4_dev *vc4 = job->dev;
5 struct vc4_exec_info *exec = job->exec;
6 struct vc4_seqno_cb *cb, *cb_temp;
7 struct dma_fence *fence;
8 unsigned long index;
9 unsigned long irqflags;
10
11 xa_for_each(&job->deps, index, fence) {
12 dma_fence_put(fence);
13 }
14 xa_destroy(&job->deps);
15
16 dma_fence_put(job->irq_fence);
17 dma_fence_put(job->done_fence);
18
19 if (exec)
20 vc4_complete_exec(&job->dev->base, exec);
21
22 spin_lock_irqsave(&vc4->job_lock, irqflags);
23 list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
24 if (cb->seqno <= vc4->finished_seqno) {
25 list_del_init(&cb->work.entry);
26 schedule_work(&cb->work);
27 }
28 }
29
30 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
31
32 kfree(job);
33 }
34
35 void vc4_job_put(struct vc4_job *job)
36 {
37 kref_put(&job->refcount, job->free);
38 }
39
40 static int vc4_job_init(struct vc4_dev *vc4, struct drm_file *file_priv,
41 struct vc4_job *job, void (*free)(struct kref *ref), u32 in_sync)
42 {
43 struct dma_fence *in_fence = NULL;
44 int ret;
45
46 xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
47
48 if (in_sync) {
49 ret = drm_syncobj_find_fence(file_priv, in_sync, 0, 0, &in_fence);
50 if (ret == -EINVAL)
51 goto fail;
52
53 ret = drm_gem_fence_array_add(&job->deps, in_fence);
54 if (ret) {
55 dma_fence_put(in_fence);
56 goto fail;
57 }
58 }
59
60 kref_init(&job->refcount);
61 job->free = free;
62
63 return 0;
64
65 fail:
66 xa_destroy(&job->deps);
67 return ret;
68 }
69
70 static int vc4_push_job(struct drm_file *file_priv, struct vc4_job *job, enum vc4_queue queue)
71 {
72 struct vc4_file *vc4file = file_priv->driver_priv;
73 int ret;
74
75 ret = drm_sched_job_init(&job->base, &vc4file->sched_entity[queue], vc4file);
76 if (ret)
77 return ret;
78
79 job->done_fence = dma_fence_get(&job->base.s_fence->finished);
80
81 kref_get(&job->refcount);
82
83 drm_sched_entity_push_job(&job->base, &vc4file->sched_entity[queue]);
84
85 return 0;
86 }
87
88 /* Queues a struct vc4_exec_info for execution. If no job is
89 * currently executing, then submits it.
90 *
91 * Unlike most GPUs, our hardware only handles one command list at a
92 * time. To queue multiple jobs at once, we'd need to edit the
93 * previous command list to have a jump to the new one at the end, and
94 * then bump the end address. That's a change for a later date,
95 * though.
96 */
97 static int
98 vc4_queue_submit_to_scheduler(struct drm_device *dev,
99 struct drm_file *file_priv,
100 struct vc4_exec_info *exec,
101 struct ww_acquire_ctx *acquire_ctx)
102 {
103 struct vc4_dev *vc4 = to_vc4_dev(dev);
104 struct drm_vc4_submit_cl *args = exec->args;
105 struct vc4_job *bin = NULL;
106 struct vc4_job *render = NULL;
107 struct drm_syncobj *out_sync;
108 uint64_t seqno;
109 unsigned long irqflags;
110 int ret;
111
112 spin_lock_irqsave(&vc4->job_lock, irqflags);
113
114 seqno = ++vc4->emit_seqno;
115 exec->seqno = seqno;
116
117 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
118
119 render = kcalloc(1, sizeof(*render), GFP_KERNEL);
120 if (!render)
121 return -ENOMEM;
122
123 render->exec = exec;
124
125 ret = vc4_job_init(vc4, file_priv, render, vc4_job_free, args->in_sync);
126 if (ret) {
127 kfree(render);
128 return ret;
129 }
130
131 if (args->bin_cl_size != 0) {
132 bin = kcalloc(1, sizeof(*bin), GFP_KERNEL);
133 if (!bin) {
134 vc4_job_put(render);
135 return -ENOMEM;
136 }
137
138 bin->exec = exec;
139
140 ret = vc4_job_init(vc4, file_priv, bin, vc4_job_free, args->in_sync);
141 if (ret) {
142 vc4_job_put(render);
143 kfree(bin);
144 return ret;
145 }
146 }
147
148 mutex_lock(&vc4->sched_lock);
149
150 if (bin) {
151 ret = vc4_push_job(file_priv, bin, VC4_BIN);
152 if (ret)
153 goto FAIL;
154
155 ret = drm_gem_fence_array_add(&render->deps, dma_fence_get(bin->done_fence));
156 if (ret)
157 goto FAIL;
158 }
159
160 vc4_push_job(file_priv, render, VC4_RENDER);
161
162 mutex_unlock(&vc4->sched_lock);
163
164 if (args->out_sync) {
165 out_sync = drm_syncobj_find(file_priv, args->out_sync);
166 if (!out_sync) {
167 ret = -EINVAL;
168 goto FAIL;;
169 }
170
171 drm_syncobj_replace_fence(out_sync, &bin->base.s_fence->scheduled);
172 exec->fence = render->done_fence;
173
174 drm_syncobj_put(out_sync);
175 }
176
177 vc4_update_bo_seqnos(exec, seqno);
178
179 vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
180
181 if (bin)
182 vc4_job_put(bin);
183 vc4_job_put(render);
184
185 return 0;
186
187 FAIL:
188 return ret;
189 }
linux DRM GPU scheduler 笔记的更多相关文章
- linux DRM 之 GEM 笔记
原文链接:https://www.cnblogs.com/yaongtime/p/14418357.html 在GPU上的各类操作中涉及到多种.多个buffer的使用. 通常我们GPU是通过图像API ...
- Linux DRM KMS 驱动简介【转】
转自:https://blog.csdn.net/yangkuanqaz85988/article/details/48689521 Whoops,上次写完<Linux DRM Graphic ...
- linux DRM/KMS 测试工具 modetest、kmscude、igt-gpu-tools (一)
这里整理几个在学习Linux DRM/KMS中用到的几个工具,modetest.kmscude.igt-gpu-tools. 简介: modetest 是由libdrm提供的测试程序,可以查询显示设备 ...
- linux DRM/KMS 测试工具 modetest、kmscude、igt-gpu-tools (二)
kmscube kmscube is a little demonstration program for how to drive bare metal graphics without a c ...
- linux 2.6 驱动笔记(一)
本文作为linux 2.6 驱动笔记,记录环境搭建及linux基本内核模块编译加载. 环境搭建: 硬件:OK6410开发板 目标板操作系统:linux 2.6 交叉编译环境:windows 7 + v ...
- Linux内核分析课程笔记(一)
linux内核分析课程笔记(一) 冯诺依曼体系结构 冯诺依曼体系结构实际上就是存储程序计算机. 从两个层面来讲: 从硬件的角度来看,冯诺依曼体系结构逻辑上可以抽象成CPU和内存,通过总线相连.CPU上 ...
- Linux进程间通信IPC学习笔记之同步二(SVR4 信号量)
Linux进程间通信IPC学习笔记之同步二(SVR4 信号量)
- Linux进程间通信IPC学习笔记之同步二(Posix 信号量)
Linux进程间通信IPC学习笔记之同步二(Posix 信号量)
- Linux进程间通信IPC学习笔记之消息队列(SVR4)
Linux进程间通信IPC学习笔记之消息队列(SVR4)
随机推荐
- 精尽Spring MVC源码分析 - HandlerMapping 组件(二)之 HandlerInterceptor 拦截器
该系列文档是本人在学习 Spring MVC 的源码过程中总结下来的,可能对读者不太友好,请结合我的源码注释 Spring MVC 源码分析 GitHub 地址 进行阅读 Spring 版本:5.2. ...
- js上 八.语句
1.什么是语句 概念:在JavaScript中表达式是短语,语句就是整句或命令. 作用:用来执行以使某件事发生 特征:用 ; 结尾 Js中语句的常见形式: ü 表达式语句 ü 复合语句 ü 声明语句 ...
- Eureka系列(六) TimedSupervisorTask类解析
为什么要单独讲解TimedSupervisorTask这个类呢?因为这个类在我们DiscoveryClient类的initScheduledTasks方法进行定时任务初始化时被使用得比较多,所以我 ...
- Eureka系列(八)服务剔除具体实现
服务下线的大致流程图 下面这张图很简单地描述了服务剔除的大致流程: 服务剔除实现源码分析 首先我们得了解下服务剔除这个定时任务是什么被初始化启动的,在百度搜索中,在我们Eureka Serve ...
- Javascript 根据文件名判断是否未图片
var isImage = (/\.(gif|jpe?g|tiff?|png|webp|bmp)$/i).test(filename)
- 浅析JavaWeb开发模式:Model1和Model2
一.前言 在学习JavaWeb的过程中,大家都会接触到Model1和Model2,历史的发展过程是Model1 → Model2.那么它们之间有何相同之处和不同之处呢? 二.Model1 Model1 ...
- 【Go语言绘图】图片添加文字(一)
前一篇讲解了利用gg包来进行图片旋转的操作,这一篇我们来看看怎么在图片上添加文字. 绘制纯色背景 首先,我们先绘制一个纯白色的背景,作为添加文字的背景板. package main import &q ...
- Getting unknown property: common\models\Teacher::auth_Key
找了一个半小时,不知道为什么会缺少这个属性,数据库里面的字段明明都是有的. 然后随后找到了原因,是因为key中的k大写了,所以无法识别这个属性.把自己坑到了,以此为戒,以后多注意细节问题
- Python机器学习课程:线性回归算法
本文的文字及图片来源于网络,仅供学习.交流使用,不具有任何商业用途,如有问题请及时联系我们以作处理 最基本的机器学习算法必须是具有单个变量的线性回归算法.如今,可用的高级机器学习算法,库和技术如此之多 ...
- k8s之深入解剖Pod(三)
目录: Pod的调度 Pod的扩容和缩容 Pod的滚动升级 一.Pod的调度 Pod只是容器的载体,通常需要通过RC.Deployment.DaemonSet.Job等对象来完成Pod的调度和自动控制 ...