// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <linux/clk.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

#define HVS_NUM_CHANNELS 3

struct vc4_ctm_state {
	struct drm_private_state base;
	struct drm_color_ctm *ctm;
	int fifo;
};

static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_ctm_state, base);
}

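/*
 * Global (non-CRTC) state tracking which HVS FIFOs (channels) are in
 * use, and the commit currently in flight on each of them.
 */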
struct vc4_hvs_state {
	struct drm_private_state base;

	struct {
		unsigned in_use: 1;
		struct drm_crtc_commit *pending_commit;
	} fifo_state[HVS_NUM_CHANNELS];
};

static struct vc4_hvs_state *
to_vc4_hvs_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_hvs_state, base);
}

struct vc4_load_tracker_state {
	struct drm_private_state base;
	u64 hvs_load;
	u64 membus_load;
};

static struct vc4_load_tracker_state *
to_vc4_load_tracker_state(struct drm_private_state *priv)
{
	return container_of(priv, struct vc4_load_tracker_state, base);
}

static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}

static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_ctm_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

	kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};

static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->ctm_manager);
}

static int vc4_ctm_obj_init(struct vc4_dev *vc4)
{
	struct vc4_ctm_state *ctm_state;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
}

/* Converts a DRM S31.32 value to the HW S0.9 format. */
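/*
 * For example, 0.5 in S31.32 is 0x0000000080000000: the top nine
 * fractional bits (in >> 23) give 0x100, which is 0.5 in S0.9.
 */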
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits so we can only saturate here. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most significant fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}

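/*
 * Latch the committed CTM into the HVS. The matrix only applies to the
 * FIFO selected by SCALER_OLEDOFFS_DISPFIFO, and a fifo value of 0
 * disables the CTM entirely.
 */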
static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}

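/*
 * Helpers to fetch the global HVS channel state from an atomic state:
 * the new or old copy (which must already be part of the state), or,
 * for vc4_hvs_get_global_state(), the locked copy, pulling the private
 * object into the state if needed.
 */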
static struct vc4_hvs_state *
vc4_hvs_get_new_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_hvs_state(priv_state);
}

static struct vc4_hvs_state *
vc4_hvs_get_old_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_hvs_state(priv_state);
}

static struct vc4_hvs_state *
vc4_hvs_get_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_hvs_state(priv_state);
}

static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned int i;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		u32 dispctrl;
		u32 dsp3_mux;

		if (!crtc_state->active)
			continue;

		if (vc4_state->assigned_channel != 2)
			continue;

		/*
		 * SCALER_DISPCTRL_DSP3 = X, where X < 3 means 'connect DSP3 to
		 * FIFO X'.
		 * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
		 *
		 * DSP3 is connected to FIFO2 unless the transposer is
		 * enabled. In this case, FIFO 2 is directly accessed by the
		 * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
		 * route.
		 */
		if (vc4_state->feed_txp)
			dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
		else
			dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);

		dispctrl = HVS_READ(SCALER_DISPCTRL) &
			   ~SCALER_DISPCTRL_DSP3_MUX_MASK;
		HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
	}
}

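/*
 * BCM2711 (VC5) muxing: each pixelvalve output has its own mux field,
 * spread across several HVS registers. For DSP3 to DSP5, a mux value
 * of 3 leaves the output disconnected from any FIFO.
 */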
static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned char mux;
	unsigned int i;
	u32 reg;

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);

		if (!vc4_state->update_muxing)
			continue;

		switch (vc4_crtc->data->hvs_output) {
		case 2:
			mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
			reg = HVS_READ(SCALER_DISPECTRL);
			HVS_WRITE(SCALER_DISPECTRL,
				  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
			break;

		case 3:
			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = vc4_state->assigned_channel;

			reg = HVS_READ(SCALER_DISPCTRL);
			HVS_WRITE(SCALER_DISPCTRL,
				  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
			break;

		case 4:
			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = vc4_state->assigned_channel;

			reg = HVS_READ(SCALER_DISPEOLN);
			HVS_WRITE(SCALER_DISPEOLN,
				  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));

			break;

		case 5:
			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = vc4_state->assigned_channel;

			reg = HVS_READ(SCALER_DISPDITHER);
			HVS_WRITE(SCALER_DISPDITHER,
				  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
			break;

		default:
			break;
		}
	}
}

static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *new_crtc_state;
	struct drm_crtc *crtc;
	struct vc4_hvs_state *old_hvs_state;
	unsigned int channel;
	int i;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state;

		if (!new_crtc_state->commit)
			continue;

		vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
		vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
	}

	old_hvs_state = vc4_hvs_get_old_global_state(state);
	if (IS_ERR(old_hvs_state))
		return;

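	/*
	 * Wait for any commit still in flight on a FIFO we are about to
	 * touch, so that we never reprogram the muxing underneath it.
	 */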
	for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
		struct drm_crtc_commit *commit;
		int ret;

		if (!old_hvs_state->fifo_state[channel].in_use)
			continue;

		commit = old_hvs_state->fifo_state[channel].pending_commit;
		if (!commit)
			continue;

		ret = drm_crtc_commit_wait(commit);
		if (ret)
			drm_err(dev, "Timed out waiting for commit\n");

		drm_crtc_commit_put(commit);
		old_hvs_state->fifo_state[channel].pending_commit = NULL;
	}

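	/*
	 * Raise the floor of the HVS core clock to 500 MHz while the
	 * commit is being applied; it is dropped back once the commit
	 * has completed.
	 */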
	if (vc4->hvs->hvs5)
		clk_set_min_rate(hvs->core_clk, 500000000);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	vc4_ctm_commit(vc4, state);

	if (vc4->hvs->hvs5)
		vc5_hvs_pv_muxing_commit(vc4, state);
	else
		vc4_hvs_pv_muxing_commit(vc4, state);

	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	if (vc4->hvs->hvs5)
		clk_set_min_rate(hvs->core_clk, 0);
}

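/*
 * Called during commit setup: for each HVS FIFO in use, store a
 * reference to the drm_crtc_commit of the CRTC feeding it, so that a
 * later commit touching the same FIFO can wait for this one to finish.
 */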
static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct vc4_hvs_state *hvs_state;
	struct drm_crtc *crtc;
	unsigned int i;

	hvs_state = vc4_hvs_get_new_global_state(state);
	if (WARN_ON(IS_ERR(hvs_state)))
		return PTR_ERR(hvs_state);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state =
			to_vc4_crtc_state(crtc_state);
		unsigned int channel =
			vc4_crtc_state->assigned_channel;

		if (channel == VC4_HVS_CHANNEL_DISABLED)
			continue;

		if (!hvs_state->fifo_state[channel].in_use)
			continue;

		hvs_state->fifo_state[channel].pending_commit =
			drm_crtc_commit_get(crtc_state->commit);
	}

	return 0;
}

static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_mode_fb_cmd2 mode_cmd_local;

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, mode_cmd);
}

/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 * we don't allow userland to set a CTM that we have no hope of approximating.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_color_ctm *ctm;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		/* CTM is being disabled. */
		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
			ctm_state->fifo = 0;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->ctm == old_crtc_state->ctm)
			continue;

		if (!ctm_state) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
		}

		/* CTM is being enabled or the matrix changed. */
		if (new_crtc_state->ctm) {
			struct vc4_crtc_state *vc4_crtc_state =
				to_vc4_crtc_state(new_crtc_state);

			/* fifo is 1-based since 0 disables CTM. */
			int fifo = vc4_crtc_state->assigned_channel + 1;

			/* Check userland isn't trying to turn on CTM for more
			 * than one CRTC at a time.
			 */
			if (ctm_state->fifo && ctm_state->fifo != fifo) {
				DRM_DEBUG_DRIVER("Too many CTMs configured\n");
				return -EINVAL;
			}

			/* Check we can approximate the specified CTM.
			 * We disallow scalars |c| > 1.0 since the HW has
			 * no integer bits.
			 */
			ctm = new_crtc_state->ctm->data;
			for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
				u64 val = ctm->matrix[i];

				val &= ~BIT_ULL(63);
				if (val > BIT_ULL(32))
					return -EINVAL;
			}

			ctm_state->fifo = fifo;
			ctm_state->ctm = ctm;
		}
	}

	return 0;
}

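/*
 * Sum the memory bus and HVS loads of all planes in the state, and
 * reject the commit if either total exceeds what the hardware can
 * sustain.
 */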
static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct vc4_load_tracker_state *load_state;
	struct drm_private_state *priv_state;
	struct drm_plane *plane;
	int i;

	if (!vc4->load_tracker_available)
		return 0;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i) {
		struct vc4_plane_state *vc4_plane_state;

		if (old_plane_state->fb && old_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(old_plane_state);
			load_state->membus_load -= vc4_plane_state->membus_load;
			load_state->hvs_load -= vc4_plane_state->hvs_load;
		}

		if (new_plane_state->fb && new_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(new_plane_state);
			load_state->membus_load += vc4_plane_state->membus_load;
			load_state->hvs_load += vc4_plane_state->hvs_load;
		}
	}

	/* Don't check the load when the tracker is disabled. */
	if (!vc4->load_tracker_enabled)
		return 0;

	/* The absolute limit is 2Gbyte/sec, but let's take a margin to let
	 * the system work when other blocks are accessing the memory.
	 */
	if (load_state->membus_load > SZ_1G + SZ_512M)
		return -ENOSPC;

	/* The HVS clock is supposed to run at 250 MHz; take a margin and
	 * consider the maximum number of cycles to be 240M.
	 */
	if (load_state->hvs_load > 240000000ULL)
		return -ENOSPC;

	return 0;
}

static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_load_tracker_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_load_tracker_state *load_state;

	load_state = to_vc4_load_tracker_state(state);
	kfree(load_state);
}

static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
	.atomic_destroy_state = vc4_load_tracker_destroy_state,
};

static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	if (!vc4->load_tracker_available)
		return;

	drm_atomic_private_obj_fini(&vc4->load_tracker);
}

static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
{
	struct vc4_load_tracker_state *load_state;

	if (!vc4->load_tracker_available)
		return 0;

	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
	if (!load_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
				    &load_state->base,
				    &vc4_load_tracker_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
}

static struct drm_private_state *
vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
	struct vc4_hvs_state *state;
	unsigned int i;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

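	/*
	 * Only the in_use flags are carried over: the pending_commit
	 * references stay with the old state and are put when that
	 * state is destroyed.
	 */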
	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
	}

	return &state->base;
}

static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
	unsigned int i;

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		if (!hvs_state->fifo_state[i].pending_commit)
			continue;

		drm_crtc_commit_put(hvs_state->fifo_state[i].pending_commit);
	}

	kfree(hvs_state);
}

static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
	.atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
	.atomic_destroy_state = vc4_hvs_channels_destroy_state,
};

static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->hvs_channels);
}

static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
{
	struct vc4_hvs_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
				    &state->base,
				    &vc4_hvs_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
}

/*
 * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
 * the TXP (and therefore all the CRTCs found on that platform).
 *
 * The naive (and our initial) implementation would just iterate over
 * all the active CRTCs, try to find a suitable FIFO, and then remove it
 * from the pool of available FIFOs. However, there are a few corner
 * cases that need to be considered:
 *
 * - When running in a dual-display setup (so with two CRTCs involved),
 *   we can update the state of a single CRTC (for example by changing
 *   its mode using xrandr under X11) without affecting the other. In
 *   this case, the other CRTC wouldn't be in the state at all, so we
 *   need to consider all the running CRTCs in the DRM device to assign
 *   a FIFO, not just the one in the state.
 *
 * - To fix the above, we can't use drm_atomic_get_crtc_state on all
 *   enabled CRTCs to pull their CRTC state into the global state, since
 *   a page flip would then wait for their vblank to complete. Since
 *   we don't have a guarantee that they are actually active, that
 *   vblank might never happen, and shouldn't even be considered if we
 *   want to do a page flip on a single CRTC. That can be tested by
 *   doing a modetest -v first on HDMI1 and then on HDMI0.
 *
 * - Since we need the pixelvalve to be disabled and enabled back when
 *   the FIFO is changed, we should keep the FIFO assigned for as long
 *   as the CRTC is enabled, only considering it free again once that
 *   CRTC has been disabled. This can be tested by booting X11 on a
 *   single display, and changing the resolution down and then back up.
 */
static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
				      struct drm_atomic_state *state)
{
	struct vc4_hvs_state *hvs_new_state;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	unsigned int unassigned_channels = 0;
	unsigned int i;

	hvs_new_state = vc4_hvs_get_global_state(state);
	if (IS_ERR(hvs_new_state))
		return PTR_ERR(hvs_new_state);

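	/*
	 * Build the pool of HVS channels that no enabled CRTC is
	 * currently using; channels are assigned from this pool below.
	 */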
	for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
		if (!hvs_new_state->fifo_state[i].in_use)
			unassigned_channels |= BIT(i);

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct vc4_crtc_state *old_vc4_crtc_state =
			to_vc4_crtc_state(old_crtc_state);
		struct vc4_crtc_state *new_vc4_crtc_state =
			to_vc4_crtc_state(new_crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		unsigned int matching_channels;
		unsigned int channel;

		/* Nothing to do here, let's skip it */
		if (old_crtc_state->enable == new_crtc_state->enable)
			continue;

		/* Muxing will need to be modified, mark it as such */
		new_vc4_crtc_state->update_muxing = true;

		/* If we're disabling our CRTC, we put back our channel */
		if (!new_crtc_state->enable) {
			channel = old_vc4_crtc_state->assigned_channel;
			hvs_new_state->fifo_state[channel].in_use = false;
			new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
			continue;
		}

		/*
		 * The problem we have to solve here is that we have
		 * up to 7 encoders, connected to up to 6 CRTCs.
		 *
		 * Those CRTCs, depending on the instance, can be
		 * routed to 1, 2 or 3 HVS FIFOs, and we need to change
		 * the muxing between FIFOs and outputs in the HVS
		 * accordingly.
		 *
		 * It would be pretty hard to come up with an
		 * algorithm that would generically solve
		 * this. However, the current routing trees we support
		 * allow us to simplify the problem a bit.
		 *
		 * Indeed, with the current supported layouts, if we
		 * try to assign the FIFOs in ascending CRTC index
		 * order, we can't fall into the situation where an
		 * earlier CRTC that had multiple routes is assigned
		 * one that was the only option for a later CRTC.
		 *
		 * If the layout changes and doesn't give us that in
		 * the future, we will need to have something smarter,
		 * but it works so far.
		 */
		matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
		if (!matching_channels)
			return -EINVAL;

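		/* Pick the lowest-numbered channel this CRTC can use. */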
		channel = ffs(matching_channels) - 1;
		new_vc4_crtc_state->assigned_channel = channel;
		unassigned_channels &= ~BIT(channel);
		hvs_new_state->fifo_state[channel].in_use = true;
	}

	return 0;
}

static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = vc4_pv_muxing_atomic_check(dev, state);
	if (ret)
		return ret;

	ret = vc4_ctm_atomic_check(dev, state);
	if (ret < 0)
		return ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	return vc4_load_tracker_atomic_check(state);
}

static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {
	.atomic_commit_setup = vc4_atomic_commit_setup,
	.atomic_commit_tail = vc4_atomic_commit_tail,
};

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
	.fb_create = vc4_fb_create,
};

int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool is_vc5 = of_device_is_compatible(dev->dev->of_node,
					      "brcm,bcm2711-vc5");
	int ret;

	if (!is_vc5) {
		vc4->load_tracker_available = true;

		/* Start with the load tracker enabled. Can be
		 * disabled through the debugfs load_tracker file.
		 */
		vc4->load_tracker_enabled = true;
	}

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

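	/* The BCM2711 HVS supports much larger scanout dimensions. */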
	if (is_vc5) {
		dev->mode_config.max_width = 7680;
		dev->mode_config.max_height = 7680;
	} else {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	}

	dev->mode_config.funcs = &vc4_mode_funcs;
	dev->mode_config.helper_private = &vc4_mode_config_helpers;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;

	ret = vc4_ctm_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_load_tracker_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_hvs_channels_obj_init(vc4);
	if (ret)
		return ret;

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	return 0;
}