--- drm.c (2ef6765ccaab2c69470d7049d9a9bf6456a6e666)
+++ drm.c (ab7d3f5826c55ad23101327eab435660caa83436)
 /*
  * Copyright (C) 2012 Avionic Design GmbH
  * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */

--- 19 unchanged lines hidden ---

 #define CARVEOUT_SZ SZ_64M
 #define CDMA_GATHER_FETCHES_MAX_NB 16383

 struct tegra_drm_file {
 	struct idr contexts;
 	struct mutex lock;
 };

-static void tegra_atomic_schedule(struct tegra_drm *tegra,
-				  struct drm_atomic_state *state)
+static int tegra_atomic_check(struct drm_device *drm,
+			      struct drm_atomic_state *state)
 {
-	tegra->commit.state = state;
-	schedule_work(&tegra->commit.work);
-}
+	int err;

-static void tegra_atomic_complete(struct tegra_drm *tegra,
-				  struct drm_atomic_state *state)
-{
-	struct drm_device *drm = tegra->drm;
+	err = drm_atomic_helper_check_modeset(drm, state);
+	if (err < 0)
+		return err;

-	/*
-	 * Everything below can be run asynchronously without the need to grab
-	 * any modeset locks at all under one condition: It must be guaranteed
-	 * that the asynchronous work has either been cancelled (if the driver
-	 * supports it, which at least requires that the framebuffers get
-	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
-	 * before the new state gets committed on the software side with
-	 * drm_atomic_helper_swap_state().
-	 *
-	 * This scheme allows new atomic state updates to be prepared and
-	 * checked in parallel to the asynchronous completion of the previous
-	 * update. Which is important since compositors need to figure out the
-	 * composition of the next frame right after having submitted the
-	 * current layout.
-	 */
+	err = drm_atomic_normalize_zpos(drm, state);
+	if (err < 0)
+		return err;

-	drm_atomic_helper_commit_modeset_disables(drm, state);
-	drm_atomic_helper_commit_modeset_enables(drm, state);
-	drm_atomic_helper_commit_planes(drm, state,
-					DRM_PLANE_COMMIT_ACTIVE_ONLY);
+	err = drm_atomic_helper_check_planes(drm, state);
+	if (err < 0)
+		return err;

-	drm_atomic_helper_wait_for_vblanks(drm, state);
+	if (state->legacy_cursor_update)
+		state->async_update = !drm_atomic_helper_async_check(drm, state);

-	drm_atomic_helper_cleanup_planes(drm, state);
-	drm_atomic_state_put(state);
+	return 0;
 }

-static void tegra_atomic_work(struct work_struct *work)
+static struct drm_atomic_state *
+tegra_atomic_state_alloc(struct drm_device *drm)
 {
-	struct tegra_drm *tegra = container_of(work, struct tegra_drm,
-					       commit.work);
+	struct tegra_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

-	tegra_atomic_complete(tegra, tegra->commit.state);
+	if (!state || drm_atomic_state_init(drm, &state->base) < 0) {
+		kfree(state);
+		return NULL;
+	}
+
+	return &state->base;
 }

-static int tegra_atomic_commit(struct drm_device *drm,
-			       struct drm_atomic_state *state, bool nonblock)
+static void tegra_atomic_state_clear(struct drm_atomic_state *state)
 {
-	struct tegra_drm *tegra = drm->dev_private;
-	int err;
+	struct tegra_atomic_state *tegra = to_tegra_atomic_state(state);

-	err = drm_atomic_helper_prepare_planes(drm, state);
-	if (err)
-		return err;
+	drm_atomic_state_default_clear(state);
+	tegra->clk_disp = NULL;
+	tegra->dc = NULL;
+	tegra->rate = 0;
+}

-	/* serialize outstanding nonblocking commits */
-	mutex_lock(&tegra->commit.lock);
-	flush_work(&tegra->commit.work);
-
-	/*
-	 * This is the point of no return - everything below never fails except
-	 * when the hw goes bonghits. Which means we can commit the new state on
-	 * the software side now.
-	 */
-
-	err = drm_atomic_helper_swap_state(state, true);
-	if (err) {
-		mutex_unlock(&tegra->commit.lock);
-		drm_atomic_helper_cleanup_planes(drm, state);
-		return err;
-	}
-
-	drm_atomic_state_get(state);
-	if (nonblock)
-		tegra_atomic_schedule(tegra, state);
-	else
-		tegra_atomic_complete(tegra, state);
-
-	mutex_unlock(&tegra->commit.lock);
-	return 0;
+static void tegra_atomic_state_free(struct drm_atomic_state *state)
+{
+	drm_atomic_state_default_release(state);
+	kfree(state);
 }

-static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
+static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
 	.fb_create = tegra_fb_create,
 #ifdef CONFIG_DRM_FBDEV_EMULATION
 	.output_poll_changed = tegra_fb_output_poll_changed,
 #endif
-	.atomic_check = drm_atomic_helper_check,
-	.atomic_commit = tegra_atomic_commit,
+	.atomic_check = tegra_atomic_check,
+	.atomic_commit = drm_atomic_helper_commit,
+	.atomic_state_alloc = tegra_atomic_state_alloc,
+	.atomic_state_clear = tegra_atomic_state_clear,
+	.atomic_state_free = tegra_atomic_state_free,
 };

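Note: struct tegra_atomic_state and the to_tegra_atomic_state() cast used by tegra_atomic_state_clear() above are defined in the driver header (drm.h), not in this file. A minimal sketch of what that subclass plausibly looks like, inferred from the fields cleared above (the exact member types are an assumption):

	/* sketch, not part of this diff: driver-private atomic state */
	struct tegra_atomic_state {
		struct drm_atomic_state base;
		struct clk *clk_disp;		/* assumed: display clock to reparent */
		struct tegra_dc *dc;		/* assumed: DC driving that clock */
		unsigned long rate;		/* assumed: requested clock rate */
	};

	static inline struct tegra_atomic_state *
	to_tegra_atomic_state(struct drm_atomic_state *state)
	{
		return container_of(state, struct tegra_atomic_state, base);
	}

Subclassing through .atomic_state_alloc/.atomic_state_clear/.atomic_state_free lets the driver carry per-commit data alongside the core drm_atomic_state, with the clear hook resetting those fields whenever the core recycles a state object.
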
+static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
+{
+	struct drm_device *drm = old_state->dev;
+	struct tegra_drm *tegra = drm->dev_private;
+
+	if (tegra->hub) {
+		drm_atomic_helper_commit_modeset_disables(drm, old_state);
+		tegra_display_hub_atomic_commit(drm, old_state);
+		drm_atomic_helper_commit_planes(drm, old_state, 0);
+		drm_atomic_helper_commit_modeset_enables(drm, old_state);
+		drm_atomic_helper_commit_hw_done(old_state);
+		drm_atomic_helper_wait_for_vblanks(drm, old_state);
+		drm_atomic_helper_cleanup_planes(drm, old_state);
+	} else {
+		drm_atomic_helper_commit_tail_rpm(old_state);
+	}
+}
+
+static const struct drm_mode_config_helper_funcs
+tegra_drm_mode_config_helpers = {
+	.atomic_commit_tail = tegra_atomic_commit_tail,
+};
+
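Note: with .atomic_commit now pointing at drm_atomic_helper_commit, the driver no longer hand-rolls nonblocking commits; the core helper prepares planes, swaps the state and then runs the .atomic_commit_tail hook installed above. The non-hub branch defers to drm_atomic_helper_commit_tail_rpm(), which performs roughly the following sequence (a sketch of the core helper, shown only to contrast with the hub path):

	static void example_commit_tail_rpm(struct drm_atomic_state *old_state)
	{
		struct drm_device *dev = old_state->dev;

		/* enable CRTCs before touching planes, power-managed variant */
		drm_atomic_helper_commit_modeset_disables(dev, old_state);
		drm_atomic_helper_commit_modeset_enables(dev, old_state);
		drm_atomic_helper_commit_planes(dev, old_state,
						DRM_PLANE_COMMIT_ACTIVE_ONLY);
		drm_atomic_helper_commit_hw_done(old_state);
		drm_atomic_helper_wait_for_vblanks(dev, old_state);
		drm_atomic_helper_cleanup_planes(dev, old_state);
	}

The hub branch open-codes a similar sequence so that tegra_display_hub_atomic_commit() runs right after the modeset disables and planes are committed before the modeset enables.
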
 static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
 {
 	struct host1x_device *device = to_host1x_device(drm->dev);
 	struct tegra_drm *tegra;
 	int err;

 	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
 	if (!tegra)

--- 30 unchanged lines hidden ---

 		DRM_DEBUG(" GEM: %#llx-%#llx\n", gem_start, gem_end);
 		DRM_DEBUG(" Carveout: %#llx-%#llx\n", carveout_start,
 			  carveout_end);
 	}

 	mutex_init(&tegra->clients_lock);
 	INIT_LIST_HEAD(&tegra->clients);

-	mutex_init(&tegra->commit.lock);
-	INIT_WORK(&tegra->commit.work, tegra_atomic_work);
-
 	drm->dev_private = tegra;
 	tegra->drm = drm;

 	drm_mode_config_init(drm);

 	drm->mode_config.min_width = 0;
 	drm->mode_config.min_height = 0;

 	drm->mode_config.max_width = 4096;
 	drm->mode_config.max_height = 4096;

 	drm->mode_config.allow_fb_modifiers = true;

-	drm->mode_config.funcs = &tegra_drm_mode_funcs;
+	drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
+	drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;

 	err = tegra_drm_fb_prepare(drm);
 	if (err < 0)
 		goto config;

 	drm_kms_helper_poll_init(drm);

 	err = host1x_device_init(device);
 	if (err < 0)
 		goto fbdev;

+	if (tegra->hub) {
+		err = tegra_display_hub_prepare(tegra->hub);
+		if (err < 0)
+			goto device;
+	}
+
 	/*
 	 * We don't use the drm_irq_install() helpers provided by the DRM
 	 * core, so we need to set this manually in order to allow the
 	 * DRM_IOCTL_WAIT_VBLANK to operate correctly.
 	 */
 	drm->irq_enabled = true;

 	/* syncpoints are used for full 32-bit hardware VBLANK counters */
 	drm->max_vblank_count = 0xffffffff;

 	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
 	if (err < 0)
-		goto device;
+		goto hub;

 	drm_mode_config_reset(drm);

 	err = tegra_drm_fb_init(drm);
 	if (err < 0)
-		goto device;
+		goto hub;

 	return 0;

+hub:
+	if (tegra->hub)
+		tegra_display_hub_cleanup(tegra->hub);
 device:
 	host1x_device_exit(device);
 fbdev:
 	drm_kms_helper_poll_fini(drm);
 	tegra_drm_fb_free(drm);
 config:
 	drm_mode_config_cleanup(drm);
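Note: drm->max_vblank_count = 0xffffffff advertises a full 32-bit hardware VBLANK counter to the core; per the comment above, the counter itself comes from a host1x syncpoint read by the CRTC driver (dc.c, outside this file). A sketch of what such a .get_vblank_counter hook can look like (the function name and the software fallback are illustrative assumptions):

	static u32 example_get_vblank_counter(struct drm_crtc *crtc)
	{
		struct tegra_dc *dc = to_tegra_dc(crtc);

		/* hardware counter: raw syncpoint value, incremented once per VBLANK */
		if (dc->syncpt)
			return host1x_syncpt_read(dc->syncpt);

		/* fall back to the software-emulated counter */
		return drm_crtc_vblank_count(crtc);
	}
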

--- 910 unchanged lines hidden ---

 {
 	mutex_lock(&tegra->clients_lock);
 	list_del_init(&client->list);
 	mutex_unlock(&tegra->clients_lock);

 	return 0;
 }

-void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size,
-		      dma_addr_t *dma)
+void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
 {
 	struct iova *alloc;
 	void *virt;
 	gfp_t gfp;
 	int err;

 	if (tegra->domain)
 		size = iova_align(&tegra->carveout.domain, size);

--- 151 unchanged lines hidden ---

 	{ .compatible = "nvidia,tegra124-dsi", },
 	{ .compatible = "nvidia,tegra124-vic", },
 	{ .compatible = "nvidia,tegra132-dsi", },
 	{ .compatible = "nvidia,tegra210-dc", },
 	{ .compatible = "nvidia,tegra210-dsi", },
 	{ .compatible = "nvidia,tegra210-sor", },
 	{ .compatible = "nvidia,tegra210-sor1", },
 	{ .compatible = "nvidia,tegra210-vic", },
+	{ .compatible = "nvidia,tegra186-display", },
+	{ .compatible = "nvidia,tegra186-dc", },
+	{ .compatible = "nvidia,tegra186-sor", },
+	{ .compatible = "nvidia,tegra186-sor1", },
 	{ .compatible = "nvidia,tegra186-vic", },
 	{ /* sentinel */ }
 };

 static struct host1x_driver host1x_drm_driver = {
 	.driver = {
 		.name = "drm",
 		.pm = &host1x_drm_pm_ops,
 	},
 	.probe = host1x_drm_probe,
 	.remove = host1x_drm_remove,
 	.subdevs = host1x_drm_subdevs,
 };

 static struct platform_driver * const drivers[] = {
+	&tegra_display_hub_driver,
 	&tegra_dc_driver,
 	&tegra_hdmi_driver,
 	&tegra_dsi_driver,
 	&tegra_dpaux_driver,
 	&tegra_sor_driver,
 	&tegra_gr2d_driver,
 	&tegra_gr3d_driver,
 	&tegra_vic_driver,

--- 32 unchanged lines hidden ---