Daniel Vetter | 4f03b1f | 2014-09-10 12:43:49 +0200 | [diff] [blame] | 1 | #ifndef __DRM_DRM_LEGACY_H__ |
| 2 | #define __DRM_DRM_LEGACY_H__ |
Daniel Vetter | 4f03b1f | 2014-09-10 12:43:49 +0200 | [diff] [blame] | 3 | /* |
| 4 | * Legacy driver interfaces for the Direct Rendering Manager |
| 5 | * |
| 6 | * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. |
| 7 | * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. |
| 8 | * Copyright (c) 2009-2010, Code Aurora Forum. |
| 9 | * All rights reserved. |
| 10 | * Copyright © 2014 Intel Corporation |
| 11 | * Daniel Vetter <daniel.vetter@ffwll.ch> |
| 12 | * |
| 13 | * Author: Rickard E. (Rik) Faith <faith@valinux.com> |
| 14 | * Author: Gareth Hughes <gareth@valinux.com> |
| 15 | * |
| 16 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 17 | * copy of this software and associated documentation files (the "Software"), |
| 18 | * to deal in the Software without restriction, including without limitation |
| 19 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 20 | * and/or sell copies of the Software, and to permit persons to whom the |
| 21 | * Software is furnished to do so, subject to the following conditions: |
| 22 | * |
| 23 | * The above copyright notice and this permission notice (including the next |
| 24 | * paragraph) shall be included in all copies or substantial portions of the |
| 25 | * Software. |
| 26 | * |
| 27 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 28 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 29 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 30 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
| 31 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
| 32 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
| 33 | * OTHER DEALINGS IN THE SOFTWARE. |
| 34 | */ |
| 35 | |
Thomas Zimmermann | 04dfe19 | 2021-05-07 20:57:09 +0200 | [diff] [blame] | 36 | #include <linux/agp_backend.h> |
| 37 | |
Sam Ravnborg | cbe932a | 2019-05-26 19:35:30 +0200 | [diff] [blame] | 38 | #include <drm/drm.h> |
| 39 | #include <drm/drm_auth.h> |
Sam Ravnborg | cbe932a | 2019-05-26 19:35:30 +0200 | [diff] [blame] | 40 | |
| 41 | struct drm_device; |
Thomas Zimmermann | 1be9d5f | 2019-12-03 11:03:56 +0100 | [diff] [blame] | 42 | struct drm_driver; |
Sam Ravnborg | cbe932a | 2019-05-26 19:35:30 +0200 | [diff] [blame] | 43 | struct file; |
Thomas Zimmermann | 1be9d5f | 2019-12-03 11:03:56 +0100 | [diff] [blame] | 44 | struct pci_driver; |
Daniel Vetter | 4f03b1f | 2014-09-10 12:43:49 +0200 | [diff] [blame] | 45 | |
/*
 * Legacy Support for paleontologic DRM drivers
 *
 * If you add a new driver and it uses any of these functions or structures,
 * you're doing it terribly wrong.
 */
| 52 | |
Thomas Zimmermann | a21800b | 2021-11-29 10:48:41 +0100 | [diff] [blame] | 53 | /* |
| 54 | * Hash-table Support |
| 55 | */ |
| 56 | |
/* One entry in an open-addressed hash table (see struct drm_open_hash). */
struct drm_hash_item {
	struct hlist_node head;		/* linkage within one hash bucket */
	unsigned long key;		/* lookup key for this item */
};
| 61 | |
/* Hash table of drm_hash_item entries. */
struct drm_open_hash {
	struct hlist_head *table;	/* bucket array; presumably 1 << order entries -- TODO confirm */
	u8 order;			/* log2 of the table size */
};
| 66 | |
/**
 * DMA buffer.
 */
struct drm_buf {
	int idx;		       /**< Index into master buflist */
	int total;		       /**< Buffer size */
	int order;		       /**< log-base-2(total) */
	int used;		       /**< Amount of buffer in use (for DMA) */
	unsigned long offset;	       /**< Byte offset (used internally) */
	void *address;		       /**< Address of buffer */
	unsigned long bus_address;     /**< Bus address of buffer */
	struct drm_buf *next;	       /**< Kernel-only: used for free list */
	__volatile__ int waiting;      /**< On kernel DMA queue */
	__volatile__ int pending;      /**< On hardware DMA queue */
	struct drm_file *file_priv;    /**< Private of holding file descr */
	int context;		       /**< Kernel queue for this buffer */
	int while_locked;	       /**< Dispatch this buffer while locked */
	enum {
		DRM_LIST_NONE = 0,
		DRM_LIST_FREE = 1,
		DRM_LIST_WAIT = 2,
		DRM_LIST_PEND = 3,
		DRM_LIST_PRIO = 4,
		DRM_LIST_RECLAIM = 5
	} list;			       /**< Which list we're on */

	int dev_priv_size;	       /**< Size of buffer private storage */
	void *dev_private;	       /**< Per-buffer private storage */
};
| 96 | |
/* Handle describing a single contiguous DMA allocation. */
typedef struct drm_dma_handle {
	dma_addr_t busaddr;	/* device-visible bus address */
	void *vaddr;		/* kernel virtual address of the allocation */
	size_t size;		/* allocation size in bytes */
} drm_dma_handle_t;
| 102 | |
/**
 * Buffer entry.  There is one of these for each buffer size order.
 */
struct drm_buf_entry {
	int buf_size;			/**< size */
	int buf_count;			/**< number of buffers */
	struct drm_buf *buflist;	/**< buffer list */
	int seg_count;			/* number of entries in seglist */
	int page_order;			/* page allocation order per segment -- TODO confirm */
	struct drm_dma_handle **seglist;	/* backing DMA segments */

	int low_mark;			/**< Low water mark */
	int high_mark;			/**< High water mark */
};
| 117 | |
/**
 * DMA data.
 */
struct drm_device_dma {

	struct drm_buf_entry bufs[DRM_MAX_ORDER + 1];	/**< buffers, grouped by their size order */
	int buf_count;			/**< total number of buffers */
	struct drm_buf **buflist;	/**< Vector of pointers into drm_device_dma::bufs */
	int seg_count;			/* total number of DMA segments across all orders -- TODO confirm */
	int page_count;			/**< number of pages */
	unsigned long *pagelist;	/**< page list */
	unsigned long byte_count;	/* total bytes covered -- presumably; verify against drm_bufs.c */
	enum {
		_DRM_DMA_USE_AGP = 0x01,
		_DRM_DMA_USE_SG = 0x02,
		_DRM_DMA_USE_FB = 0x04,
		_DRM_DMA_USE_PCI_RO = 0x08
	} flags;			/* bitmask describing how the buffers were allocated/mapped */

};
| 138 | |
/**
 * Scatter-gather memory.
 */
struct drm_sg_mem {
	unsigned long handle;		/* token handed to user space -- presumably; verify against callers */
	void *virtual;			/* kernel virtual address of the mapping */
	int pages;			/* number of pages in the mapping */
	struct page **pagelist;		/* one struct page pointer per page */
	dma_addr_t *busaddr;		/* one bus address per page */
};
| 149 | |
/**
 * Kernel side of a mapping
 */
struct drm_local_map {
	dma_addr_t offset;	 /**< Requested physical address (0 for SAREA)*/
	unsigned long size;	 /**< Requested physical size (bytes) */
	enum drm_map_type type;	 /**< Type of memory to map */
	enum drm_map_flags flags;	 /**< Flags */
	void *handle;		 /**< User-space: "Handle" to pass to mmap() */
				 /**< Kernel-space: kernel-virtual address */
	int mtrr;		 /**< MTRR slot used */
};

/* Compatibility typedef for old drivers; new code should spell out the struct. */
typedef struct drm_local_map drm_local_map_t;
| 164 | |
/**
 * Mappings list
 */
struct drm_map_list {
	struct list_head head;		/**< list head */
	struct drm_hash_item hash;	/* hash-table linkage; presumably keyed by user_token -- TODO confirm */
	struct drm_local_map *map;	/**< mapping */
	uint64_t user_token;		/* token identifying the map to user space -- verify against drm_legacy_findmap() */
	struct drm_master *master;	/* owning master, if any */
};
| 175 | |
/*
 * Legacy map and DMA-buffer management entry points
 * (implemented in the legacy DRM core -- drm_bufs.c/drm_vm.c, TODO confirm).
 */
int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
		      unsigned int size, enum drm_map_type type,
		      enum drm_map_flags flags, struct drm_local_map **map_p);
struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, unsigned int token);
void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);

int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req);
int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req);
| 187 | |
/**
 * Test that the hardware lock is held by the caller, making the enclosing
 * function return -EINVAL otherwise.  Note: expands to a bare "return", so
 * it may only be used inside an int-returning function.
 *
 * \param dev DRM device (unused by the expansion).
 * \param _file_priv DRM file private of the caller; must own the lock.
 */
#define LOCK_TEST_WITH_RETURN( dev, _file_priv )				\
do {										\
	if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) ||	\
	    _file_priv->master->lock.file_priv != _file_priv) {			\
		DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
			   __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
			   _file_priv->master->lock.file_priv, _file_priv);	\
		return -EINVAL;							\
	}									\
} while (0)
| 204 | |
/* Legacy idle-lock handling (drm_lock.c -- TODO confirm). */
void drm_legacy_idlelock_take(struct drm_lock_data *lock);
void drm_legacy_idlelock_release(struct drm_lock_data *lock);

/* drm_irq.c */
int drm_legacy_irq_uninstall(struct drm_device *dev);
| 210 | |
/* drm_pci.c */

#ifdef CONFIG_PCI

int drm_legacy_pci_init(const struct drm_driver *driver,
			struct pci_driver *pdriver);
void drm_legacy_pci_exit(const struct drm_driver *driver,
			 struct pci_driver *pdriver);

#else

/* Stub: legacy PCI DMA allocation always fails without CONFIG_PCI. */
static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev,
						   size_t size, size_t align)
{
	return NULL;
}

/* Stub: nothing was allocated, so nothing to free. */
static inline void drm_pci_free(struct drm_device *dev,
				struct drm_dma_handle *dmah)
{
}

/* Stub: registering a legacy PCI driver is rejected without CONFIG_PCI. */
static inline int drm_legacy_pci_init(const struct drm_driver *driver,
				      struct pci_driver *pdriver)
{
	return -EINVAL;
}

/* Stub: no-op without CONFIG_PCI. */
static inline void drm_legacy_pci_exit(const struct drm_driver *driver,
				       struct pci_driver *pdriver)
{
}

#endif
Daniel Vetter | 1c96e84 | 2014-09-10 12:43:51 +0200 | [diff] [blame] | 245 | |
/*
 * AGP Support
 */

/* Per-device AGP state for legacy drivers. */
struct drm_agp_head {
	struct agp_kern_info agp_info;		/* info from the AGP backend (linux/agp_backend.h) */
	struct list_head memory;		/* list of AGP memory allocations -- TODO confirm node type */
	unsigned long mode;			/* requested AGP mode bits */
	struct agp_bridge_data *bridge;		/* AGP bridge this device sits behind */
	int enabled;				/* non-zero once AGP has been enabled */
	int acquired;				/* non-zero while the bridge is acquired */
	unsigned long base;			/* aperture base -- presumably; verify against drm_agp_init() */
	int agp_mtrr;				/* MTRR slot covering the aperture, if any */
	int cant_use_aperture;			/* set if CPU access through the aperture is unusable */
	unsigned long page_mask;		/* mask applied to page addresses -- TODO confirm */
};
| 262 | |
#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP)
struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev);
int drm_legacy_agp_acquire(struct drm_device *dev);
int drm_legacy_agp_release(struct drm_device *dev);
int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info);
int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
#else

/* Stubs: every AGP operation fails when legacy-DRM or AGP support is off. */

static inline struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev)
{
	return NULL;
}

static inline int drm_legacy_agp_acquire(struct drm_device *dev)
{
	return -ENODEV;
}

static inline int drm_legacy_agp_release(struct drm_device *dev)
{
	return -ENODEV;
}

static inline int drm_legacy_agp_enable(struct drm_device *dev,
					struct drm_agp_mode mode)
{
	return -ENODEV;
}

static inline int drm_legacy_agp_info(struct drm_device *dev,
				      struct drm_agp_info *info)
{
	return -ENODEV;
}

static inline int drm_legacy_agp_alloc(struct drm_device *dev,
				       struct drm_agp_buffer *request)
{
	return -ENODEV;
}

static inline int drm_legacy_agp_free(struct drm_device *dev,
				      struct drm_agp_buffer *request)
{
	return -ENODEV;
}

static inline int drm_legacy_agp_unbind(struct drm_device *dev,
					struct drm_agp_binding *request)
{
	return -ENODEV;
}

static inline int drm_legacy_agp_bind(struct drm_device *dev,
				      struct drm_agp_binding *request)
{
	return -ENODEV;
}
#endif
| 325 | |
/* drm_memory.c */
void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
| 330 | |
Daniel Vetter | 4f03b1f | 2014-09-10 12:43:49 +0200 | [diff] [blame] | 331 | #endif /* __DRM_DRM_LEGACY_H__ */ |