/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/hwspinlock.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>

/*
 * The Qualcomm shared memory system is an allocate-only heap structure that
 * consists of one or more memory areas that can be accessed by the processors
 * in the SoC.
 *
 * All systems contain a global heap, accessible by all processors in the SoC,
 * with a table of contents data structure (@smem_header) at the beginning of
 * the main shared memory block.
 *
 * The global header contains metadata for allocations as well as a fixed list
 * of 512 entries (@smem_global_entry) that can be initialized to reference
 * parts of the shared memory space.
 *
 *
 * In addition to this global heap a set of "private" heaps can be set up at
 * boot time with access restrictions so that only certain processor pairs can
 * access the data.
 *
 * These partitions are referenced from an optional partition table
 * (@smem_ptable), that is found 4kB from the end of the main smem region. The
 * partition table entries (@smem_ptable_entry) list the involved processors
 * (or hosts) and their location in the main shared memory region.
 *
 * Each partition starts with a header (@smem_partition_header) that identifies
 * the partition and holds properties for the two internal memory regions. The
 * two regions are cached and non-cached memory respectively. Each region
 * contains a linked list of allocation headers (@smem_private_entry) followed
 * by their data.
 *
 * Items in the non-cached region are allocated from the start of the partition
 * while items in the cached region are allocated from the end. The free area
 * is hence the region between the cached and non-cached offsets.
 *
 *
 * To synchronize allocations in the shared memory heaps a remote spinlock must
 * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
 * platforms.
 *
 */
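
/*
 * Illustrative layout of a private partition, as described above (a rough
 * sketch, not to scale):
 *
 *	+---------------------------+ <- partition base
 *	| smem_partition_header     |
 *	+---------------------------+
 *	| non-cached items          |  allocated upwards from the start
 *	| (smem_private_entry+data) |
 *	+---------------------------+ <- offset_free_uncached
 *	|         free space        |
 *	+---------------------------+ <- offset_free_cached
 *	| cached items              |  allocated downwards from the end
 *	+---------------------------+ <- partition base + size
 */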

/*
 * Item 3 of the global heap contains an array of versions for the various
 * software components in the SoC. We verify that the boot loader version
 * matches the expected version (SMEM_EXPECTED_VERSION) as a sanity check.
 */
#define SMEM_ITEM_VERSION		3
#define SMEM_MASTER_SBL_VERSION_INDEX	7
#define SMEM_EXPECTED_VERSION		11

/*
 * The first 8 items are only to be allocated by the boot loader while
 * initializing the heap.
 */
#define SMEM_ITEM_LAST_FIXED	8

/* Highest accepted item number, for both global and private heaps */
#define SMEM_ITEM_COUNT		512

/* Processor/host identifier for the application processor */
#define SMEM_HOST_APPS		0

/* Max number of processors/hosts in a system */
#define SMEM_HOST_COUNT		9

/**
 * struct smem_proc_comm - proc_comm communication struct (legacy)
 * @command:	current command to be executed
 * @status:	status of the currently requested command
 * @params:	parameters to the command
 */
struct smem_proc_comm {
	u32 command;
	u32 status;
	u32 params[2];
};

/**
 * struct smem_global_entry - entry to reference smem items on the heap
 * @allocated:	boolean to indicate if this entry is used
 * @offset:	offset to the allocated space
 * @size:	size of the allocated space, 8 byte aligned
 * @aux_base:	base address for the memory region used by this unit, or 0 for
 *		the default region. bits 0,1 are reserved
 */
struct smem_global_entry {
	u32 allocated;
	u32 offset;
	u32 size;
	u32 aux_base; /* bits 1:0 reserved */
};
#define AUX_BASE_MASK		0xfffffffc

/**
 * struct smem_header - header found in beginning of primary smem region
 * @proc_comm:		proc_comm communication interface (legacy)
 * @version:		array of versions for the various subsystems
 * @initialized:	boolean to indicate that smem is initialized
 * @free_offset:	index of the first unallocated byte in smem
 * @available:		number of bytes available for allocation
 * @reserved:		reserved field, must be 0
 * @toc:		array of references to items
 */
struct smem_header {
	struct smem_proc_comm proc_comm[4];
	u32 version[32];
	u32 initialized;
	u32 free_offset;
	u32 available;
	u32 reserved;
	struct smem_global_entry toc[SMEM_ITEM_COUNT];
};

/**
 * struct smem_ptable_entry - one entry in the @smem_ptable list
 * @offset:	offset, within the main shared memory region, of the partition
 * @size:	size of the partition
 * @flags:	flags for the partition (currently unused)
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @reserved:	reserved entries for later use
 */
struct smem_ptable_entry {
	u32 offset;
	u32 size;
	u32 flags;
	u16 host0;
	u16 host1;
	u32 reserved[8];
};

/**
 * struct smem_ptable - partition table for the private partitions
 * @magic:	magic number, must be SMEM_PTABLE_MAGIC
 * @version:	version of the partition table
 * @num_entries: number of partitions in the table
 * @reserved:	for now reserved entries
 * @entry:	list of @smem_ptable_entry for the @num_entries partitions
 */
struct smem_ptable {
	u32 magic;
	u32 version;
	u32 num_entries;
	u32 reserved[5];
	struct smem_ptable_entry entry[];
};
#define SMEM_PTABLE_MAGIC	0x434f5424 /* "$TOC" */

/**
 * struct smem_partition_header - header of the partitions
 * @magic:	magic number, must be SMEM_PART_MAGIC
 * @host0:	first processor/host with access to this partition
 * @host1:	second processor/host with access to this partition
 * @size:	size of the partition
 * @offset_free_uncached: offset to the first free byte of uncached memory in
 *		this partition
 * @offset_free_cached: offset to the first free byte of cached memory in this
 *		partition
 * @reserved:	for now reserved entries
 */
struct smem_partition_header {
	u32 magic;
	u16 host0;
	u16 host1;
	u32 size;
	u32 offset_free_uncached;
	u32 offset_free_cached;
	u32 reserved[3];
};
#define SMEM_PART_MAGIC		0x54525024 /* "$PRT" */

/**
 * struct smem_private_entry - header of each item in the private partition
 * @canary:	magic number, must be SMEM_PRIVATE_CANARY
 * @item:	identifying number of the smem item
 * @size:	size of the data, including padding bytes
 * @padding_data: number of bytes of padding of data
 * @padding_hdr: number of bytes of padding between the header and the data
 * @reserved:	for now reserved entry
 */
struct smem_private_entry {
	u16 canary;
	u16 item;
	u32 size; /* includes padding bytes */
	u16 padding_data;
	u16 padding_hdr;
	u32 reserved;
};
#define SMEM_PRIVATE_CANARY	0xa5a5

/**
 * struct smem_region - representation of a chunk of memory used for smem
 * @aux_base:	identifier of aux_mem base
 * @virt_base:	virtual base address of memory with this aux_mem identifier
 * @size:	size of the memory region
 */
struct smem_region {
	u32 aux_base;
	void __iomem *virt_base;
	size_t size;
};

/**
 * struct qcom_smem - device data for the smem device
 * @dev:	device pointer
 * @hwlock:	reference to a hwspinlock
 * @partitions:	list of pointers to partitions affecting the current
 *		processor/host
 * @num_regions: number of @regions
 * @regions:	list of the memory regions defining the shared memory
 */
struct qcom_smem {
	struct device *dev;

	struct hwspinlock *hwlock;

	struct smem_partition_header *partitions[SMEM_HOST_COUNT];

	unsigned num_regions;
	struct smem_region regions[0];
};

/* Pointer to the one and only smem handle */
static struct qcom_smem *__smem;

/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT	1000

static int qcom_smem_alloc_private(struct qcom_smem *smem,
				   unsigned host,
				   unsigned item,
				   size_t size)
{
	struct smem_partition_header *phdr;
	struct smem_private_entry *hdr;
	size_t alloc_size;
	void *p;

	phdr = smem->partitions[host];

	p = (void *)phdr + sizeof(*phdr);
	while (p < (void *)phdr + phdr->offset_free_uncached) {
		hdr = p;

		if (hdr->canary != SMEM_PRIVATE_CANARY) {
			dev_err(smem->dev,
				"Found invalid canary in host %d partition\n",
				host);
			return -EINVAL;
		}

		if (hdr->item == item)
			return -EEXIST;

		p += sizeof(*hdr) + hdr->padding_hdr + hdr->size;
	}

	/* Check that we don't grow into the cached region */
	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
	if (p + alloc_size >= (void *)phdr + phdr->offset_free_cached) {
		dev_err(smem->dev, "Out of memory\n");
		return -ENOSPC;
	}

	hdr = p;
	hdr->canary = SMEM_PRIVATE_CANARY;
	hdr->item = item;
	hdr->size = ALIGN(size, 8);
	hdr->padding_data = hdr->size - size;
	hdr->padding_hdr = 0;

	/*
	 * Ensure the header is written before we advance the free offset, so
	 * that remote processors that do not take the remote spinlock still
	 * get a consistent view of the linked list.
	 */
	wmb();
	phdr->offset_free_uncached += alloc_size;

	return 0;
}

static int qcom_smem_alloc_global(struct qcom_smem *smem,
				  unsigned item,
				  size_t size)
{
	struct smem_header *header;
	struct smem_global_entry *entry;

	if (WARN_ON(item >= SMEM_ITEM_COUNT))
		return -EINVAL;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (entry->allocated)
		return -EEXIST;

	size = ALIGN(size, 8);
	if (WARN_ON(size > header->available))
		return -ENOMEM;

	entry->offset = header->free_offset;
	entry->size = size;

	/*
	 * Ensure the header is consistent before we mark the item allocated,
	 * so that remote processors will get a consistent view of the item
	 * even though they do not take the spinlock on read.
	 */
	wmb();
	entry->allocated = 1;

	header->free_offset += size;
	header->available -= size;

	return 0;
}

/**
 * qcom_smem_alloc() - allocate space for a smem item
 * @host:	remote processor id, or -1
 * @item:	smem item handle
 * @size:	number of bytes to be allocated
 *
 * Allocate space for a given smem item of size @size, given that the item is
 * not yet allocated.
 */
int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
{
	unsigned long flags;
	int ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (item < SMEM_ITEM_LAST_FIXED) {
		dev_err(__smem->dev,
			"Rejecting allocation of static entry %d\n", item);
		return -EINVAL;
	}

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ret;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host])
		ret = qcom_smem_alloc_private(__smem, host, item, size);
	else
		ret = qcom_smem_alloc_global(__smem, item, size);

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ret;
}
EXPORT_SYMBOL(qcom_smem_alloc);
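
/*
 * Example of allocating an item from a client driver (a sketch only;
 * "remote_host", "MY_SMEM_ITEM" and "struct my_msg" are hypothetical names,
 * not part of this driver):
 *
 *	ret = qcom_smem_alloc(remote_host, MY_SMEM_ITEM,
 *			      sizeof(struct my_msg));
 *	if (ret < 0 && ret != -EEXIST)
 *		return ret;
 */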

static int qcom_smem_get_global(struct qcom_smem *smem,
				unsigned item,
				void **ptr,
				size_t *size)
{
	struct smem_header *header;
	struct smem_region *area;
	struct smem_global_entry *entry;
	u32 aux_base;
	unsigned i;

	if (WARN_ON(item >= SMEM_ITEM_COUNT))
		return -EINVAL;

	header = smem->regions[0].virt_base;
	entry = &header->toc[item];
	if (!entry->allocated)
		return -ENXIO;

	if (ptr != NULL) {
		aux_base = entry->aux_base & AUX_BASE_MASK;

		for (i = 0; i < smem->num_regions; i++) {
			area = &smem->regions[i];

			if (area->aux_base == aux_base || !aux_base) {
				*ptr = area->virt_base + entry->offset;
				break;
			}
		}
	}
	if (size != NULL)
		*size = entry->size;

	return 0;
}

static int qcom_smem_get_private(struct qcom_smem *smem,
				 unsigned host,
				 unsigned item,
				 void **ptr,
				 size_t *size)
{
	struct smem_partition_header *phdr;
	struct smem_private_entry *hdr;
	void *p;

	phdr = smem->partitions[host];

	p = (void *)phdr + sizeof(*phdr);
	while (p < (void *)phdr + phdr->offset_free_uncached) {
		hdr = p;

		if (hdr->canary != SMEM_PRIVATE_CANARY) {
			dev_err(smem->dev,
				"Found invalid canary in host %d partition\n",
				host);
			return -EINVAL;
		}

		if (hdr->item == item) {
			if (ptr != NULL)
				*ptr = p + sizeof(*hdr) + hdr->padding_hdr;

			if (size != NULL)
				*size = hdr->size - hdr->padding_data;

			return 0;
		}

		p += sizeof(*hdr) + hdr->padding_hdr + hdr->size;
	}

	return -ENOENT;
}

/**
 * qcom_smem_get() - resolve pointer and size of a smem item
 * @host:	the remote processor, or -1
 * @item:	smem item handle
 * @ptr:	pointer to be filled out with address of the item
 * @size:	pointer to be filled out with size of the item
 *
 * Looks up pointer and size of a smem item.
 */
int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size)
{
	unsigned long flags;
	int ret;

	if (!__smem)
		return -EPROBE_DEFER;

	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
					  HWSPINLOCK_TIMEOUT,
					  &flags);
	if (ret)
		return ret;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host])
		ret = qcom_smem_get_private(__smem, host, item, ptr, size);
	else
		ret = qcom_smem_get_global(__smem, item, ptr, size);

	hwspin_unlock_irqrestore(__smem->hwlock, &flags);

	return ret;
}
EXPORT_SYMBOL(qcom_smem_get);
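
/*
 * Example of looking up an item, continuing the hypothetical client sketch
 * above (the item id and message struct are assumptions):
 *
 *	struct my_msg *msg;
 *	size_t size;
 *
 *	ret = qcom_smem_get(remote_host, MY_SMEM_ITEM, (void **)&msg, &size);
 *	if (ret < 0)
 *		return ret;
 *	if (size < sizeof(*msg))
 *		return -EINVAL;
 */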

/**
 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
 * @host:	the remote processor identifying a partition, or -1
 *
 * To be used by smem clients as a quick way to determine if any new
 * allocations have been made.
 */
int qcom_smem_get_free_space(unsigned host)
{
	struct smem_partition_header *phdr;
	struct smem_header *header;
	unsigned ret;

	if (!__smem)
		return -EPROBE_DEFER;

	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
		phdr = __smem->partitions[host];
		ret = phdr->offset_free_cached - phdr->offset_free_uncached;
	} else {
		header = __smem->regions[0].virt_base;
		ret = header->available;
	}

	return ret;
}
EXPORT_SYMBOL(qcom_smem_get_free_space);
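
/*
 * Example: a client can compare successive readings to detect that new items
 * may have appeared (a sketch; "last_free" is a hypothetical client
 * variable):
 *
 *	free = qcom_smem_get_free_space(remote_host);
 *	if (free != last_free) {
 *		last_free = free;
 *		... rescan for newly allocated items ...
 *	}
 */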

static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{
	unsigned *versions;
	size_t size;
	int ret;

	ret = qcom_smem_get_global(smem, SMEM_ITEM_VERSION,
				   (void **)&versions, &size);
	if (ret < 0) {
		dev_err(smem->dev, "Unable to read the version item\n");
		return -ENOENT;
	}

	if (size < sizeof(unsigned) * (SMEM_MASTER_SBL_VERSION_INDEX + 1)) {
		dev_err(smem->dev, "Version item is too small\n");
		return -EINVAL;
	}

	return versions[SMEM_MASTER_SBL_VERSION_INDEX];
}

static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
					  unsigned local_host)
{
	struct smem_partition_header *header;
	struct smem_ptable_entry *entry;
	struct smem_ptable *ptable;
	unsigned remote_host;
	int i;

	ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
	if (ptable->magic != SMEM_PTABLE_MAGIC)
		return 0;

	if (ptable->version != 1) {
		dev_err(smem->dev,
			"Unsupported partition header version %d\n",
			ptable->version);
		return -EINVAL;
	}

	for (i = 0; i < ptable->num_entries; i++) {
		entry = &ptable->entry[i];

		if (entry->host0 != local_host && entry->host1 != local_host)
			continue;

		if (!entry->offset)
			continue;

		if (!entry->size)
			continue;

		if (entry->host0 == local_host)
			remote_host = entry->host1;
		else
			remote_host = entry->host0;

		if (remote_host >= SMEM_HOST_COUNT) {
			dev_err(smem->dev,
				"Invalid remote host %d\n",
				remote_host);
			return -EINVAL;
		}

		if (smem->partitions[remote_host]) {
			dev_err(smem->dev,
				"Already found a partition for host %d\n",
				remote_host);
			return -EINVAL;
		}

		header = smem->regions[0].virt_base + entry->offset;

		if (header->magic != SMEM_PART_MAGIC) {
			dev_err(smem->dev,
				"Partition %d has invalid magic\n", i);
			return -EINVAL;
		}

		if (header->host0 != local_host && header->host1 != local_host) {
			dev_err(smem->dev,
				"Partition %d hosts are invalid\n", i);
			return -EINVAL;
		}

		if (header->host0 != remote_host && header->host1 != remote_host) {
			dev_err(smem->dev,
				"Partition %d hosts are invalid\n", i);
			return -EINVAL;
		}

		if (header->size != entry->size) {
			dev_err(smem->dev,
				"Partition %d has invalid size\n", i);
			return -EINVAL;
		}

		if (header->offset_free_uncached > header->size) {
			dev_err(smem->dev,
				"Partition %d has invalid free pointer\n", i);
			return -EINVAL;
		}

		smem->partitions[remote_host] = header;
	}

	return 0;
}

static int qcom_smem_count_mem_regions(struct platform_device *pdev)
{
	struct resource *res;
	int num_regions = 0;
	int i;

	for (i = 0; i < pdev->num_resources; i++) {
		res = &pdev->resource[i];

		if (resource_type(res) == IORESOURCE_MEM)
			num_regions++;
	}

	return num_regions;
}

static int qcom_smem_probe(struct platform_device *pdev)
{
	struct smem_header *header;
	struct device_node *np;
	struct qcom_smem *smem;
	struct resource *res;
	struct resource r;
	size_t array_size;
	int num_regions = 0;
	int hwlock_id;
	u32 version;
	int ret;
	int i;

	num_regions = qcom_smem_count_mem_regions(pdev) + 1;

	array_size = num_regions * sizeof(struct smem_region);
	smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
	if (!smem)
		return -ENOMEM;

	smem->dev = &pdev->dev;
	smem->num_regions = num_regions;

	np = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
	if (!np) {
		dev_err(&pdev->dev, "No memory-region specified\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	of_node_put(np);
	if (ret)
		return ret;

	smem->regions[0].aux_base = (u32)r.start;
	smem->regions[0].size = resource_size(&r);
	smem->regions[0].virt_base = devm_ioremap_nocache(&pdev->dev,
							  r.start,
							  resource_size(&r));
	if (!smem->regions[0].virt_base)
		return -ENOMEM;

	for (i = 1; i < num_regions; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i - 1);

		smem->regions[i].aux_base = (u32)res->start;
		smem->regions[i].size = resource_size(res);
		smem->regions[i].virt_base = devm_ioremap_nocache(&pdev->dev,
								  res->start,
								  resource_size(res));
		if (!smem->regions[i].virt_base)
			return -ENOMEM;
	}

	header = smem->regions[0].virt_base;
	if (header->initialized != 1 || header->reserved) {
		dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
		return -EINVAL;
	}

	version = qcom_smem_get_sbl_version(smem);
	if (version >> 16 != SMEM_EXPECTED_VERSION) {
		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
		return -EINVAL;
	}

	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
	if (ret < 0)
		return ret;

	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
	if (hwlock_id < 0) {
		dev_err(&pdev->dev, "failed to retrieve hwlock\n");
		return hwlock_id;
	}

	smem->hwlock = hwspin_lock_request_specific(hwlock_id);
	if (!smem->hwlock)
		return -ENXIO;

	__smem = smem;

	return 0;
}

static int qcom_smem_remove(struct platform_device *pdev)
{
	hwspin_lock_free(__smem->hwlock);
	__smem = NULL;

	return 0;
}

static const struct of_device_id qcom_smem_of_match[] = {
	{ .compatible = "qcom,smem" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smem_of_match);

static struct platform_driver qcom_smem_driver = {
	.probe = qcom_smem_probe,
	.remove = qcom_smem_remove,
	.driver  = {
		.name = "qcom-smem",
		.of_match_table = qcom_smem_of_match,
		.suppress_bind_attrs = true,
	},
};

static int __init qcom_smem_init(void)
{
	return platform_driver_register(&qcom_smem_driver);
}
arch_initcall(qcom_smem_init);

static void __exit qcom_smem_exit(void)
{
	platform_driver_unregister(&qcom_smem_driver);
}
module_exit(qcom_smem_exit);

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
MODULE_LICENSE("GPL v2");