4
* This module is primarily used to configure IPC-wide settings and
5
* initialize IPC at runtime
7
* Copyright (C) 2009 Texas Instruments, Inc.
9
* This package is free software; you can redistribute it and/or modify
10
* it under the terms of the GNU General Public License version 2 as
11
* published by the Free Software Foundation.
13
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
14
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
15
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
20
/* Standard headers */
21
#include <linux/types.h>
22
#include <linux/module.h>
24
#include <syslink/atomic_linux.h>
27
#include <multiproc.h>
32
#include <sharedregion.h>
34
#include <notify_ducatidriver.h>
35
#include <notify_setup_proxy.h>
38
#include <heapbufmp.h>
39
#include <heapmemmp.h>
42
#include <nameserver.h>
43
#include <nameserver_remotenotify.h>
45
/* Ipu Power Management Header (ipu_pm) */
46
#include "../ipu_pm/ipu_pm.h"
47
/* =============================================================================
49
* =============================================================================
51
/* Macro to make a correct module magic number with ref_count */
52
#define IPC_MAKE_MAGICSTAMP(x)((IPC_MODULEID << 16u) | (x))
54
/* flag for starting processor synchronization */
55
#define IPC_PROCSYNCSTART 1
57
/* flag for finishing processor synchronization */
58
#define IPC_PROCSYNCFINISH 2
60
/* Round 'a' up to the next multiple of 'b'.
 * NOTE(review): only correct when 'b' is a power of two (bit-mask trick);
 * also yields 0 when b == 0 — callers must guarantee b is a nonzero
 * power of two. */
#define ROUND_UP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
62
/* =============================================================================
64
* =============================================================================
67
/* The structure used for reserving memory in SharedRegion */
69
/* NOTE(review): the fields below belong to the shared-memory reservation
 * record (struct ipc_reserved); the struct's opening line is missing from
 * this garbled extraction. They are read/written by both processors, hence
 * VOLATILE. */
VOLATILE u32 started_key;
72
u32 *transport_sr_ptr;
73
u32 *config_list_head;
77
/* head of the config list */
78
struct ipc_config_head {
80
/* Address of first config entry */
85
* This structure captures Configuration details of a module/instance
86
* written by a slave to synchornize with a remote slave/HOST
88
struct ipc_config_entry {
89
VOLATILE u32 remote_proc_id;
90
/* Remote processor identifier */
91
VOLATILE u32 local_proc_id;
92
/* Config Entry owner processor identifier */
94
/* Unique tag to distinguish config from other config entries */
96
/* Size of the config pointer */
98
/* Address of next config entry (In SRPtr format) */
102
* This structure defines the fields that are to be configured
103
* between the executing processor and a remote processor.
106
u16 remote_proc_id; /* the remote processor id */
107
bool setup_notify; /* whether to setup Notify */
108
bool setup_messageq; /* whether to setup messageq */
109
bool setup_ipu_pm; /* whether to setup ipu_pm */
112
/* Ipc instance structure. */
113
struct ipc_proc_entry {
114
void *local_config_list;
115
void *remote_config_list;
118
struct ipc_entry entry;
123
/* Module state structure */
124
struct ipc_module_state {
126
/* ref count for ipc_start()/ipc_stop(), tagged with module magic */
atomic_t start_ref_count;
127
void *ipc_shared_addr;
128
void *gatemp_shared_addr;
129
void *ipu_pm_shared_addr;
130
enum ipc_proc_sync proc_sync;
131
struct ipc_config cfg;
132
/* per-remote-processor attach state, indexed by processor id */
struct ipc_proc_entry proc_entry[MULTIPROC_MAXPROCESSORS];
136
/* =============================================================================
137
* Forward declaration
138
* =============================================================================
141
* ======== ipc_get_master_addr() ========
143
static void *ipc_get_master_addr(u16 remote_proc_id, void *shared_addr);
146
* ======== ipc_get_region0_reserved_size ========
147
* Returns the amount of memory to be reserved for Ipc in SharedRegion 0.
149
* This is used for synchronizing processors.
151
static u32 ipc_get_region0_reserved_size(void);
154
* ======== ipc_get_slave_addr() ========
156
static void *ipc_get_slave_addr(u16 remote_proc_id, void *shared_addr);
159
* ======== ipc_proc_sync_start ========
160
* Starts the process of synchronizing processors.
162
* Shared memory in region 0 will be used. The processor which owns
163
* SharedRegion 0 writes its reserve memory address in region 0
164
* to let the other processors know it has started. It then spins
165
* until the other processors start. The other processors write their
166
* reserve memory address in region 0 to let the owner processor
167
* know they've started. The other processors then spin until the
168
* owner processor writes to let them know its finished the process
169
* of synchronization before continuing.
171
static int ipc_proc_sync_start(u16 remote_proc_id, void *shared_addr);
174
* ======== ipc_proc_sync_finish ========
175
* Finishes the process of synchronizing processors.
177
* Each processor writes its reserve memory address in SharedRegion 0
178
* to let the other processors know its finished the process of
181
static int ipc_proc_sync_finish(u16 remote_proc_id, void *shared_addr);
184
* ======== ipc_reserved_size_per_proc ========
185
* The amount of memory required to be reserved per processor.
187
static u32 ipc_reserved_size_per_proc(void);
189
/* TODO: figure these out */
190
/* NOTE(review): placeholder stubs — gate_enter_system() returns a dummy
 * key and gate_leave_system() is a no-op, so ref_count updates are NOT
 * actually protected until these are implemented. */
#define gate_enter_system() 0
191
#define gate_leave_system(key) {}
193
/* =============================================================================
195
* =============================================================================
197
/* Module state: proc_sync defaults to ALL (attach to every processor) */
static struct ipc_module_state ipc_module_state = {
198
.proc_sync = IPC_PROCSYNC_ALL,
201
static struct ipc_module_state *ipc_module = &ipc_module_state;
203
/* =============================================================================
205
* =============================================================================
208
* ========== ipc_attach ==========
209
* attaches to a remote processor
211
/*
 * ipc_attach - attach this processor to remote_proc_id.
 * Decides master/slave role by processor id (lower id = slave), then
 * synchronizes with the remote, attaches GateMP, SharedRegion, Notify,
 * ipu_pm, NameServerRemoteNotify and MessageQ as configured in the
 * per-processor ipc_entry, and finally marks the entry attached.
 * NOTE(review): garbled extraction — interleaved line numbers and missing
 * lines; code below kept byte-identical, comments only added.
 */
int ipc_attach(u16 remote_proc_id)
215
u32 reserved_size = ipc_reserved_size_per_proc();
216
bool cache_enabled = sharedregion_is_cache_enabled(0);
218
void *notify_shared_addr;
219
void *msgq_shared_addr;
220
void *nsrn_shared_addr;
222
VOLATILE struct ipc_reserved *slave;
223
struct ipc_proc_entry *ipc;
225
if (remote_proc_id >= MULTIPROC_MAXPROCESSORS) {
226
pr_err("Invalid remote_proc_id passed\n");
230
/* determine if self is master or slave */
231
/* the processor with the smaller id takes the slave role */
if (multiproc_self() < remote_proc_id)
232
ipc_module->proc_entry[remote_proc_id].slave = true;
234
ipc_module->proc_entry[remote_proc_id].slave = false;
236
/* determine the slave's slot */
237
slave = ipc_get_slave_addr(remote_proc_id, ipc_module->ipc_shared_addr);
240
Cache_inv((void *)slave, reserved_size, Cache_Type_ALL, true);
242
/* get the attach paramters associated with remote_proc_id */
243
ipc = &(ipc_module->proc_entry[remote_proc_id]);
245
/* Synchronize the processors. */
246
status = ipc_proc_sync_start(remote_proc_id, ipc_module->
250
pr_err("ipc_attach : ipc_proc_sync_start "
251
"failed [0x%x]\n", status);
253
pr_err("ipc_proc_sync_start : status [0x%x]\n", status);
257
/* must be called before SharedRegion_attach */
258
status = gatemp_attach(remote_proc_id, ipc_module->
261
pr_err("ipc_attach : gatemp_attach "
262
"failed [0x%x]\n", status);
264
pr_err("gatemp_attach : status [0x%x]\n", status);
268
/* retrieves the SharedRegion Heap handles */
270
status = sharedregion_attach(remote_proc_id);
272
pr_err("ipc_attach : sharedregion_attach "
273
"failed [0x%x]\n", status);
275
pr_err("sharedregion_attach : status "
279
/* attach Notify if not yet attached and specified to set internal
281
if (status >= 0 && !notify_is_registered(remote_proc_id, 0) &&
282
(ipc->entry.setup_notify)) {
283
/* call notify_attach */
284
/* slave allocates the Notify shared memory and publishes its
 * SRPtr; the master resolves the SRPtr the slave published */
if (ipc_module->proc_entry[remote_proc_id].slave) {
285
notify_mem_req = notify_shared_mem_req(remote_proc_id,
286
ipc_module->ipc_shared_addr);
287
notify_shared_addr = sl_heap_alloc(
288
sharedregion_get_heap(0),
290
sharedregion_get_cache_line_size(0));
291
memset(notify_shared_addr, 0, notify_mem_req);
292
slave->notify_sr_ptr = sharedregion_get_srptr(
293
notify_shared_addr, 0);
294
if (slave->notify_sr_ptr ==
295
SHAREDREGION_INVALIDSRPTR) {
297
pr_err("ipc_attach : "
298
"sharedregion_get_srptr "
299
"failed [0x%x]\n", status);
302
"sharedregion_get_srptr : "
303
"status [0x%x]\n", status);
306
notify_shared_addr = sharedregion_get_ptr(slave->
308
if (notify_shared_addr == NULL) {
310
pr_err("ipc_attach : "
311
"sharedregion_get_ptr "
312
"failed [0x%x]\n", status);
315
"sharedregion_get_ptr : "
316
"status [0x%x]\n", status);
321
status = notify_attach(remote_proc_id,
324
pr_err("ipc_attach : "
326
"failed [0x%x]\n", status);
330
"status [0x%x]\n", status);
334
/* Must come before Notify because depends on default Notify */
335
if (status >= 0 && ipc->entry.setup_notify && ipc->entry.setup_ipu_pm) {
337
status = ipu_pm_attach(remote_proc_id, ipc_module->
340
pr_err("ipc_attach : "
342
"failed [0x%x]\n", status);
346
"status [0x%x]\n", status);
350
/* Must come after gatemp_start because depends on default GateMP */
351
if (status >= 0 && ipc->entry.setup_notify) {
352
/* same slave-allocates / master-resolves protocol as Notify,
 * here for the NameServerRemoteNotify shared memory */
if (ipc_module->proc_entry[remote_proc_id].slave) {
353
nsrn_shared_addr = sl_heap_alloc(
354
sharedregion_get_heap(0),
355
nameserver_remotenotify_shared_mem_req(
357
sharedregion_get_cache_line_size(0));
360
sharedregion_get_srptr(nsrn_shared_addr, 0);
361
if (slave->nsrn_sr_ptr == SHAREDREGION_INVALIDSRPTR) {
363
pr_err("ipc_attach : "
364
"sharedregion_get_srptr "
365
"failed [0x%x]\n", status);
368
"sharedregion_get_srptr : "
369
"status [0x%x]\n", status);
373
sharedregion_get_ptr(slave->nsrn_sr_ptr);
374
if (nsrn_shared_addr == NULL) {
376
pr_err("ipc_attach : "
377
"sharedregion_get_ptr "
378
"failed [0x%x]\n", status);
381
"sharedregion_get_ptr : "
382
"status [0x%x]\n", status);
387
/* create the nameserver_remotenotify instances */
388
status = nameserver_remotenotify_attach(remote_proc_id,
392
pr_err("ipc_attach : "
393
"nameserver_remotenotify_attach "
394
"failed [0x%x]\n", status);
397
"nameserver_remotenotify_attach : "
398
"status [0x%x]\n", status);
402
/* Must come after gatemp_start because depends on default GateMP */
403
if (status >= 0 && ipc->entry.setup_messageq) {
404
if (ipc_module->proc_entry[remote_proc_id].slave) {
405
msgq_shared_addr = sl_heap_alloc
406
(sharedregion_get_heap(0),
407
messageq_shared_mem_req(ipc_module->
409
sharedregion_get_cache_line_size(0));
411
slave->transport_sr_ptr =
412
sharedregion_get_srptr(msgq_shared_addr, 0);
413
if (slave->transport_sr_ptr ==
414
SHAREDREGION_INVALIDSRPTR) {
416
pr_err("ipc_attach : "
417
"sharedregion_get_srptr "
418
"failed [0x%x]\n", status);
421
"sharedregion_get_srptr : "
422
"status [0x%x]\n", status);
425
msgq_shared_addr = sharedregion_get_ptr(slave->
427
if (msgq_shared_addr == NULL) {
429
pr_err("ipc_attach : "
430
"sharedregion_get_ptr "
431
"failed [0x%x]\n", status);
434
"sharedregion_get_ptr : "
435
"status [0x%x]\n", status);
440
/* create the messageq Transport instances */
441
status = messageq_attach(remote_proc_id,
444
pr_err("ipc_attach : "
446
"failed [0x%x]\n", status);
450
"status [0x%x]\n", status);
455
/* slave writes back its published SRPtrs so the master sees them */
if (ipc_module->proc_entry[remote_proc_id].slave)
456
Cache_wbInv((void *)slave, reserved_size,
457
Cache_Type_ALL, true);
462
/* Finish the processor synchronization */
463
status = ipc_proc_sync_finish(remote_proc_id,
464
ipc_module->ipc_shared_addr);
466
pr_err("ipc_attach : "
467
"ipc_proc_sync_finish "
468
"failed [0x%x]\n", status);
471
"ipc_proc_sync_finish : "
472
"status [0x%x]\n", status);
476
ipc->is_attached = true;
478
pr_err("ipc_attach failed! status = 0x%x\n", status);
485
* ============= ipc_detach ==============
486
* detaches from a remote processor
488
/*
 * ipc_detach - detach this processor from remote_proc_id.
 * Tears down, in reverse attach order: MessageQ transport, NSRN, ipu_pm,
 * Notify (the slave frees the shared memory it allocated in ipc_attach and
 * resets the published SRPtrs), then SharedRegion and GateMP, and clears
 * is_attached. Returns IPC_E_INVALIDSTATE if not currently attached.
 * NOTE(review): garbled extraction — code kept byte-identical,
 * comments only added.
 */
int ipc_detach(u16 remote_proc_id)
492
u32 reserved_size = ipc_reserved_size_per_proc();
494
bool cache_enabled = sharedregion_is_cache_enabled(0);
495
void *notify_shared_addr;
496
void *nsrn_shared_addr;
497
void *msgq_shared_addr;
498
VOLATILE struct ipc_reserved *slave;
499
VOLATILE struct ipc_reserved *master;
500
struct ipc_proc_entry *ipc;
501
u32 nsrn_mem_req = nameserver_remotenotify_shared_mem_req(NULL);
502
/* prefetching into local variable because of
503
later space restrictions */
505
/* get the paramters associated with remote_proc_id */
506
ipc = &(ipc_module->proc_entry[remote_proc_id]);
508
if (ipc->is_attached == false) {
509
status = IPC_E_INVALIDSTATE;
513
/* determine the slave's slot */
514
slave = ipc_get_slave_addr(remote_proc_id, ipc_module->
519
if (unlikely(cache_enabled))
520
Cache_inv((void *) slave, reserved_size,
521
Cache_Type_ALL, true);
523
if (ipc->entry.setup_messageq) {
524
/* call messageq_detach for remote processor */
525
status = messageq_detach(remote_proc_id);
527
pr_err("ipc_detach : "
529
"failed [0x%x]\n", status);
533
"status [0x%x]\n", status);
535
/* free the memory if slave processor */
536
if (ipc_module->proc_entry[remote_proc_id].slave) {
537
/* get the pointer to messageq transport
539
msgq_shared_addr = sharedregion_get_ptr(
540
slave->transport_sr_ptr);
542
if (msgq_shared_addr != NULL) {
543
/* free the memory back to sharedregion
545
sl_heap_free(sharedregion_get_heap(0),
547
messageq_shared_mem_req(
551
/* set the pointer for messageq transport
552
instance back to invalid */
553
slave->transport_sr_ptr =
554
SHAREDREGION_INVALIDSRPTR;
558
if (ipc->entry.setup_notify) {
559
/* call nameserver_remotenotify_detach for
561
status = nameserver_remotenotify_detach(
564
pr_err("ipc_detach : "
565
"nameserver_remotenotify_detach "
566
"failed [0x%x]\n", status);
569
"nameserver_remotenotify_detach : "
570
"status [0x%x]\n", status);
572
/* free the memory if slave processor */
573
if (ipc_module->proc_entry[remote_proc_id].slave) {
574
/* get the pointer to NSRN instance */
575
nsrn_shared_addr = sharedregion_get_ptr(
578
if (nsrn_shared_addr != NULL)
579
/* free the memory back to
580
SharedRegion 0 heap */
581
sl_heap_free(sharedregion_get_heap(0),
585
/* set the pointer for NSRN instance back
588
SHAREDREGION_INVALIDSRPTR;
592
if (ipc->entry.setup_notify && ipc->entry.setup_ipu_pm) {
593
/* call ipu_pm_detach for remote processor */
594
status = ipu_pm_detach(remote_proc_id);
596
pr_err("ipc_detach : "
598
"failed [0x%x]\n", status);
602
"status [0x%x]\n", status);
605
if (ipc->entry.setup_notify) {
606
/* call notify_detach for remote processor */
607
status = notify_detach(remote_proc_id);
609
pr_err("ipc_detach : "
611
"failed [0x%x]\n", status);
615
"status [0x%x]\n", status);
617
/* free the memory if slave processor */
618
if (ipc_module->proc_entry[remote_proc_id].slave) {
619
/* get the pointer to Notify instance */
620
notify_shared_addr = sharedregion_get_ptr(
621
slave->notify_sr_ptr);
623
if (notify_shared_addr != NULL) {
624
/* free the memory back to
625
SharedRegion 0 heap */
626
sl_heap_free(sharedregion_get_heap(0),
628
notify_shared_mem_req(
630
notify_shared_addr));
633
/* set the pointer for Notify instance
635
slave->notify_sr_ptr =
636
SHAREDREGION_INVALIDSRPTR;
640
if (unlikely(cache_enabled)) {
641
/* reset the sync keys/list heads in shared memory so a later
 * re-attach starts from a clean state */
if (ipc_module->proc_entry[remote_proc_id].slave) {
642
slave->started_key = 0;
643
slave->config_list_head =
644
SHAREDREGION_INVALIDSRPTR;
646
Cache_wbInv((void *)slave, reserved_size,
647
Cache_Type_ALL, true);
650
/* determine the master's slot */
651
master = ipc_get_master_addr(remote_proc_id,
652
ipc_module->ipc_shared_addr);
654
if (master != NULL) {
655
master->started_key = 0;
656
master->config_list_head =
657
SHAREDREGION_INVALIDSRPTR;
659
Cache_wbInv((void *) master,
668
/* Now detach the SharedRegion */
669
status = sharedregion_detach(remote_proc_id);
672
/* Now detach the GateMP */
673
status = gatemp_detach(remote_proc_id, ipc_module->
677
ipc->is_attached = false;
681
pr_err("ipc_detach failed with status [0x%x]\n", status);
687
* ========= ipc_control ==========
688
* Function to destroy an ipc instance for a slave
691
/*
 * ipc_control - dispatch a platform load/start/stop callback for proc_id.
 * cmd_id selects the callback; arg is forwarded opaquely. Callbacks are
 * compiled in only under CONFIG_SYSLINK_USE_SYSMGR.
 * NOTE(review): garbled extraction — the return-type line, switch header
 * and break/#endif lines are missing; code kept byte-identical.
 */
ipc_control(u16 proc_id, u32 cmd_id, void *arg)
693
int status = IPC_S_SUCCESS;
696
case IPC_CONTROLCMD_LOADCALLBACK:
698
#if defined CONFIG_SYSLINK_USE_SYSMGR
699
status = platform_load_callback(proc_id, arg);
701
pr_err("ipc_control : platform_load_callback "
702
"failed [0x%x]\n", status);
707
case IPC_CONTROLCMD_STARTCALLBACK:
709
#if defined CONFIG_SYSLINK_USE_SYSMGR
710
status = platform_start_callback(proc_id, arg);
712
pr_err("ipc_control : platform_start_callback"
713
" failed [0x%x]\n", status);
718
case IPC_CONTROLCMD_STOPCALLBACK:
720
#if defined CONFIG_SYSLINK_USE_SYSMGR
721
status = platform_stop_callback(proc_id, arg);
723
pr_err("ipc_control : platform_stop_callback"
724
" failed [0x%x]\n", status);
732
/* unknown cmd_id falls through to the error report below */
pr_err("ipc_control : invalid "
733
" command code [0x%x]\n", cmd_id);
743
* ======== ipc_get_master_addr ========
745
/*
 * ======== ipc_get_master_addr ========
 * Returns the address of the master's reserved slot, within the region-0
 * reservation, for the (self, remote_proc_id) processor pair. The
 * processor with the larger id holds the master role; the slot index
 * within the master's reserved area is the peer's processor id.
 */
static void *ipc_get_master_addr(u16 remote_proc_id, void *shared_addr)
{
	u32 reserved_size = ipc_reserved_size_per_proc();
	u16 self_id = multiproc_self();
	u16 master_id;
	u16 slot;
	u32 offset;

	/* determine the master's proc_id and slot */
	if (self_id < remote_proc_id) {
		master_id = remote_proc_id;
		slot = self_id;
	} else {
		master_id = self_id;
		slot = remote_proc_id;
	}

	/* reserve address for master between self and remote */
	offset = (master_id * reserved_size) +
			(slot * sizeof(struct ipc_reserved));

	return (void *)((u32)shared_addr + offset);
}
770
* ======== ipc_get_region0_reserved_size ========
772
/*
 * ======== ipc_get_region0_reserved_size ========
 * Total memory to reserve for Ipc in SharedRegion 0: one per-processor
 * reservation for each processor in the system.
 */
static u32 ipc_get_region0_reserved_size(void)
{
	return ipc_reserved_size_per_proc() *
			multiproc_get_num_processors();
}
783
* ======== Ipc_getSlaveAddr ========
785
/*
 * ======== ipc_get_slave_addr ========
 * Returns the address of the slave's reserved slot, within the region-0
 * reservation, for the (self, remote_proc_id) processor pair. The
 * processor with the smaller id holds the slave role; the slot index is
 * the master's processor id minus one (the slave never pairs with itself,
 * so slot 0 corresponds to master id 1).
 */
static void *ipc_get_slave_addr(u16 remote_proc_id, void *shared_addr)
{
	u32 reserved_size = ipc_reserved_size_per_proc();
	u16 self_id = multiproc_self();
	u16 slave_id;
	u16 slot;
	u32 offset;

	/* determine the slave's proc_id and slot */
	if (self_id < remote_proc_id) {
		slave_id = self_id;
		slot = remote_proc_id - 1;
	} else {
		slave_id = remote_proc_id;
		slot = self_id - 1;
	}

	/* reserve address for slave between self and remote */
	offset = (slave_id * reserved_size) +
			(slot * sizeof(struct ipc_reserved));

	return (void *)((u32)shared_addr + offset);
}
810
* ======== Ipc_proc_syncStart ========
811
* The owner of SharedRegion 0 writes to its reserve memory address
812
* in region 0 to let the other processors know it has started.
813
* It then spins until the other processors start.
814
* The other processors write their reserve memory address in
815
* region 0 to let the owner processor know they've started.
816
* The other processors then spin until the owner processor writes
817
* to let them know that its finished the process of synchronization
820
/*
 * ipc_proc_sync_start - begin the start-up handshake with remote_proc_id.
 * Both sides publish their config-list head in their reserved slot, then:
 * the slave writes IPC_PROCSYNCSTART and spins until the master's key
 * becomes IPC_PROCSYNCSTART; the master waits for the slave's start key,
 * writes its own, then spins until the slave reports IPC_PROCSYNCFINISH.
 * Skipped entirely when proc_sync is IPC_PROCSYNC_NONE.
 * NOTE(review): garbled extraction — loop/brace and cache-guard lines are
 * missing; code kept byte-identical, comments only added.
 */
static int ipc_proc_sync_start(u16 remote_proc_id, void *shared_addr)
823
u32 reserved_size = ipc_reserved_size_per_proc();
824
bool cache_enabled = sharedregion_is_cache_enabled(0);
827
VOLATILE struct ipc_reserved *self;
828
VOLATILE struct ipc_reserved *remote;
829
struct ipc_proc_entry *ipc;
831
/* don't do any synchronization if proc_sync is NONE */
832
if (ipc_module->proc_sync != IPC_PROCSYNC_NONE) {
833
/* determine self and remote pointers */
834
if (ipc_module->proc_entry[remote_proc_id].slave) {
835
self = ipc_get_slave_addr(remote_proc_id, shared_addr);
836
remote = ipc_get_master_addr(remote_proc_id,
839
self = ipc_get_master_addr(remote_proc_id, shared_addr);
840
remote = ipc_get_slave_addr(remote_proc_id,
844
/* construct the config list */
845
ipc = &(ipc_module->proc_entry[remote_proc_id]);
847
ipc->local_config_list = (void *)&self->config_list_head;
848
ipc->remote_config_list = (void *)&remote->config_list_head;
850
((struct ipc_config_head *)ipc->local_config_list)->first =
851
(u32)SHAREDREGION_INVALIDSRPTR;
854
Cache_wbInv(ipc->local_config_list,
861
/* slave side of the handshake */
if (ipc_module->proc_entry[remote_proc_id].slave) {
862
/* set my processor's reserved key to start */
863
self->started_key = IPC_PROCSYNCSTART;
865
/* write back my processor's reserve key */
867
Cache_wbInv((void *)self, reserved_size,
868
Cache_Type_ALL, true);
870
/* wait for remote processor to start */
872
Cache_inv((void *)remote, reserved_size,
873
Cache_Type_ALL, true);
875
if (remote->started_key != IPC_PROCSYNCSTART)
881
/* master side: wait for the slave's start key first */
/* wait for remote processor to start */
882
Cache_inv((void *)remote, reserved_size, Cache_Type_ALL, true);
884
if ((self->started_key != IPC_PROCSYNCSTART) &&
885
(remote->started_key != IPC_PROCSYNCSTART)) {
891
/* set my processor's reserved key to start */
892
self->started_key = IPC_PROCSYNCSTART;
894
/* write my processor's reserve key back */
896
Cache_wbInv((void *)self, reserved_size,
897
Cache_Type_ALL, true);
899
/* wait for remote processor to finish */
900
Cache_inv((void *)remote, reserved_size,
901
Cache_Type_ALL, true);
903
if (remote->started_key != IPC_PROCSYNCFINISH) {
911
pr_err("ipc_proc_sync_start failed: status [0x%x]\n", status);
913
pr_err("ipc_proc_sync_start done\n");
919
* ======== Ipc_proc_syncFinish ========
920
* Each processor writes its reserve memory address in SharedRegion 0
921
* to let the other processors know its finished the process of
924
/*
 * ======== ipc_proc_sync_finish ========
 * Completes the processor synchronization started by
 * ipc_proc_sync_start(): writes IPC_PROCSYNCFINISH into this processor's
 * reserved slot (with cache write-back when region 0 is cached) and, on
 * the slave side, spins until the remote processor also reports FINISH.
 * No-op when proc_sync is IPC_PROCSYNC_NONE. Always returns
 * IPC_S_SUCCESS.
 *
 * Fix: the spin loop previously read
 *     Cache_inv((Ptr)remote, reservedSize, Cache_Type_ALL, TRUE);
 * using identifiers (Ptr, reservedSize, TRUE) that are not declared
 * anywhere in this file — replaced with the kernel-style names used by
 * the rest of the module ((void *), reserved_size, true).
 */
static int ipc_proc_sync_finish(u16 remote_proc_id, void *shared_addr)
{
	u32 reserved_size = ipc_reserved_size_per_proc();
	bool cache_enabled = sharedregion_is_cache_enabled(0);
	VOLATILE struct ipc_reserved *self;
	VOLATILE struct ipc_reserved *remote;

	/* don't do any synchronization if proc_sync is NONE */
	if (ipc_module->proc_sync != IPC_PROCSYNC_NONE) {
		/* determine self and remote pointers */
		if (ipc_module->proc_entry[remote_proc_id].slave) {
			self = ipc_get_slave_addr(remote_proc_id,
							shared_addr);
			remote = ipc_get_master_addr(remote_proc_id,
							shared_addr);
		} else {
			self = ipc_get_master_addr(remote_proc_id,
							shared_addr);
			remote = ipc_get_slave_addr(remote_proc_id,
							shared_addr);
		}

		/* set my processor's reserved key to finish */
		self->started_key = IPC_PROCSYNCFINISH;

		/* write back my processor's reserve key */
		if (cache_enabled)
			Cache_wbInv((void *)self, reserved_size,
					Cache_Type_ALL, true);

		/* if slave processor, wait for remote to finish sync */
		if (ipc_module->proc_entry[remote_proc_id].slave) {
			do {
				/* re-read the remote key; invalidate the
				 * local cached copy first when caching is
				 * enabled */
				if (cache_enabled)
					Cache_inv((void *)remote,
							reserved_size,
							Cache_Type_ALL, true);
			} while (remote->started_key != IPC_PROCSYNCFINISH);
		}
	}

	return IPC_S_SUCCESS;
}
972
* ======== ipc_read_config ========
974
/*
 * ipc_read_config - read a config blob published by remote_proc_id.
 * Walks the remote processor's shared config list (SRPtr-linked) looking
 * for an entry matching (remote wrote it for us, owned by remote_proc_id,
 * same tag) and copies 'size' bytes into cfg on an exact size match.
 * Fails when the module is not set up or the processor is not attached.
 * NOTE(review): garbled extraction — success path / memcpy lines are
 * missing; code kept byte-identical, comments only added.
 */
int ipc_read_config(u16 remote_proc_id, u32 tag, void *cfg, u32 size)
977
bool cache_enabled = sharedregion_is_cache_enabled(0);
979
int status = IPC_E_FAIL;
980
VOLATILE struct ipc_config_entry *entry;
982
if (ipc_module->ref_count == 0) {
987
if (ipc_module->proc_entry[remote_proc_id].is_attached == false) {
995
Cache_inv(ipc_module->proc_entry[remote_proc_id].
997
sharedregion_get_cache_line_size(0),
1002
/* head of the remote list is stored as an SRPtr */
entry = (struct ipc_config_entry *)((struct ipc_config_head *)
1003
ipc_module->proc_entry[remote_proc_id].remote_config_list)->
1006
while ((u32 *)entry != SHAREDREGION_INVALIDSRPTR) {
1007
entry = (struct ipc_config_entry *)
1008
sharedregion_get_ptr((u32 *)entry);
1009
if (entry == NULL) {
1010
status = IPC_E_FAIL;
1014
/* Traverse the list to find the tag */
1015
if (cache_enabled) {
1016
Cache_inv((void *)entry,
1017
size + sizeof(struct ipc_config_entry),
1023
if ((entry->remote_proc_id == multiproc_self()) &&
1024
(entry->local_proc_id == remote_proc_id) &&
1025
(entry->tag == tag)) {
1027
if (size == entry->size)
1029
(void *)((u32)entry + sizeof(struct
1033
status = IPC_E_FAIL;
1035
/* 'next' holds the SRPtr of the following entry */
entry = (struct ipc_config_entry *)entry->next;
1042
pr_err("ipc_read_config failed: status [0x%x]\n", status);
1048
* ======== ipc_reserved_size_per_proc ========
1050
static u32 ipc_reserved_size_per_proc(void)
1052
u32 reserved_size = sizeof(struct ipc_reserved) *
1053
multiproc_get_num_processors();
1054
u32 cache_line_size = sharedregion_get_cache_line_size(0);
1056
/* Calculate amount to reserve per processor */
1057
if (cache_line_size > reserved_size)
1058
/* Use cache_line_size if larger than reserved_size */
1059
reserved_size = cache_line_size;
1061
/* Round reserved_size to cache_line_size */
1062
reserved_size = ROUND_UP(reserved_size, cache_line_size);
1064
return reserved_size;
1068
* ======== ipc_write_config ========
1070
/*
 * ipc_write_config - publish a config blob for remote_proc_id to read.
 * Allocates an ipc_config_entry plus 'size' payload bytes from the
 * SharedRegion-0 heap, fills it in, and prepends it (as an SRPtr) to this
 * processor's local config list; writes the entry back when caching is
 * enabled. Fails when the module is not set up or not attached.
 * NOTE(review): garbled extraction — code kept byte-identical,
 * comments only added.
 */
int ipc_write_config(u16 remote_proc_id, u32 tag, void *cfg, u32 size)
1073
bool cache_enabled = sharedregion_is_cache_enabled(0);
1075
u32 cache_line_size = sharedregion_get_cache_line_size(0);
1076
int status = IPC_S_SUCCESS;
1077
struct ipc_config_entry *entry;
1079
if (ipc_module->ref_count == 0) {
1084
if (ipc_module->proc_entry[remote_proc_id].is_attached == false) {
1089
/* Allocate memory from the shared heap (System Heap) */
1090
entry = sl_heap_alloc(sharedregion_get_heap(0),
1091
size + sizeof(struct ipc_config_entry),
1094
if (entry == NULL) {
1095
status = IPC_E_FAIL;
1099
entry->remote_proc_id = remote_proc_id;
1100
entry->local_proc_id = multiproc_self();
1103
/* payload is stored immediately after the entry header */
memcpy((void *)((u32)entry + sizeof(struct ipc_config_entry)),
1106
/* Create a linked-list of config */
1107
/* empty list: new entry becomes the only element */
if (((struct ipc_config_head *)ipc_module->
1108
proc_entry[remote_proc_id].local_config_list)->first
1109
== (u32)SHAREDREGION_INVALIDSRPTR) {
1111
entry->next = (u32)SHAREDREGION_INVALIDSRPTR;
1112
((struct ipc_config_head *)ipc_module->
1113
proc_entry[remote_proc_id].local_config_list)->first =
1114
(u32)sharedregion_get_srptr(entry, 0);
1116
if (((struct ipc_config_head *)ipc_module->
1117
proc_entry[remote_proc_id].local_config_list)->first
1118
== (u32)SHAREDREGION_INVALIDSRPTR)
1119
status = IPC_E_FAIL;
1121
/* non-empty list: push the new entry at the head */
entry->next = ((struct ipc_config_head *)ipc_module->
1122
proc_entry[remote_proc_id].local_config_list)->first;
1124
((struct ipc_config_head *)ipc_module->
1125
proc_entry[remote_proc_id].local_config_list)->first =
1126
(u32)sharedregion_get_srptr(entry, 0);
1127
if (((struct ipc_config_head *)ipc_module->
1128
proc_entry[remote_proc_id].local_config_list)->first
1129
== (u32)SHAREDREGION_INVALIDSRPTR)
1130
status = IPC_E_FAIL;
1133
if (cache_enabled) {
1134
Cache_wbInv(ipc_module->proc_entry[remote_proc_id].
1136
sharedregion_get_cache_line_size(0),
1141
size + sizeof(struct ipc_config_entry),
1149
pr_err("ipc_write_config failed: status [0x%x]\n", status);
1156
/*
 * ======== ipc_start ========
 * Reserves SharedRegion-0 memory for processor sync, the default GateMP
 * and the ipu_pm region, starts GateMP and SharedRegion, then (when
 * proc_sync is ALL) attaches to the region owner first and then to every
 * other processor with an available interrupt line. Guarded by the
 * magic-stamped start_ref_count so only the first caller does the work.
 * NOTE(review): garbled extraction — the function signature line itself
 * is missing; code kept byte-identical, comments only added.
 */
1162
struct sharedregion_entry entry;
1163
void *ipc_shared_addr;
1164
void *gatemp_shared_addr;
1165
void *ipu_pm_shared_addr;
1166
struct gatemp_params gatemp_params;
1167
bool line_available;
1169
/* This sets the ref_count variable is not initialized, upper 16 bits
1170
* is written with module Id to ensure correctness of ref_count
1173
atomic_cmpmask_and_set(&(ipc_module->start_ref_count),
1174
IPC_MAKE_MAGICSTAMP(0),
1175
IPC_MAKE_MAGICSTAMP(0));
1176
/* only the first ipc_start() call performs the setup below */
if (atomic_inc_return(&(ipc_module->start_ref_count))
1177
!= IPC_MAKE_MAGICSTAMP(1u)) {
1178
status = IPC_S_SUCCESS;
1182
/* get region 0 information */
1183
sharedregion_get_entry(0, &entry);
1185
/* if entry is not valid then return */
1186
if (entry.is_valid == false) {
1187
status = IPC_E_FAIL;
1191
* Need to reserve memory in region 0 for processor synchronization.
1192
* This must done before SharedRegion_start().
1194
ipc_shared_addr = sharedregion_reserve_memory(0,
1195
ipc_get_region0_reserved_size());
1197
/* must reserve memory for gatemp before sharedregion_start() */
1198
gatemp_shared_addr = sharedregion_reserve_memory(0,
1199
gatemp_get_region0_reserved_size());
1201
/* Init params for default gate(must match those in gatemp_start() */
1202
gatemp_params_init(&gatemp_params);
1203
gatemp_params.local_protect = GATEMP_LOCALPROTECT_TASKLET;
1205
if (multiproc_get_num_processors() > 1)
1206
gatemp_params.remote_protect = GATEMP_REMOTEPROTECT_SYSTEM;
1208
gatemp_params.remote_protect = GATEMP_REMOTEPROTECT_NONE;
1210
/* reserve memory for default gate before SharedRegion_start() */
1211
sharedregion_reserve_memory(0, gatemp_shared_mem_req(&gatemp_params));
1213
/* reserve memory for PM region */
1214
ipu_pm_shared_addr = sharedregion_reserve_memory(0,
1215
ipu_pm_mem_req(NULL));
1217
/* clear the reserved memory */
1218
sharedregion_clear_reserved_memory();
1220
/* Set shared addresses */
1221
ipc_module->ipc_shared_addr = ipc_shared_addr;
1222
ipc_module->gatemp_shared_addr = gatemp_shared_addr;
1223
ipc_module->ipu_pm_shared_addr = ipu_pm_shared_addr;
1225
/* create default GateMP, must be called before sharedregion_start */
1226
status = gatemp_start(ipc_module->gatemp_shared_addr);
1228
status = IPC_E_FAIL;
1232
/* create HeapMemMP in each SharedRegion */
1233
status = sharedregion_start();
1235
status = IPC_E_FAIL;
1239
/* Call attach for all procs if proc_sync is ALL */
1240
if (ipc_module->proc_sync == IPC_PROCSYNC_ALL) {
1241
/* Must attach to owner first to get default GateMP and
1243
if (multiproc_self() != entry.owner_proc_id) {
1245
/* retries until the owner comes up */
status = ipc_attach(entry.owner_proc_id);
1246
} while (status < 0);
1249
/* Loop to attach to all other processors */
1250
for (i = 0; i < multiproc_get_num_processors(); i++) {
1251
if ((i == multiproc_self())
1252
|| (i == entry.owner_proc_id))
1255
notify_setup_proxy_int_line_available(i);
1256
if (!line_available)
1258
/* call Ipc_attach for every remote processor */
1260
status = ipc_attach(i);
1261
} while (status < 0);
1267
pr_err("ipc_start failed: status [0x%x]\n", status);
1274
/*
 * ======== ipc_stop ========
 * Reverses ipc_start(): when the magic-stamped start_ref_count drops back
 * to zero, unreserves the region-0 memory for sync, GateMP (params must
 * match those used in ipc_start) and ipu_pm, stops SharedRegion and
 * GateMP, and clears the cached shared addresses.
 * NOTE(review): garbled extraction — the function signature line is
 * missing; code kept byte-identical, comments only added.
 */
1278
int status = IPC_S_SUCCESS;
1279
int tmp_status = IPC_S_SUCCESS;
1280
struct sharedregion_entry entry;
1281
struct gatemp_params gatemp_params;
1283
if (unlikely(atomic_cmpmask_and_lt(&(ipc_module->start_ref_count),
1284
IPC_MAKE_MAGICSTAMP(0),
1285
IPC_MAKE_MAGICSTAMP(1)) == true)) {
1286
status = IPC_E_FAIL;
1290
/* only the last ipc_stop() call performs the teardown below */
if (likely(atomic_dec_return(&ipc_module->start_ref_count)
1291
== IPC_MAKE_MAGICSTAMP(0))) {
1292
/* get region 0 information */
1293
sharedregion_get_entry(0, &entry);
1295
/* if entry is not valid then return */
1296
if (entry.is_valid == false) {
1297
status = IPC_E_FAIL;
1303
* Need to unreserve memory in region 0 for processor
1304
* synchronization. This must done before sharedregion_stop().
1306
sharedregion_unreserve_memory(0,
1307
ipc_get_region0_reserved_size());
1309
/* must unreserve memory for GateMP before
1310
sharedregion_stop() */
1311
sharedregion_unreserve_memory(0,
1312
gatemp_get_region0_reserved_size());
1314
/* Init params for default gate (must match those
1316
gatemp_params_init(&gatemp_params);
1317
gatemp_params.local_protect = GATEMP_LOCALPROTECT_TASKLET;
1319
if (multiproc_get_num_processors() > 1)
1320
gatemp_params.remote_protect =
1321
GATEMP_REMOTEPROTECT_SYSTEM;
1323
gatemp_params.remote_protect =
1324
GATEMP_REMOTEPROTECT_NONE;
1326
/* unreserve memory for default gate before
1327
sharedregion_stop() */
1328
sharedregion_unreserve_memory(0,
1329
gatemp_shared_mem_req(&gatemp_params));
1331
/* must unreserve memory for PM before sharedregion_stop() */
1332
sharedregion_unreserve_memory(0, ipu_pm_mem_req(NULL));
1334
/* Delete heapmemmp in each sharedregion */
1335
status = sharedregion_stop();
1337
status = IPC_E_FAIL;
1341
/* delete default gatemp, must be called after
1344
tmp_status = gatemp_stop();
1345
/* preserve the first failure status */
if ((tmp_status < 0) && (status >= 0)) {
1346
status = IPC_E_FAIL;
1350
ipc_module->ipu_pm_shared_addr = NULL;
1351
ipc_module->gatemp_shared_addr = NULL;
1352
ipc_module->ipc_shared_addr = NULL;
1356
pr_err("ipc_stop failed: status [0x%x]\n", status);
1363
/*
 * ======== ipc_get_config ========
 * Copies the module configuration into *cfg_params: the built-in default
 * (proc_sync = ALL) before ipc_setup() has run, or the stored module cfg
 * afterwards. Performed under the system gate.
 * NOTE(review): garbled extraction — local declarations and the else
 * branch line are missing; code kept byte-identical.
 */
1365
void ipc_get_config(struct ipc_config *cfg_params)
1370
BUG_ON(cfg_params == NULL);
1372
if (cfg_params == NULL) {
1378
key = gate_enter_system();
1379
/* ref_count == 0 means not yet set up: report defaults */
if (ipc_module->ref_count == 0)
1380
cfg_params->proc_sync = IPC_PROCSYNC_ALL;
1382
memcpy((void *) cfg_params, (void *) &ipc_module->cfg,
1383
sizeof(struct ipc_config));
1385
gate_leave_system(key);
1389
pr_err("ipc_get_config failed: status [0x%x]\n", status);
1393
/* Sets up ipc for this processor. */
1394
/*
 * ipc_setup - one-time module setup: stores the supplied (or default)
 * ipc_config, records proc_sync, runs platform_setup(), and clears every
 * per-processor is_attached flag. Re-entrant: returns IPC_S_ALREADYSETUP
 * when already set up; rolls the ref_count back on platform failure.
 * NOTE(review): garbled extraction — code kept byte-identical.
 */
int ipc_setup(const struct ipc_config *cfg)
1396
int status = IPC_S_SUCCESS;
1397
struct ipc_config tmp_cfg;
1401
key = gate_enter_system();
1402
ipc_module->ref_count++;
1404
/* This sets the ref_count variable is not initialized, upper 16 bits is
1405
* written with module Id to ensure correctness of ref_count variable.
1407
if (ipc_module->ref_count > 1) {
1408
status = IPC_S_ALREADYSETUP;
1409
gate_leave_system(key);
1413
gate_leave_system(key);
1415
/* fall back to the defaults when the caller passed no cfg */
ipc_get_config(&tmp_cfg);
1420
memcpy(&ipc_module->cfg, cfg, sizeof(struct ipc_config));
1422
ipc_module->proc_sync = cfg->proc_sync;
1424
status = platform_setup();
1426
/* platform_setup() failed: undo the ref_count increment */
key = gate_enter_system();
1427
ipc_module->ref_count--;
1428
gate_leave_system(key);
1429
status = IPC_E_FAIL;
1433
/* Following can be done regardless of status */
1434
for (i = 0; i < multiproc_get_num_processors(); i++)
1435
ipc_module->proc_entry[i].is_attached = false;
1439
pr_err("ipc_setup failed: status [0x%x]\n", status);
1446
/*
 * =========== ipc_destroy ==========
1447
* Destroys ipc for this processor.
1449
* Decrements the setup ref_count; on the last reference it also invokes
* ipc_stop() if ipc_start() was left unbalanced, then platform_destroy().
* Returns IPC_E_INVALIDSTATE on underflow (destroy without setup).
* NOTE(review): garbled extraction — code kept byte-identical.
*/
int ipc_destroy(void)
1451
int status = IPC_S_SUCCESS;
1454
key = gate_enter_system();
1455
ipc_module->ref_count--;
1457
if (ipc_module->ref_count < 0) {
1458
gate_leave_system(key);
1459
status = IPC_E_INVALIDSTATE;
1463
if (ipc_module->ref_count == 0) {
1464
gate_leave_system(key);
1465
if (unlikely(atomic_cmpmask_and_lt(
1466
&(ipc_module->start_ref_count),
1467
IPC_MAKE_MAGICSTAMP(0),
1468
IPC_MAKE_MAGICSTAMP(1)) == false)) {
1470
* ipc_start was called, but ipc_stop never happened.
1471
* Need to call ipc_stop here.
1473
/* Set the count to 1 so only need to call stop once. */
1474
atomic_set(&ipc_module->start_ref_count,
1475
IPC_MAKE_MAGICSTAMP(1));
1478
status = platform_destroy();
1480
status = IPC_E_FAIL;
1484
gate_leave_system(key);
1488
pr_err("ipc_destroy failed: status [0x%x]\n", status);
1495
* ====== ipc_create =======
1498
/*
 * ipc_create - record the attach parameters for remote_proc_id.
 * Stores which sub-modules (MessageQ, Notify, ipu_pm) ipc_attach() should
 * set up for this remote processor. Must be called before ipc_attach().
 *
 * @remote_proc_id: id of the remote processor (< MULTIPROC_MAXPROCESSORS)
 * @params: attach parameters; proc_sync must match the module setting
 *
 * Returns IPC_S_SUCCESS, or IPC_E_FAIL for invalid arguments.
 *
 * Fix: validate remote_proc_id before indexing proc_entry[] — consistent
 * with the identical check in ipc_attach(); also reject a NULL params
 * pointer instead of dereferencing it.
 */
int ipc_create(u16 remote_proc_id, struct ipc_params *params)
{
	if (remote_proc_id >= MULTIPROC_MAXPROCESSORS || params == NULL) {
		pr_err("ipc_create : invalid argument\n");
		return IPC_E_FAIL;
	}

	ipc_module->proc_entry[remote_proc_id].entry.setup_messageq =
					params->setup_messageq;
	ipc_module->proc_entry[remote_proc_id].entry.setup_notify =
					params->setup_notify;
	ipc_module->proc_entry[remote_proc_id].entry.setup_ipu_pm =
					params->setup_ipu_pm;
	ipc_module->proc_entry[remote_proc_id].entry.remote_proc_id =
					remote_proc_id;

	/* Assert that the proc_sync is same as configured for the module. */
	BUG_ON(ipc_module->proc_sync != params->proc_sync);

	return IPC_S_SUCCESS;
}