356
357
/* The actual cache flush - Implemented for each OS*/
357
_ump_osk_msync( mem, virtual, offset, size, args->op);
358
_ump_osk_msync( mem, virtual, offset, size, args->op, NULL);
359
360
msync_release_and_return:
360
361
ump_dd_reference_release(mem);
365
/**
 * Start or finish a batch of cache maintenance operations for one session.
 *
 * _UMP_UK_CACHE_OP_START increments the session's ongoing-operations counter;
 * _UMP_UK_CACHE_OP_FINISH decrements it and flushes the L1 cache. The session
 * lock serializes access to cache_operations_ongoing and
 * has_pending_level1_cache_flush.
 *
 * @param args  Operation descriptor from user space; args->ctx holds the
 *              session, args->op selects START or FINISH.
 */
void _ump_ukk_cache_operations_control(_ump_uk_cache_operations_control_s* args)
{
	ump_session_data * session_data;
	ump_uk_cache_op_control op;

	DEBUG_ASSERT_POINTER( args );
	DEBUG_ASSERT_POINTER( args->ctx );

	/* BUGFIX: op was compared below without ever being read from args,
	 * i.e. an uninitialized read. Initialize it from the request. */
	op = args->op;
	session_data = (ump_session_data *)args->ctx;

	/* Protects session_data->cache_operations_ongoing and
	 * session_data->has_pending_level1_cache_flush. */
	_mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
	if ( op == _UMP_UK_CACHE_OP_START )
	{
		session_data->cache_operations_ongoing++;
		DBG_MSG(4, ("Cache ops start\n" ));
		if ( session_data->cache_operations_ongoing != 1 )
		{
			/* Nested/overlapping batches are allowed but worth noting. */
			DBG_MSG(2, ("UMP: Number of simultanious cache control ops: %d\n", session_data->cache_operations_ongoing) );
		}
	}
	else if ( op == _UMP_UK_CACHE_OP_FINISH )
	{
		DBG_MSG(4, ("Cache ops finish\n"));
		session_data->cache_operations_ongoing--;

		if ( session_data->has_pending_level1_cache_flush )
		{
			/* This function will set has_pending_level1_cache_flush=0 */
			_ump_osk_msync( NULL, NULL, 0, 0, _UMP_UK_MSYNC_FLUSH_L1, session_data);
		}

		/* to be on the safe side: always flush l1 cache when cache operations are done */
		_ump_osk_msync( NULL, NULL, 0, 0, _UMP_UK_MSYNC_FLUSH_L1, session_data);
		DBG_MSG(4, ("Cache ops finish end\n" ));
	}
	else
	{
		/* Unknown op value from user space; log and fall through to unlock. */
		DBG_MSG(1, ("Illegal call to %s at line %d\n", __FUNCTION__, __LINE__));
	}
	_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
}
410
/**
 * Record a change of hardware owner (CPU <-> MALI) for a UMP allocation and
 * perform whatever cache maintenance the transition requires.
 *
 * Fast-path early returns (no flush needed):
 *  - the allocation is not found for the given secure ID,
 *  - the memory is uncached,
 *  - the owner does not actually change,
 *  - neither the old nor the new owner is the CPU,
 *  - (optionally) CPU-bound invalidation when UMP_SKIP_INVALIDATION is set.
 *
 * Otherwise: clean+invalidate by default, or invalidate-only when ownership
 * moves from a device to the CPU.
 *
 * @param args  Request from user space: secure_id of the allocation and the
 *              new_user taking ownership; args->ctx holds the session.
 */
void _ump_ukk_switch_hw_usage(_ump_uk_switch_hw_usage_s *args )
{
	ump_dd_mem * mem = NULL;
	ump_uk_user old_user;
	/* Default: safest operation when the CPU hands memory to a device. */
	ump_uk_msync_op cache_op = _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE;
	ump_session_data *session_data;

	DEBUG_ASSERT_POINTER( args );
	DEBUG_ASSERT_POINTER( args->ctx );

	session_data = (ump_session_data *)args->ctx;

	_mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
	ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);

	if ( NULL == mem )
	{
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_switch_hw_usage(). ID: %u\n", (ump_secure_id)args->secure_id));
		return;
	}

	old_user = mem->hw_device;
	mem->hw_device = args->new_user;

	DBG_MSG(3, ("UMP[%02u] Switch usage Start New: %s Prev: %s.\n", (ump_secure_id)args->secure_id, args->new_user?"MALI":"CPU",old_user?"MALI":"CPU"));

	if ( ! mem->is_cached )
	{
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		DBG_MSG(3, ("UMP[%02u] Changing owner of uncached memory. Cache flushing not needed.\n", (ump_secure_id)args->secure_id));
		return;
	}

	if ( old_user == args->new_user )
	{
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		DBG_MSG(4, ("UMP[%02u] Setting the new_user equal to previous for. Cache flushing not needed.\n", (ump_secure_id)args->secure_id));
		return;
	}

	if (
	    /* Previous AND new is both different from CPU */
	    (old_user != _UMP_UK_USED_BY_CPU) && (args->new_user != _UMP_UK_USED_BY_CPU )
	   )
	{
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		DBG_MSG(4, ("UMP[%02u] Previous and new user is not CPU. Cache flushing not needed.\n", (ump_secure_id)args->secure_id));
		return;
	}

	if ( (old_user != _UMP_UK_USED_BY_CPU ) && (args->new_user==_UMP_UK_USED_BY_CPU) )
	{
		/* Device -> CPU: CPU must not read stale lines, but there is nothing
		 * dirty in the CPU cache to clean, so invalidate is sufficient. */
		cache_op =_UMP_UK_MSYNC_INVALIDATE;
		DBG_MSG(4, ("UMP[%02u] Cache invalidation needed\n", (ump_secure_id)args->secure_id));
#ifdef UMP_SKIP_INVALIDATION
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		DBG_MSG(4, ("UMP[%02u] Performing Cache invalidation SKIPPED\n", (ump_secure_id)args->secure_id));
		return;
#endif
	}

	/* Ensure the memory doesn't dissapear when we are flushing it. */
	ump_dd_reference_add(mem);
	_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);

	/* Take lock to protect: session->cache_operations_ongoing and session->has_pending_level1_cache_flush */
	_mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
	/* Actual cache flush */
	_ump_osk_msync( mem, NULL, 0, mem->size_bytes, cache_op, session_data);
	_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);

	ump_dd_reference_release(mem);
	DBG_MSG(4, ("UMP[%02u] Switch usage Finish\n", (ump_secure_id)args->secure_id));
}
486
/**
 * Mark a UMP allocation as locked for the usage given in the request.
 *
 * Looks up the allocation by secure ID under the global map lock, takes a
 * temporary reference so it cannot disappear while being updated, records
 * args->lock_usage in the allocation, and drops the reference.
 *
 * NOTE(review): mem->lock_usage itself is written without a dedicated lock —
 * the original carries a "TODO: TAKE LOCK HERE" marker, preserved below.
 *
 * @param args  Request from user space: secure_id and the new lock_usage.
 */
void _ump_ukk_lock(_ump_uk_lock_s *args )
{
	ump_dd_mem * mem = NULL;

	_mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
	ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);

	if ( NULL == mem )
	{
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		DBG_MSG(1, ("UMP[%02u] Failed to look up mapping in _ump_ukk_lock(). ID: %u\n", (ump_secure_id)args->secure_id));
		return;
	}

	/* Hold a reference so the allocation survives while we update it. */
	ump_dd_reference_add(mem);
	_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);

	/* BUGFIX: format string had three arguments but only two conversion
	 * specifiers; the old lock flag was never printed (printf-style UB). */
	DBG_MSG(1, ("UMP[%02u] Lock. New lock flag: %d. Old Lock flag: %d\n", (u32)args->secure_id, (u32)args->lock_usage, (u32) mem->lock_usage ));

	mem->lock_usage = (ump_lock_usage) args->lock_usage;

	/** TODO: TAKE LOCK HERE */

	ump_dd_reference_release(mem);
}
511
/**
 * Clear the lock state of a UMP allocation (set it back to UMP_NOT_LOCKED).
 *
 * Looks up the allocation by secure ID under the global map lock, takes a
 * temporary reference while resetting the lock flag, then releases it.
 *
 * NOTE(review): mem->lock_usage is cleared without a dedicated lock — the
 * original carries a "TODO: RELEASE LOCK HERE" marker, preserved below.
 *
 * @param args  Request from user space: secure_id of the allocation.
 */
void _ump_ukk_unlock(_ump_uk_unlock_s *args )
{
	ump_dd_mem * mem = NULL;

	_mali_osk_lock_wait(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
	ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);

	if ( NULL == mem )
	{
		_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);
		DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_unlock(). ID: %u\n", (ump_secure_id)args->secure_id));
		return;
	}

	/* Hold a reference so the allocation survives while we update it. */
	ump_dd_reference_add(mem);
	_mali_osk_lock_signal(device.secure_id_map_lock, _MALI_OSK_LOCKMODE_RW);

	/* BUGFIX: format string had two arguments but only one conversion
	 * specifier; the old lock flag was never printed (printf-style UB). */
	DBG_MSG(1, ("UMP[%02u] Unlocking. Old Lock flag: %d\n", (u32)args->secure_id, (u32) mem->lock_usage ));

	mem->lock_usage = (ump_lock_usage) UMP_NOT_LOCKED;

	/** TODO: RELEASE LOCK HERE */

	ump_dd_reference_release(mem);
}