diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 13b5fd5854..5dd2ed11a3 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -25,7 +25,6 @@
  *
  */
 
-#include <linux/config.h>
 #include
 #include
 #include
@@ -37,6 +36,7 @@
 #include
 #include
 #include
+#include <linux/kthread.h>
 #include
 #include
 #include
@@ -136,6 +136,7 @@ void acpi_os_vprintf(const char *fmt, va_list args)
 #endif
 }
 
+
 extern int acpi_in_resume;
 void *acpi_os_allocate(acpi_size size)
 {
@@ -585,38 +586,55 @@ static void acpi_os_execute_deferred(void *context)
 {
 	struct acpi_os_dpc *dpc = NULL;
 
-	ACPI_FUNCTION_TRACE("os_execute_deferred");
 
 	dpc = (struct acpi_os_dpc *)context;
 	if (!dpc) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid (NULL) context.\n"));
-		return_VOID;
+		printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
+		return;
 	}
 
 	dpc->function(dpc->context);
 
 	kfree(dpc);
 
-	return_VOID;
+	return;
 }
 
-acpi_status
-acpi_os_queue_for_execution(u32 priority,
+static int acpi_os_execute_thread(void *context)
+{
+	struct acpi_os_dpc *dpc = (struct acpi_os_dpc *)context;
+	if (dpc) {
+		dpc->function(dpc->context);
+		kfree(dpc);
+	}
+	do_exit(0);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_os_execute
+ *
+ * PARAMETERS:  Type           - Type of the callback
+ *              Function       - Function to be executed
+ *              Context        - Function parameters
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Depending on type, either queues function for deferred execution or
+ *              immediately executes function on a separate thread.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_os_execute(acpi_execute_type type,
 			    acpi_osd_exec_callback function, void *context)
 {
 	acpi_status status = AE_OK;
 	struct acpi_os_dpc *dpc;
 	struct work_struct *task;
-
-	ACPI_FUNCTION_TRACE("os_queue_for_execution");
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-			  "Scheduling function [%p(%p)] for deferred execution.\n",
-			  function, context));
+	struct task_struct *p;
 
 	if (!function)
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-
+		return AE_BAD_PARAMETER;
 	/*
 	 * Allocate/initialize DPC structure.  Note that this memory will be
 	 * freed by the callee.  The kernel handles the tq_struct list in a
@@ -627,30 +645,37 @@ acpi_os_queue_for_execution(u32 priority,
 	 * We can save time and code by allocating the DPC and tq_structs
 	 * from the same memory.
 	 */
-
-	dpc =
-	    kmalloc(sizeof(struct acpi_os_dpc) + sizeof(struct work_struct),
-		    GFP_ATOMIC);
+	if (type == OSL_NOTIFY_HANDLER) {
+		dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_KERNEL);
+	} else {
+		dpc = kmalloc(sizeof(struct acpi_os_dpc) +
+				sizeof(struct work_struct), GFP_ATOMIC);
+	}
 	if (!dpc)
-		return_ACPI_STATUS(AE_NO_MEMORY);
-
+		return AE_NO_MEMORY;
 	dpc->function = function;
 	dpc->context = context;
 
-	task = (void *)(dpc + 1);
-	INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc);
-
-	if (!queue_work(kacpid_wq, task)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-				  "Call to queue_work() failed.\n"));
-		kfree(dpc);
-		status = AE_ERROR;
+	if (type == OSL_NOTIFY_HANDLER) {
+		p = kthread_create(acpi_os_execute_thread, dpc, "kacpid_notify");
+		if (!IS_ERR(p)) {
+			wake_up_process(p);
+		} else {
+			status = AE_NO_MEMORY;
+			kfree(dpc);
+		}
+	} else {
+		task = (void *)(dpc + 1);
+		INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc);
+		if (!queue_work(kacpid_wq, task)) {
+			status = AE_ERROR;
+			kfree(dpc);
+		}
 	}
-
-	return_ACPI_STATUS(status);
+	return status;
 }
 
-EXPORT_SYMBOL(acpi_os_queue_for_execution);
+EXPORT_SYMBOL(acpi_os_execute);
 
 void acpi_os_wait_events_complete(void *context)
 {
@@ -662,35 +687,19 @@ EXPORT_SYMBOL(acpi_os_wait_events_complete);
 
 /*
  * Allocate the memory for a spinlock and initialize it.
  */
-acpi_status acpi_os_create_lock(acpi_handle * out_handle)
+acpi_status acpi_os_create_lock(acpi_spinlock * handle)
 {
-	spinlock_t *lock_ptr;
-
-	ACPI_FUNCTION_TRACE("os_create_lock");
+	spin_lock_init(*handle);
 
-	lock_ptr = acpi_os_allocate(sizeof(spinlock_t));
-
-	spin_lock_init(lock_ptr);
-
-	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating spinlock[%p].\n", lock_ptr));
-
-	*out_handle = lock_ptr;
-
-	return_ACPI_STATUS(AE_OK);
+	return AE_OK;
 }
 
 /*
  * Deallocate the memory for a spinlock.
  */
-void acpi_os_delete_lock(acpi_handle handle)
+void acpi_os_delete_lock(acpi_spinlock handle)
 {
-	ACPI_FUNCTION_TRACE("os_create_lock");
-
-	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting spinlock[%p].\n", handle));
-
-	acpi_os_free(handle);
-
-	return_VOID;
+	return;
 }
 
 acpi_status
@@ -698,11 +707,10 @@ acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
 {
 	struct semaphore *sem = NULL;
 
-	ACPI_FUNCTION_TRACE("os_create_semaphore");
 
 	sem = acpi_os_allocate(sizeof(struct semaphore));
 	if (!sem)
-		return_ACPI_STATUS(AE_NO_MEMORY);
+		return AE_NO_MEMORY;
 	memset(sem, 0, sizeof(struct semaphore));
 
 	sema_init(sem, initial_units);
@@ -712,7 +720,7 @@ acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
 			  *handle, initial_units));
 
-	return_ACPI_STATUS(AE_OK);
+	return AE_OK;
 }
 
 EXPORT_SYMBOL(acpi_os_create_semaphore);
 
@@ -728,17 +736,16 @@ acpi_status acpi_os_delete_semaphore(acpi_handle handle)
 {
 	struct semaphore *sem = (struct semaphore *)handle;
 
-	ACPI_FUNCTION_TRACE("os_delete_semaphore");
 
 	if (!sem)
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
+		return AE_BAD_PARAMETER;
 
 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));
 
 	acpi_os_free(sem);
 	sem = NULL;
 
-	return_ACPI_STATUS(AE_OK);
+	return AE_OK;
 }
 
 EXPORT_SYMBOL(acpi_os_delete_semaphore);
 
@@ -758,20 +765,16 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
 	struct semaphore *sem = (struct semaphore *)handle;
 	int ret = 0;
 
-	ACPI_FUNCTION_TRACE("os_wait_semaphore");
 
 	if (!sem || (units < 1))
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
+		return AE_BAD_PARAMETER;
 
 	if (units > 1)
-		return_ACPI_STATUS(AE_SUPPORT);
+		return AE_SUPPORT;
 
 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
 			  handle, units, timeout));
 
-	if (in_atomic())
-		timeout = 0;
-
 	switch (timeout) {
 		/*
 		 * No Wait:
@@ -816,17 +819,17 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
 	}
 
 	if (ACPI_FAILURE(status)) {
-		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
-				  "Failed to acquire semaphore[%p|%d|%d], %s\n",
+		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+				  "Failed to acquire semaphore[%p|%d|%d], %s",
 				  handle, units, timeout,
 				  acpi_format_exception(status)));
 	} else {
 		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
-				  "Acquired semaphore[%p|%d|%d]\n", handle,
+				  "Acquired semaphore[%p|%d|%d]", handle,
 				  units, timeout));
 	}
 
-	return_ACPI_STATUS(status);
+	return status;
 }
 
 EXPORT_SYMBOL(acpi_os_wait_semaphore);
 
@@ -838,20 +841,19 @@ acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
 {
 	struct semaphore *sem = (struct semaphore *)handle;
 
-	ACPI_FUNCTION_TRACE("os_signal_semaphore");
 
 	if (!sem || (units < 1))
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
+		return AE_BAD_PARAMETER;
 
 	if (units > 1)
-		return_ACPI_STATUS(AE_SUPPORT);
+		return AE_SUPPORT;
 
 	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
 			  units));
 
 	up(sem);
 
-	return_ACPI_STATUS(AE_OK);
+	return AE_OK;
 }
 
 EXPORT_SYMBOL(acpi_os_signal_semaphore);
 
@@ -896,14 +898,6 @@ u8 acpi_os_writable(void *ptr, acpi_size len)
 }
 #endif
 
-u32 acpi_os_get_thread_id(void)
-{
-	if (!in_atomic())
-		return current->pid;
-
-	return 0;
-}
-
 acpi_status acpi_os_signal(u32 function, void *info)
 {
 	switch (function) {
@@ -1028,10 +1022,10 @@ EXPORT_SYMBOL(max_cstate);
  * handle is a pointer to the spinlock_t.
  */
-acpi_cpu_flags acpi_os_acquire_lock(acpi_handle handle)
+acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
 {
 	acpi_cpu_flags flags;
-	spin_lock_irqsave((spinlock_t *) handle, flags);
+	spin_lock_irqsave(lockp, flags);
 	return flags;
 }
@@ -1039,9 +1033,9 @@ acpi_cpu_flags acpi_os_acquire_lock(acpi_handle handle)
  * Release a spinlock. See above.
  */
-void acpi_os_release_lock(acpi_handle handle, acpi_cpu_flags flags)
+void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
 {
-	spin_unlock_irqrestore((spinlock_t *) handle, flags);
+	spin_unlock_irqrestore(lockp, flags);
 }
 
 #ifndef ACPI_USE_LOCAL_CACHE
@@ -1050,12 +1044,12 @@ void acpi_os_release_lock(acpi_handle handle, acpi_cpu_flags flags)
  *
  * FUNCTION:    acpi_os_create_cache
  *
- * PARAMETERS:  CacheName       - Ascii name for the cache
- *              ObjectSize      - Size of each cached object
- *              MaxDepth        - Maximum depth of the cache (in objects)
- *              ReturnCache     - Where the new cache object is returned
+ * PARAMETERS:  name            - Ascii name for the cache
+ *              size            - Size of each cached object
+ *              depth           - Maximum depth of the cache (in objects)
+ *              cache           - Where the new cache object is returned
  *
- * RETURN:      Status
+ * RETURN:      status
  *
  * DESCRIPTION: Create a cache object
  *
@@ -1065,7 +1059,10 @@ acpi_status
 acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
 {
 	*cache = kmem_cache_create(name, size, 0, 0, NULL, NULL);
-	return AE_OK;
+	if (cache == NULL)
+		return AE_ERROR;
+	else
+		return AE_OK;
 }
 
 /*******************************************************************************
@@ -1134,16 +1131,63 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
 *
 * RETURN:      Status
 *
- * DESCRIPTION: Get an object from the specified cache.  If cache is empty,
- *              the object is allocated.
+ * DESCRIPTION: Return a zero-filled object.
 *
 ******************************************************************************/
 
 void *acpi_os_acquire_object(acpi_cache_t * cache)
 {
-	void *object = kmem_cache_alloc(cache, GFP_KERNEL);
+	void *object = kmem_cache_zalloc(cache, GFP_KERNEL);
 	WARN_ON(!object);
 	return object;
 }
 
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_validate_interface
+ *
+ * PARAMETERS:  interface           - Requested interface to be validated
+ *
+ * RETURN:      AE_OK if interface is supported, AE_SUPPORT otherwise
+ *
+ * DESCRIPTION: Match an interface string to the interfaces supported by the
+ *              host. Strings originate from an AML call to the _OSI method.
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_validate_interface (char *interface)
+{
+
+    return AE_SUPPORT;
+}
+
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_os_validate_address
+ *
+ * PARAMETERS:  space_id             - ACPI space ID
+ *              address              - Physical address
+ *              length               - Address length
+ *
+ * RETURN:      AE_OK if address/length is valid for the space_id. Otherwise,
+ *              should return AE_AML_ILLEGAL_ADDRESS.
+ *
+ * DESCRIPTION: Validate a system address via the host OS. Used to validate
+ *              the addresses accessed by AML operation regions.
+ *
+ *****************************************************************************/
+
+acpi_status
+acpi_os_validate_address (
+    u8 space_id,
+    acpi_physical_address address,
+    acpi_size length)
+{
+
+    return AE_OK;
+}
+
+
 #endif
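
Note on the acpi_os_execute() hunk above: OSL_NOTIFY_HANDLER callbacks are now run immediately on their own kernel thread (kthread_create() plus wake_up_process()), while every other callback is still wrapped in a work_struct and queued on the kacpid workqueue, presumably so that a notify handler that blocks cannot hold up or deadlock work queued behind it. The sketch below is an illustrative userspace analogue of that dispatch split, with POSIX threads standing in for kthread_create()/queue_work(); every name in it (sketch_execute, EXEC_NOTIFY, struct dpc, and so on) is invented for the example and is not part of the kernel patch.

/*
 * Illustrative userspace analogue of the two dispatch paths in
 * acpi_os_execute(): "notify" callbacks each get their own short-lived
 * thread, everything else is handed to a single worker thread that
 * stands in for the kacpid workqueue.  Not kernel code.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

typedef void (*exec_callback)(void *ctx);

struct dpc {				/* mirrors struct acpi_os_dpc */
	exec_callback function;
	void *context;
};

enum exec_type { EXEC_DEFERRED, EXEC_NOTIFY };

/* --- "workqueue" path: one worker draining a tiny FIFO --- */
static struct dpc *fifo[16];
static unsigned int q_head, q_tail;
static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t q_cond = PTHREAD_COND_INITIALIZER;

static void *worker(void *unused)
{
	(void)unused;
	for (;;) {
		struct dpc *d;

		pthread_mutex_lock(&q_lock);
		while (q_head == q_tail)
			pthread_cond_wait(&q_cond, &q_lock);
		d = fifo[q_head++ % 16];
		pthread_mutex_unlock(&q_lock);

		d->function(d->context);	/* deferred execution */
		free(d);
	}
	return NULL;
}

/* --- "notify" path: a dedicated thread per callback --- */
static void *notify_thread(void *arg)
{
	struct dpc *d = arg;

	d->function(d->context);
	free(d);
	return NULL;			/* thread exits, like do_exit(0) */
}

static int sketch_execute(enum exec_type type, exec_callback fn, void *ctx)
{
	struct dpc *d;

	if (!fn)
		return -1;		/* cf. AE_BAD_PARAMETER */
	d = malloc(sizeof(*d));
	if (!d)
		return -1;		/* cf. AE_NO_MEMORY */
	d->function = fn;
	d->context = ctx;

	if (type == EXEC_NOTIFY) {
		pthread_t t;

		if (pthread_create(&t, NULL, notify_thread, d)) {
			free(d);
			return -1;
		}
		pthread_detach(t);	/* cf. kthread_create() + wake_up_process() */
	} else {
		pthread_mutex_lock(&q_lock);
		fifo[q_tail++ % 16] = d;
		pthread_cond_signal(&q_cond);	/* cf. queue_work(kacpid_wq, task) */
		pthread_mutex_unlock(&q_lock);
	}
	return 0;
}

static void say(void *msg)
{
	printf("%s\n", (char *)msg);
}

int main(void)
{
	pthread_t w;

	pthread_create(&w, NULL, worker, NULL);
	sketch_execute(EXEC_DEFERRED, say, "deferred via the single worker");
	sketch_execute(EXEC_NOTIFY, say, "notify callback on its own thread");
	sleep(1);			/* crude: give the callbacks time to run */
	return 0;
}

Built with something like "cc -pthread sketch.c", the deferred message is printed by the single worker while the notify message comes from its own short-lived thread, mirroring the split the patch introduces.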