/*
 * Copyright (C) 2010 ARM Limited. All rights reserved.
 *
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <string.h>
#include <errno.h>
#include <pthread.h>

#include <cutils/log.h>
#include <cutils/atomic.h>
#include <hardware/hardware.h>
#include <hardware/gralloc.h>

#include <sys/ioctl.h>

#include "alloc_device.h"
#include "gralloc_priv.h"
#include "gralloc_helper.h"
#include "framebuffer_device.h"

#if GRALLOC_ARM_UMP_MODULE
#include <ump/ump.h>
#include <ump/ump_ref_drv.h>
#endif

#if GRALLOC_ARM_DMA_BUF_MODULE
#include <linux/ion.h>
#include <ion/ion.h>
#endif

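/* Round 'value' up to the next multiple of 'base'; 'base' must be a power of two. */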
#define GRALLOC_ALIGN( value, base ) (((value) + ((base) - 1)) & ~((base) - 1))


#if GRALLOC_SIMULATE_FAILURES
#include <cutils/properties.h>

/* system property keys for controlling simulated UMP allocation failures */
#define PROP_MALI_TEST_GRALLOC_FAIL_FIRST    "mali.test.gralloc.fail_first"
#define PROP_MALI_TEST_GRALLOC_FAIL_INTERVAL "mali.test.gralloc.fail_interval"

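/*
 * Test hook: decides whether the current UMP allocation attempt should be
 * simulated as a failure, based on the two system properties above. The first
 * simulated failure happens on call number 'fail_first', and then on every
 * 'fail_interval' calls after that.
 */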
static int __ump_alloc_should_fail()
{

    static unsigned int call_count = 0;
    unsigned int first_fail = 0;
    unsigned int fail_period = 0;
    int fail = 0;

    ++call_count;

    /* read the system properties that control failure simulation */
    {
        char prop_value[PROPERTY_VALUE_MAX];

        if (property_get(PROP_MALI_TEST_GRALLOC_FAIL_FIRST, prop_value, "0") > 0)
        {
            sscanf(prop_value, "%11u", &first_fail);
        }

        if (property_get(PROP_MALI_TEST_GRALLOC_FAIL_INTERVAL, prop_value, "0") > 0)
        {
            sscanf(prop_value, "%11u", &fail_period);
        }
    }

    /* failure simulation is enabled by setting the first_fail property to non-zero */
    if (first_fail > 0)
    {
        AINF("iteration %u (fail=%u, period=%u)\n", call_count, first_fail, fail_period);

        fail = (call_count == first_fail) ||
               (call_count > first_fail && fail_period > 0 && 0 == (call_count - first_fail) % fail_period);

        if (fail)
        {
            AERR("failed ump_ref_drv_allocate on iteration #%u\n", call_count);
        }
    }

    return fail;
}
#endif


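/*
 * Allocates a non-framebuffer buffer, either as a dma-buf exported from ION
 * (GRALLOC_ARM_DMA_BUF_MODULE) or as UMP memory (GRALLOC_ARM_UMP_MODULE),
 * maps it into this process, and wraps it in a private_handle_t.
 * Returns 0 on success and -1 on failure.
 */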
static int gralloc_alloc_buffer(alloc_device_t *dev, size_t size, int usage, buffer_handle_t *pHandle)
{
#if GRALLOC_ARM_DMA_BUF_MODULE
    {
        private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
        ion_user_handle_t ion_hnd;
        unsigned char *cpu_ptr;
        int shared_fd;
        int ret;

        ret = ion_alloc(m->ion_client, size, 0, ION_HEAP_SYSTEM_MASK, 0, &(ion_hnd));

        if (ret != 0)
        {
            AERR("Failed to ion_alloc from ion_client:%d", m->ion_client);
            return -1;
        }

        ret = ion_share(m->ion_client, ion_hnd, &shared_fd);

        if (ret != 0)
        {
            AERR("ion_share( %d ) failed", m->ion_client);

            if (0 != ion_free(m->ion_client, ion_hnd))
            {
                AERR("ion_free( %d ) failed", m->ion_client);
            }

            return -1;
        }

        // we do not need ion_hnd once we have shared_fd
        if (0 != ion_free(m->ion_client, ion_hnd))
        {
            AWAR("ion_free( %d ) failed", m->ion_client);
        }
        ion_hnd = ION_INVALID_HANDLE;

        cpu_ptr = (unsigned char *)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, shared_fd, 0);

        if (MAP_FAILED == cpu_ptr)
        {
            AERR("ion_map( %d ) failed", m->ion_client);

            close(shared_fd);
            return -1;
        }

        private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_ION, usage, size, cpu_ptr, private_handle_t::LOCK_STATE_MAPPED);

        if (NULL != hnd)
        {
            hnd->share_fd = shared_fd;
            *pHandle = hnd;
            return 0;
        }
        else
        {
            AERR("Gralloc out of mem for ion_client:%d", m->ion_client);
        }

        close(shared_fd);
        ret = munmap(cpu_ptr, size);

        if (0 != ret)
        {
            AERR("munmap failed for base:%p size: %lu", cpu_ptr, (unsigned long)size);
        }

        return -1;
    }
#endif

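    /* UMP path: allocate through the UMP reference driver, map it into this
     * process, and record the allocation's secure ID so other processes can
     * import it. */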
#if GRALLOC_ARM_UMP_MODULE
    MALI_IGNORE(dev);
    {
        ump_handle ump_mem_handle;
        void *cpu_ptr;
        ump_secure_id ump_id;
        ump_alloc_constraints constraints;

        size = round_up_to_page_size(size);

        if ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN)
        {
            constraints = UMP_REF_DRV_CONSTRAINT_USE_CACHE;
        }
        else
        {
            constraints = UMP_REF_DRV_CONSTRAINT_NONE;
        }

#if GRALLOC_SIMULATE_FAILURES

        /* if the failure condition matches, fail this iteration */
        if (__ump_alloc_should_fail())
        {
            ump_mem_handle = UMP_INVALID_MEMORY_HANDLE;
        }
        else
#endif
        {
            ump_mem_handle = ump_ref_drv_allocate(size, constraints);

            if (UMP_INVALID_MEMORY_HANDLE != ump_mem_handle)
            {
                cpu_ptr = ump_mapped_pointer_get(ump_mem_handle);

                if (NULL != cpu_ptr)
                {
                    ump_id = ump_secure_id_get(ump_mem_handle);

                    if (UMP_INVALID_SECURE_ID != ump_id)
                    {
                        private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_UMP, usage, size, cpu_ptr,
                                                                     private_handle_t::LOCK_STATE_MAPPED, ump_id, ump_mem_handle);

                        if (NULL != hnd)
                        {
                            *pHandle = hnd;
                            return 0;
                        }
                        else
                        {
                            AERR("gralloc_alloc_buffer() failed to allocate handle. ump_handle = %p, ump_id = %d", ump_mem_handle, ump_id);
                        }
                    }
                    else
                    {
                        AERR("gralloc_alloc_buffer() failed to retrieve valid secure id. ump_handle = %p", ump_mem_handle);
                    }

                    ump_mapped_pointer_release(ump_mem_handle);
                }
                else
                {
                    AERR("gralloc_alloc_buffer() failed to map UMP memory. ump_handle = %p", ump_mem_handle);
                }

                ump_reference_release(ump_mem_handle);
            }
            else
            {
                AERR("gralloc_alloc_buffer() failed to allocate UMP memory. size:%zu constraints: %d", size, constraints);
            }
        }

        return -1;
    }
#endif

}

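/*
 * Carves one buffer out of the already-mapped framebuffer memory. Falls back
 * to a regular gralloc buffer when the framebuffer only holds a single buffer
 * (no page-flipping possible), and returns -ENOMEM once every framebuffer
 * slot is in use. Must be called with m->lock held.
 */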
static int gralloc_alloc_framebuffer_locked(alloc_device_t *dev, size_t size, int usage, buffer_handle_t *pHandle)
{
    private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);

    // allocate the framebuffer
    if (m->framebuffer == NULL)
    {
        // initialize the framebuffer; it is mapped once and stays mapped for the lifetime of the process
        int err = init_frame_buffer_locked(m);

        if (err < 0)
        {
            return err;
        }
    }

    const uint32_t bufferMask = m->bufferMask;
    const uint32_t numBuffers = m->numBuffers;
    const size_t bufferSize = m->finfo.line_length * m->info.yres;

    if (numBuffers == 1)
    {
        // If we have only one buffer, we never use page-flipping. Instead,
        // we return a regular buffer which will be memcpy'ed to the main
        // screen when post is called.
        int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;
        AERR("fallback to single buffering. Virtual Y-res too small %d", m->info.yres);
        return gralloc_alloc_buffer(dev, bufferSize, newUsage, pHandle);
    }

    if (bufferMask >= ((1LU << numBuffers) - 1))
    {
        // We ran out of buffers.
        return -ENOMEM;
    }

    void *vaddr = m->framebuffer->base;

    // find a free slot
    for (uint32_t i = 0 ; i < numBuffers ; i++)
    {
        if ((bufferMask & (1LU << i)) == 0)
        {
            m->bufferMask |= (1LU << i);
            break;
        }

        vaddr = (void *)((uintptr_t)vaddr + bufferSize);
    }

    int fbdev_fd = m->framebuffer->shallow_fbdev_fd;
    // The entire framebuffer memory is already mapped; create a buffer object for this slice of it
    private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_FRAMEBUFFER, usage, size, vaddr,
                                                 0, fbdev_fd, (uintptr_t)vaddr - (uintptr_t)m->framebuffer->base);
#if GRALLOC_ARM_UMP_MODULE
    hnd->ump_id = m->framebuffer->ump_id;

    /* create a backing ump memory handle if the framebuffer is exposed as a secure ID */
    if ((int)UMP_INVALID_SECURE_ID != hnd->ump_id)
    {
        hnd->ump_mem_handle = (int)ump_handle_create_from_secure_id(hnd->ump_id);

        if ((int)UMP_INVALID_MEMORY_HANDLE == hnd->ump_mem_handle)
        {
            AINF("warning: unable to create UMP handle from secure ID %i\n", hnd->ump_id);
        }
    }

#endif

#if GRALLOC_ARM_DMA_BUF_MODULE
    {
#ifdef FBIOGET_DMABUF
        struct fb_dmabuf_export fb_dma_buf;

        if (ioctl(fbdev_fd, FBIOGET_DMABUF, &fb_dma_buf) == 0)
        {
            AINF("framebuffer accessed with dma buf (fd 0x%x)\n", (int)fb_dma_buf.fd);
            hnd->share_fd = fb_dma_buf.fd;
        }

#endif
    }

    // correct numFds/numInts when there is no dmabuf fd
    if (hnd->share_fd < 0)
    {
        hnd->numFds--;
        hnd->numInts++;
    }
#endif

    *pHandle = hnd;

    return 0;
}

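/* Thread-safe wrapper around gralloc_alloc_framebuffer_locked(). */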
static int gralloc_alloc_framebuffer(alloc_device_t *dev, size_t size, int usage, buffer_handle_t *pHandle)
{
    private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
    pthread_mutex_lock(&m->lock);
    int err = gralloc_alloc_framebuffer_locked(dev, size, usage, pHandle);
    pthread_mutex_unlock(&m->lock);
    return err;
}

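/*
 * gralloc "alloc" entry point: computes the size and stride for the requested
 * format, allocates either a framebuffer slot (GRALLOC_USAGE_HW_FB) or a
 * regular buffer, and fills in the dimensions, format and YUV color-space
 * information on the resulting private_handle_t.
 */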
static int alloc_device_alloc(alloc_device_t *dev, int w, int h, int format, int usage, buffer_handle_t *pHandle, int *pStride)
{
    if (!pHandle || !pStride)
    {
        return -EINVAL;
    }

    size_t size;
    size_t stride;

    if (format == HAL_PIXEL_FORMAT_YCrCb_420_SP || format == HAL_PIXEL_FORMAT_YV12
        /* HAL_PIXEL_FORMAT_YCbCr_420_SP, HAL_PIXEL_FORMAT_YCbCr_420_P and HAL_PIXEL_FORMAT_YCbCr_422_I are not defined in Android.
         * To enable Mali DDK EGLImage support for these formats, first add them to Android's system/core/include/system/graphics.h,
         * then define SUPPORT_LEGACY_FORMAT in the same header file (the Mali DDK also checks this definition).
         */
#ifdef SUPPORT_LEGACY_FORMAT
        || format == HAL_PIXEL_FORMAT_YCbCr_420_SP || format == HAL_PIXEL_FORMAT_YCbCr_420_P || format == HAL_PIXEL_FORMAT_YCbCr_422_I
#endif
       )
    {
        switch (format)
        {
            case HAL_PIXEL_FORMAT_YCrCb_420_SP:
                stride = GRALLOC_ALIGN(w, 16);
                size = GRALLOC_ALIGN(h, 16) * (stride + GRALLOC_ALIGN(stride / 2, 16));
                break;

            case HAL_PIXEL_FORMAT_YV12:
#ifdef SUPPORT_LEGACY_FORMAT
            case HAL_PIXEL_FORMAT_YCbCr_420_P:
#endif
                stride = GRALLOC_ALIGN(w, 16);
                size = GRALLOC_ALIGN(h, 2) * (stride + GRALLOC_ALIGN(stride / 2, 16));
                break;

#ifdef SUPPORT_LEGACY_FORMAT

            case HAL_PIXEL_FORMAT_YCbCr_420_SP:
                stride = GRALLOC_ALIGN(w, 16);
                size = GRALLOC_ALIGN(h, 16) * (stride + GRALLOC_ALIGN(stride / 2, 16));
                break;

            case HAL_PIXEL_FORMAT_YCbCr_422_I:
                stride = GRALLOC_ALIGN(w, 16);
                size = h * stride * 2;
                break;
#endif

            default:
                return -EINVAL;
        }
    }
    else
    {
        int bpp = 0;

        switch (format)
        {
            case HAL_PIXEL_FORMAT_RGBA_8888:
            case HAL_PIXEL_FORMAT_RGBX_8888:
            case HAL_PIXEL_FORMAT_BGRA_8888:
                bpp = 4;
                break;

            case HAL_PIXEL_FORMAT_RGB_888:
                bpp = 3;
                break;

            case HAL_PIXEL_FORMAT_RGB_565:
#if PLATFORM_SDK_VERSION < 19
            case HAL_PIXEL_FORMAT_RGBA_5551:
            case HAL_PIXEL_FORMAT_RGBA_4444:
#endif
                bpp = 2;
                break;

            default:
                return -EINVAL;
        }

        size_t bpr = GRALLOC_ALIGN(w * bpp, 64);
        size = bpr * h;
        stride = bpr / bpp;
    }

    int err;

#ifndef MALI_600

    if (usage & GRALLOC_USAGE_HW_FB)
    {
        err = gralloc_alloc_framebuffer(dev, size, usage, pHandle);
    }
    else
#endif
    {
        err = gralloc_alloc_buffer(dev, size, usage, pHandle);
    }

    if (err < 0)
    {
        return err;
    }

    /* match the framebuffer format */
    if (usage & GRALLOC_USAGE_HW_FB)
    {
#ifdef GRALLOC_16_BITS
        format = HAL_PIXEL_FORMAT_RGB_565;
#else
        format = HAL_PIXEL_FORMAT_BGRA_8888;
#endif
    }

    private_handle_t *hnd = (private_handle_t *)*pHandle;
    int private_usage = usage & (GRALLOC_USAGE_PRIVATE_0 |
                                 GRALLOC_USAGE_PRIVATE_1);

    switch (private_usage)
    {
        case 0:
            hnd->yuv_info = MALI_YUV_BT601_NARROW;
            break;

        case GRALLOC_USAGE_PRIVATE_1:
            hnd->yuv_info = MALI_YUV_BT601_WIDE;
            break;

        case GRALLOC_USAGE_PRIVATE_0:
            hnd->yuv_info = MALI_YUV_BT709_NARROW;
            break;

        case (GRALLOC_USAGE_PRIVATE_0 | GRALLOC_USAGE_PRIVATE_1):
            hnd->yuv_info = MALI_YUV_BT709_WIDE;
            break;
    }

    hnd->width = w;
    hnd->height = h;
    hnd->format = format;
    hnd->stride = stride;

    *pStride = stride;
    return 0;
}

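/*
 * gralloc "free" entry point: releases the framebuffer slot, UMP memory or
 * ION/dma-buf mapping backing the handle, then deletes the handle itself.
 */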
static int alloc_device_free(alloc_device_t *dev, buffer_handle_t handle)
{
    if (private_handle_t::validate(handle) < 0)
    {
        return -EINVAL;
    }

    private_handle_t const *hnd = reinterpret_cast<private_handle_t const *>(handle);

    if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)
    {
        // free this buffer
        private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
        const size_t bufferSize = m->finfo.line_length * m->info.yres;
        int index = ((uintptr_t)hnd->base - (uintptr_t)m->framebuffer->base) / bufferSize;
        m->bufferMask &= ~(1 << index);

#if GRALLOC_ARM_UMP_MODULE

        if ((int)UMP_INVALID_MEMORY_HANDLE != hnd->ump_mem_handle)
        {
            ump_reference_release((ump_handle)hnd->ump_mem_handle);
        }

#endif
    }
    else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP)
    {
#if GRALLOC_ARM_UMP_MODULE

        /* Buffer might be unregistered, so we need to check for an invalid ump handle */
        if ((int)UMP_INVALID_MEMORY_HANDLE != hnd->ump_mem_handle)
        {
            ump_mapped_pointer_release((ump_handle)hnd->ump_mem_handle);
            ump_reference_release((ump_handle)hnd->ump_mem_handle);
        }

#else
        AERR("Can't free ump memory for handle:0x%p. Not supported.", hnd);
#endif
    }
    else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
    {
#if GRALLOC_ARM_DMA_BUF_MODULE
        private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);

        /* Buffer might be unregistered, so check that it is still mapped before unmapping */
        if (0 != hnd->base)
        {
            if (0 != munmap((void *)hnd->base, hnd->size))
            {
                AERR("Failed to munmap handle 0x%p", hnd);
            }
        }

        close(hnd->share_fd);

        memset((void *)hnd, 0, sizeof(*hnd));
#else
        AERR("Can't free dma_buf memory for handle:0x%p. Not supported.", hnd);
#endif

    }

    delete hnd;

    return 0;
}

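/* Closes the allocator device, releasing the module's ION client and/or the UMP library reference. */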
static int alloc_device_close(struct hw_device_t *device)
{
    alloc_device_t *dev = reinterpret_cast<alloc_device_t *>(device);

    if (dev)
    {
#if GRALLOC_ARM_DMA_BUF_MODULE
        private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);

        /* ion_close() releases the ION client and closes its fd */
        if (0 != ion_close(m->ion_client))
        {
            AERR("Failed to close ion_client: %d", m->ion_client);
        }

#endif
        delete dev;
#if GRALLOC_ARM_UMP_MODULE
        ump_close(); // Our UMP memory refs will be released automatically here...
#endif
    }

    return 0;
}

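/*
 * Creates and initializes the allocator device, opening the UMP library
 * and/or an ION client as required by the build configuration.
 */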
int alloc_device_open(hw_module_t const *module, const char *name, hw_device_t **device)
{
    MALI_IGNORE(name);
    alloc_device_t *dev;

    dev = new alloc_device_t;

    if (NULL == dev)
    {
        return -1;
    }

#if GRALLOC_ARM_UMP_MODULE
    ump_result ump_res = ump_open();

    if (UMP_OK != ump_res)
    {
        AERR("UMP open failed with %d", ump_res);
        delete dev;
        return -1;
    }

#endif

    /* initialize our state here */
    memset(dev, 0, sizeof(*dev));

    /* initialize the procs */
    dev->common.tag = HARDWARE_DEVICE_TAG;
    dev->common.version = 0;
    dev->common.module = const_cast<hw_module_t *>(module);
    dev->common.close = alloc_device_close;
    dev->alloc = alloc_device_alloc;
    dev->free = alloc_device_free;

#if GRALLOC_ARM_DMA_BUF_MODULE
    private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
    m->ion_client = ion_open();

    if (m->ion_client < 0)
    {
        AERR("ion_open failed with %s", strerror(errno));
        delete dev;
        return -1;
    }

#endif

    *device = &dev->common;

    return 0;
}