Revert "staging: android: add ion_legency driver"

This reverts commit b53e459d52.
This reverts commit 32364c4431.

Change-Id: Ibffd99caf31e756870ac45d5e93c831a12b241bd
Signed-off-by: Jianqun Xu <jay.xu@rock-chips.com>
Authored by Jianqun Xu on 2020-09-29 17:35:37 +08:00; committed by Tao Huang.
parent 403778342a
commit 59cab3cae5
22 changed files with 2 additions and 5095 deletions


@@ -25,8 +25,6 @@ config ANDROID_VSOC
source "drivers/staging/android/ion/Kconfig"
source "drivers/staging/android/ion_legency/Kconfig"
source "drivers/staging/android/fiq_debugger/Kconfig"
endif # if ANDROID


@@ -1,7 +1,6 @@
ccflags-y += -I$(src) # needed for trace events
obj-y += ion/
obj-$(CONFIG_ION_LEGENCY) += ion_legency/
obj-$(CONFIG_FIQ_DEBUGGER) += fiq_debugger/
obj-$(CONFIG_ASHMEM) += ashmem.o


@@ -150,7 +150,7 @@ static int rk_ion_probe(struct platform_device *pdev)
}
pr_info("rockchip ion: success to create - %s\n",
heaps[i]->name);
ion_device_add_heap_legency(idev, heaps[i]);
ion_device_add_heap(idev, heaps[i]);
}
platform_set_drvdata(pdev, idev);


@@ -1,17 +0,0 @@
menuconfig ION_LEGENCY
bool "Legency Ion Memory Manager"
depends on HAVE_MEMBLOCK && HAS_DMA && MMU
select GENERIC_ALLOCATOR
select DMA_SHARED_BUFFER
---help---
Choose this option to enable the Legency ION Memory Manager,
used by Android to efficiently allocate buffers
from userspace that can be shared between drivers.
If you're not using Android it's probably safe to
say N here.
config ION_LEGENCY_ROCKCHIP
tristate "Legency Ion for Rockchip"
depends on ARCH_ROCKCHIP && ION_LEGENCY
help
Choose this option if you wish to use legency ion on a Rockchip SoC.


@@ -1,6 +0,0 @@
obj-$(CONFIG_ION_LEGENCY) += ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
ifdef CONFIG_COMPAT
obj-$(CONFIG_ION_LEGENCY) += compat_ion.o
endif
obj-$(CONFIG_ION_LEGENCY_ROCKCHIP) += rockchip/


@@ -1,195 +0,0 @@
/*
* drivers/staging/android/ion/compat_ion.c
*
* Copyright (C) 2013 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include "ion.h"
#include "compat_ion.h"
/* See drivers/staging/android/uapi/ion.h for the definition of these structs */
struct compat_ion_allocation_data {
compat_size_t len;
compat_size_t align;
compat_uint_t heap_id_mask;
compat_uint_t flags;
compat_int_t handle;
};
struct compat_ion_custom_data {
compat_uint_t cmd;
compat_ulong_t arg;
};
struct compat_ion_handle_data {
compat_int_t handle;
};
#define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
struct compat_ion_allocation_data)
#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, \
struct compat_ion_handle_data)
#define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \
struct compat_ion_custom_data)
static int compat_get_ion_allocation_data(
struct compat_ion_allocation_data __user *data32,
struct ion_allocation_data __user *data)
{
compat_size_t s;
compat_uint_t u;
compat_int_t i;
int err;
err = get_user(s, &data32->len);
err |= put_user(s, &data->len);
err |= get_user(s, &data32->align);
err |= put_user(s, &data->align);
err |= get_user(u, &data32->heap_id_mask);
err |= put_user(u, &data->heap_id_mask);
err |= get_user(u, &data32->flags);
err |= put_user(u, &data->flags);
err |= get_user(i, &data32->handle);
err |= put_user(i, &data->handle);
return err;
}
static int compat_get_ion_handle_data(
struct compat_ion_handle_data __user *data32,
struct ion_handle_data __user *data)
{
compat_int_t i;
int err;
err = get_user(i, &data32->handle);
err |= put_user(i, &data->handle);
return err;
}
static int compat_put_ion_allocation_data(
struct compat_ion_allocation_data __user *data32,
struct ion_allocation_data __user *data)
{
compat_size_t s;
compat_uint_t u;
compat_int_t i;
int err;
err = get_user(s, &data->len);
err |= put_user(s, &data32->len);
err |= get_user(s, &data->align);
err |= put_user(s, &data32->align);
err |= get_user(u, &data->heap_id_mask);
err |= put_user(u, &data32->heap_id_mask);
err |= get_user(u, &data->flags);
err |= put_user(u, &data32->flags);
err |= get_user(i, &data->handle);
err |= put_user(i, &data32->handle);
return err;
}
static int compat_get_ion_custom_data(
struct compat_ion_custom_data __user *data32,
struct ion_custom_data __user *data)
{
compat_uint_t cmd;
compat_ulong_t arg;
int err;
err = get_user(cmd, &data32->cmd);
err |= put_user(cmd, &data->cmd);
err |= get_user(arg, &data32->arg);
err |= put_user(arg, &data->arg);
return err;
};
long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
long ret;
if (!filp->f_op->unlocked_ioctl)
return -ENOTTY;
switch (cmd) {
case COMPAT_ION_IOC_ALLOC:
{
struct compat_ion_allocation_data __user *data32;
struct ion_allocation_data __user *data;
int err;
data32 = compat_ptr(arg);
data = compat_alloc_user_space(sizeof(*data));
if (data == NULL)
return -EFAULT;
err = compat_get_ion_allocation_data(data32, data);
if (err)
return err;
ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC,
(unsigned long)data);
err = compat_put_ion_allocation_data(data32, data);
return ret ? ret : err;
}
case COMPAT_ION_IOC_FREE:
{
struct compat_ion_handle_data __user *data32;
struct ion_handle_data __user *data;
int err;
data32 = compat_ptr(arg);
data = compat_alloc_user_space(sizeof(*data));
if (data == NULL)
return -EFAULT;
err = compat_get_ion_handle_data(data32, data);
if (err)
return err;
return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE,
(unsigned long)data);
}
case COMPAT_ION_IOC_CUSTOM: {
struct compat_ion_custom_data __user *data32;
struct ion_custom_data __user *data;
int err;
data32 = compat_ptr(arg);
data = compat_alloc_user_space(sizeof(*data));
if (data == NULL)
return -EFAULT;
err = compat_get_ion_custom_data(data32, data);
if (err)
return err;
return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM,
(unsigned long)data);
}
case ION_IOC_SHARE:
case ION_IOC_MAP:
case ION_IOC_IMPORT:
case ION_IOC_SYNC:
return filp->f_op->unlocked_ioctl(filp, cmd,
(unsigned long)compat_ptr(arg));
default:
return -ENOIOCTLCMD;
}
}
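The compat path above only repacks the 32-bit structures and forwards to the native handler, so userspace calls look the same in either ABI. Below is a hedged userspace sketch of the ION_IOC_ALLOC/ION_IOC_FREE round trip; the /dev/ion node name and the uapi header path are assumptions, not taken from this diff.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ion.h"	/* assumed local copy of drivers/staging/android/uapi/ion.h */

int main(void)
{
	/* Mirrors the fields translated by compat_get/put_ion_allocation_data(). */
	struct ion_allocation_data alloc = {
		.len = 4096,
		.align = 4096,
		.heap_id_mask = 1 << 0,	/* assumes a heap with id 0 exists */
		.flags = 0,
	};
	int fd = open("/dev/ion", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, ION_IOC_ALLOC, &alloc) == 0) {
		struct ion_handle_data free_data = { .handle = alloc.handle };

		printf("allocated handle %d\n", (int)alloc.handle);
		ioctl(fd, ION_IOC_FREE, &free_data);
	}
	close(fd);
	return 0;
}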


@@ -1,29 +0,0 @@
/*
* drivers/staging/android/ion/compat_ion.h
*
* Copyright (C) 2013 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _LINUX_COMPAT_ION_H
#define _LINUX_COMPAT_ION_H
#if IS_ENABLED(CONFIG_COMPAT)
long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
#else
#define compat_ion_ioctl NULL
#endif /* CONFIG_COMPAT */
#endif /* _LINUX_COMPAT_ION_H */

(File diff suppressed because it is too large.)


@@ -1,241 +0,0 @@
/*
* drivers/staging/android/ion/ion.h
*
* Copyright (C) 2011 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _LINUX_ION_H
#define _LINUX_ION_H
#include <linux/types.h>
#include "uapi/ion.h"
struct ion_handle;
struct ion_device;
struct ion_heap;
struct ion_mapper;
struct ion_client;
struct ion_buffer;
/*
* This should be removed some day when phys_addr_t's are fully
* plumbed in the kernel, and all instances of ion_phys_addr_t should
* be converted to phys_addr_t. For the time being many kernel interfaces
* do not accept phys_addr_t's that would have to
*/
#define ion_phys_addr_t unsigned long
/**
* struct ion_platform_heap - defines a heap in the given platform
* @type: type of the heap from ion_heap_type enum
* @id: unique identifier for heap. When allocating, heaps with
* higher ids are tried first. At allocation these are passed
* as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS.
* @name: used for debug purposes
* @base: base address of heap in physical memory if applicable
* @size: size of the heap in bytes if applicable
* @align: required alignment in physical memory if applicable
* @priv: private info passed from the board file
*
* Provided by the board file.
*/
struct ion_platform_heap {
enum ion_heap_type type;
unsigned int id;
const char *name;
ion_phys_addr_t base;
size_t size;
ion_phys_addr_t align;
void *priv;
};
/**
* struct ion_platform_data - array of platform heaps passed from board file
* @nr: number of structures in the array
* @heaps: array of platform_heap structures
*
* Provided by the board file in the form of platform data to a platform device.
*/
struct ion_platform_data {
int nr;
struct ion_platform_heap *heaps;
};
struct device *ion_device_get_platform(struct ion_device *idev);
void ion_device_set_platform(struct ion_device *idev,
struct device *dev);
/**
* ion_reserve() - reserve memory for ion heaps if applicable
* @data: platform data specifying starting physical address and
* size
*
* Calls memblock reserve to set aside memory for heaps that are
* located at specific memory addresses or of specific sizes not
* managed by the kernel
*/
void ion_reserve(struct ion_platform_data *data);
/**
* ion_client_create() - allocate a client and returns it
* @dev: the global ion device
* @name: used for debugging
*/
struct ion_client *ion_client_create(struct ion_device *dev,
const char *name);
/**
* ion_client_destroy() - frees a client and all its handles
* @client: the client
*
* Free the provided client and all its resources including
* any handles it is holding.
*/
void ion_client_destroy(struct ion_client *client);
/**
* ion_alloc_legency - allocate ion memory
* @client: the client
* @len: size of the allocation
* @align: requested allocation alignment, lots of hardware blocks
* have alignment requirements of some kind
* @heap_id_mask: mask of heaps to allocate from, if multiple bits are set
* heaps will be tried in order from highest to lowest
* id
* @flags: heap flags, the low 16 bits are consumed by ion, the
* high 16 bits are passed on to the respective heap and
* can be heap custom
*
* Allocate memory in one of the heaps provided in heap mask and return
* an opaque handle to it.
*/
struct ion_handle *ion_alloc_legency(struct ion_client *client, size_t len,
size_t align, unsigned int heap_id_mask,
unsigned int flags);
/**
* ion_free - free a handle
* @client: the client
* @handle: the handle to free
*
* Free the provided handle.
*/
void ion_free(struct ion_client *client, struct ion_handle *handle);
/**
* ion_phys - returns the physical address and len of a handle
* @client: the client
* @handle: the handle
* @addr: a pointer to put the address in
* @len: a pointer to put the length in
*
* This function queries the heap for a particular handle to get the
* handle's physical address. Its output is only correct if
* a heap returns physically contiguous memory -- in other cases
* this api should not be implemented -- ion_sg_table should be used
* instead. Returns -EINVAL if the handle is invalid. This has
* no implications on the reference counting of the handle --
* the returned value may not be valid if the caller is not
* holding a reference.
*/
int ion_phys(struct ion_client *client, struct ion_handle *handle,
ion_phys_addr_t *addr, size_t *len);
/**
* ion_map_dma - return an sg_table describing a handle
* @client: the client
* @handle: the handle
*
* This function returns the sg_table describing
* a particular ion handle.
*/
struct sg_table *ion_sg_table(struct ion_client *client,
struct ion_handle *handle);
/**
* ion_map_kernel - create mapping for the given handle
* @client: the client
* @handle: handle to map
*
* Map the given handle into the kernel and return a kernel address that
* can be used to access this address.
*/
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
/**
* ion_unmap_kernel() - destroy a kernel mapping for a handle
* @client: the client
* @handle: handle to unmap
*/
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
/**
* ion_share_dma_buf() - share buffer as dma-buf
* @client: the client
* @handle: the handle
*/
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
struct ion_handle *handle);
/**
* ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd
* @client: the client
* @handle: the handle
*/
int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);
/**
* ion_import_dma_buf() - given a dma-buf fd from the ion exporter, get a handle
* @client: the client
* @fd: the dma-buf fd
*
* Given a dma-buf fd that was allocated through ion via ion_share_dma_buf,
* import that fd and return a handle representing it. If a dma-buf from
* another exporter is passed in this function will return ERR_PTR(-EINVAL)
*/
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
struct device;
#ifdef CONFIG_RK_IOMMU
int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
struct ion_handle *handle, unsigned long *iova,
unsigned long *size);
void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
struct ion_handle *handle);
#else
static inline int ion_map_iommu(struct device *iommu_dev, struct ion_client *client,
struct ion_handle *handle, unsigned long *iova,
unsigned long *size)
{
return 0;
}
static inline void ion_unmap_iommu(struct device *iommu_dev, struct ion_client *client,
struct ion_handle *handle)
{
}
#endif
void ion_handle_get(struct ion_handle *handle);
int ion_handle_put(struct ion_handle *handle);
struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
int id);
#endif /* _LINUX_ION_H */
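A minimal in-kernel sketch of the client API documented above, assuming a valid struct ion_device and a heap with id 0; it is illustrative only and not part of this commit.

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/string.h>
#include "ion.h"

static int example_ion_client_use(struct ion_device *idev)
{
	struct ion_client *client;
	struct ion_handle *handle;
	void *vaddr;

	client = ion_client_create(idev, "example");
	if (IS_ERR(client))
		return PTR_ERR(client);

	/* one page from heap id 0, uncached (no ION_FLAG_CACHED) */
	handle = ion_alloc_legency(client, PAGE_SIZE, PAGE_SIZE, 1 << 0, 0);
	if (IS_ERR(handle)) {
		ion_client_destroy(client);
		return PTR_ERR(handle);
	}

	vaddr = ion_map_kernel(client, handle);
	if (!IS_ERR(vaddr)) {
		memset(vaddr, 0, PAGE_SIZE);
		ion_unmap_kernel(client, handle);
	}

	ion_free(client, handle);
	ion_client_destroy(client);
	return 0;
}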


@@ -1,193 +0,0 @@
/*
* drivers/staging/android/ion/ion_carveout_heap.c
*
* Copyright (C) 2011 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"
struct ion_carveout_heap {
struct ion_heap heap;
struct gen_pool *pool;
ion_phys_addr_t base;
};
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
unsigned long size,
unsigned long align)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);
if (!offset)
return ION_CARVEOUT_ALLOCATE_FAIL;
return offset;
}
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
unsigned long size)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
return;
gen_pool_free(carveout_heap->pool, addr, size);
}
static int ion_carveout_heap_phys(struct ion_heap *heap,
struct ion_buffer *buffer,
ion_phys_addr_t *addr, size_t *len)
{
struct sg_table *table = buffer->priv_virt;
struct page *page = sg_page(table->sgl);
ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
*addr = paddr;
*len = buffer->size;
return 0;
}
static int ion_carveout_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
unsigned long size, unsigned long align,
unsigned long flags)
{
struct sg_table *table;
ion_phys_addr_t paddr;
int ret;
if (align > PAGE_SIZE)
return -EINVAL;
table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!table)
return -ENOMEM;
ret = sg_alloc_table(table, 1, GFP_KERNEL);
if (ret)
goto err_free;
paddr = ion_carveout_allocate(heap, size, align);
if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
ret = -ENOMEM;
goto err_free_table;
}
sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
buffer->priv_virt = table;
return 0;
err_free_table:
sg_free_table(table);
err_free:
kfree(table);
return ret;
}
static void ion_carveout_heap_free(struct ion_buffer *buffer)
{
struct ion_heap *heap = buffer->heap;
struct sg_table *table = buffer->priv_virt;
struct page *page = sg_page(table->sgl);
ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
ion_heap_buffer_zero_legency(buffer);
if (ion_buffer_cached(buffer))
dma_sync_sg_for_device(NULL, table->sgl, table->nents,
DMA_BIDIRECTIONAL);
ion_carveout_free(heap, paddr, buffer->size);
sg_free_table(table);
kfree(table);
}
static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
return buffer->priv_virt;
}
static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
}
static struct ion_heap_ops carveout_heap_ops = {
.allocate = ion_carveout_heap_allocate,
.free = ion_carveout_heap_free,
.phys = ion_carveout_heap_phys,
.map_dma = ion_carveout_heap_map_dma,
.unmap_dma = ion_carveout_heap_unmap_dma,
.map_user = ion_heap_map_user_legency,
.map_kernel = ion_heap_map_kernel_legency,
.unmap_kernel = ion_heap_unmap_kernel_legency,
};
struct ion_heap *ion_carveout_heap_create_legency(struct ion_platform_heap *heap_data)
{
struct ion_carveout_heap *carveout_heap;
int ret;
struct page *page;
size_t size;
page = pfn_to_page(PFN_DOWN(heap_data->base));
size = heap_data->size;
ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
ret = ion_heap_pages_zero_legency(page, size, pgprot_writecombine(PAGE_KERNEL));
if (ret)
return ERR_PTR(ret);
carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
if (!carveout_heap)
return ERR_PTR(-ENOMEM);
carveout_heap->pool = gen_pool_create(PAGE_SHIFT, -1);
if (!carveout_heap->pool) {
kfree(carveout_heap);
return ERR_PTR(-ENOMEM);
}
carveout_heap->base = heap_data->base;
gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
-1);
carveout_heap->heap.ops = &carveout_heap_ops;
carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
return &carveout_heap->heap;
}
void ion_carveout_heap_destroy(struct ion_heap *heap)
{
struct ion_carveout_heap *carveout_heap =
container_of(heap, struct ion_carveout_heap, heap);
gen_pool_destroy(carveout_heap->pool);
kfree(carveout_heap);
carveout_heap = NULL;
}
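For reference, a hedged board-file style description that could be handed to ion_carveout_heap_create_legency() above; base and size are placeholders, not values from any real platform.

#include <linux/mm.h>
#include "ion.h"

static struct ion_platform_heap example_carveout_data = {
	.type  = ION_HEAP_TYPE_CARVEOUT,
	.id    = 2,
	.name  = "example-carveout",
	.base  = 0x80000000,	/* placeholder physical base */
	.size  = 0x01000000,	/* 16 MiB, placeholder */
	.align = PAGE_SIZE,
};

/* struct ion_heap *heap = ion_carveout_heap_create_legency(&example_carveout_data); */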


@@ -1,194 +0,0 @@
/*
* drivers/staging/android/ion/ion_chunk_heap.c
*
* Copyright (C) 2012 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"
struct ion_chunk_heap {
struct ion_heap heap;
struct gen_pool *pool;
ion_phys_addr_t base;
unsigned long chunk_size;
unsigned long size;
unsigned long allocated;
};
static int ion_chunk_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
unsigned long size, unsigned long align,
unsigned long flags)
{
struct ion_chunk_heap *chunk_heap =
container_of(heap, struct ion_chunk_heap, heap);
struct sg_table *table;
struct scatterlist *sg;
int ret, i;
unsigned long num_chunks;
unsigned long allocated_size;
if (align > chunk_heap->chunk_size)
return -EINVAL;
allocated_size = ALIGN(size, chunk_heap->chunk_size);
num_chunks = allocated_size / chunk_heap->chunk_size;
if (allocated_size > chunk_heap->size - chunk_heap->allocated)
return -ENOMEM;
table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!table)
return -ENOMEM;
ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
if (ret) {
kfree(table);
return ret;
}
sg = table->sgl;
for (i = 0; i < num_chunks; i++) {
unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
chunk_heap->chunk_size);
if (!paddr)
goto err;
sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
chunk_heap->chunk_size, 0);
sg = sg_next(sg);
}
buffer->priv_virt = table;
chunk_heap->allocated += allocated_size;
return 0;
err:
sg = table->sgl;
for (i -= 1; i >= 0; i--) {
gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
sg->length);
sg = sg_next(sg);
}
sg_free_table(table);
kfree(table);
return -ENOMEM;
}
static void ion_chunk_heap_free(struct ion_buffer *buffer)
{
struct ion_heap *heap = buffer->heap;
struct ion_chunk_heap *chunk_heap =
container_of(heap, struct ion_chunk_heap, heap);
struct sg_table *table = buffer->priv_virt;
struct scatterlist *sg;
int i;
unsigned long allocated_size;
allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);
ion_heap_buffer_zero_legency(buffer);
if (ion_buffer_cached(buffer))
dma_sync_sg_for_device(NULL, table->sgl, table->nents,
DMA_BIDIRECTIONAL);
for_each_sg(table->sgl, sg, table->nents, i) {
gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
sg->length);
}
chunk_heap->allocated -= allocated_size;
sg_free_table(table);
kfree(table);
}
static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
return buffer->priv_virt;
}
static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
}
static struct ion_heap_ops chunk_heap_ops = {
.allocate = ion_chunk_heap_allocate,
.free = ion_chunk_heap_free,
.map_dma = ion_chunk_heap_map_dma,
.unmap_dma = ion_chunk_heap_unmap_dma,
.map_user = ion_heap_map_user_legency,
.map_kernel = ion_heap_map_kernel_legency,
.unmap_kernel = ion_heap_unmap_kernel_legency,
};
struct ion_heap *ion_chunk_heap_create_legency(struct ion_platform_heap *heap_data)
{
struct ion_chunk_heap *chunk_heap;
int ret;
struct page *page;
size_t size;
page = pfn_to_page(PFN_DOWN(heap_data->base));
size = heap_data->size;
ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
ret = ion_heap_pages_zero_legency(page, size, pgprot_writecombine(PAGE_KERNEL));
if (ret)
return ERR_PTR(ret);
chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
if (!chunk_heap)
return ERR_PTR(-ENOMEM);
chunk_heap->chunk_size = (unsigned long)heap_data->priv;
chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
PAGE_SHIFT, -1);
if (!chunk_heap->pool) {
ret = -ENOMEM;
goto error_gen_pool_create;
}
chunk_heap->base = heap_data->base;
chunk_heap->size = heap_data->size;
chunk_heap->allocated = 0;
gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
chunk_heap->heap.ops = &chunk_heap_ops;
chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
pr_debug("%s: base %lu size %zu align %ld\n", __func__,
chunk_heap->base, heap_data->size, heap_data->align);
return &chunk_heap->heap;
error_gen_pool_create:
kfree(chunk_heap);
return ERR_PTR(ret);
}
void ion_chunk_heap_destroy(struct ion_heap *heap)
{
struct ion_chunk_heap *chunk_heap =
container_of(heap, struct ion_chunk_heap, heap);
gen_pool_destroy(chunk_heap->pool);
kfree(chunk_heap);
chunk_heap = NULL;
}
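A similar hedged description for the chunk heap above; note that .priv carries the chunk size, which ion_chunk_heap_create_legency() reads back as (unsigned long)heap_data->priv. All values are placeholders.

#include "ion.h"

static struct ion_platform_heap example_chunk_data = {
	.type = ION_HEAP_TYPE_CHUNK,
	.id   = 3,
	.name = "example-chunk",
	.base = 0x88000000,		/* placeholder physical base */
	.size = 0x00800000,		/* 8 MiB, placeholder */
	.priv = (void *)0x10000,	/* 64 KiB chunk size */
};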


@@ -1,239 +0,0 @@
/*
* drivers/staging/android/ion/ion_cma_heap.c
*
* Copyright (C) Linaro 2012
* Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#ifdef CONFIG_RK_IOMMU
#include <linux/rockchip-iovmm.h>
#endif
#include "ion.h"
#include "ion_priv.h"
#define ION_CMA_ALLOCATE_FAILED -1
struct ion_cma_heap {
struct ion_heap heap;
struct device *dev;
};
#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
struct ion_cma_buffer_info {
void *cpu_addr;
dma_addr_t handle;
struct sg_table *table;
};
/* ION CMA heap operations functions */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
unsigned long len, unsigned long align,
unsigned long flags)
{
struct ion_cma_heap *cma_heap = to_cma_heap(heap);
struct device *dev = cma_heap->dev;
struct ion_cma_buffer_info *info;
if (buffer->flags & ION_FLAG_CACHED)
return -EINVAL;
if (align > PAGE_SIZE)
return -EINVAL;
info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
if (!info)
return ION_CMA_ALLOCATE_FAILED;
info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
GFP_HIGHUSER | __GFP_ZERO);
if (!info->cpu_addr) {
dev_err(dev, "Fail to allocate buffer\n");
goto err;
}
info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!info->table)
goto free_mem;
if (dma_get_sgtable(dev, info->table, info->cpu_addr, info->handle,
len))
goto free_table;
/* keep this for memory release */
buffer->priv_virt = info;
return 0;
free_table:
kfree(info->table);
free_mem:
dma_free_coherent(dev, len, info->cpu_addr, info->handle);
err:
kfree(info);
return ION_CMA_ALLOCATE_FAILED;
}
static void ion_cma_free(struct ion_buffer *buffer)
{
struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
struct device *dev = cma_heap->dev;
struct ion_cma_buffer_info *info = buffer->priv_virt;
/* release memory */
dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
/* release sg table */
sg_free_table(info->table);
kfree(info->table);
kfree(info);
}
/* return physical address in addr */
static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
ion_phys_addr_t *addr, size_t *len)
{
struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
struct device *dev = cma_heap->dev;
struct ion_cma_buffer_info *info = buffer->priv_virt;
dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer,
&info->handle);
*addr = info->handle;
*len = buffer->size;
return 0;
}
static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
struct ion_cma_buffer_info *info = buffer->priv_virt;
return info->table;
}
static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
}
static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
struct vm_area_struct *vma)
{
struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
struct device *dev = cma_heap->dev;
struct ion_cma_buffer_info *info = buffer->priv_virt;
return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
buffer->size);
}
static void *ion_cma_map_kernel(struct ion_heap *heap,
struct ion_buffer *buffer)
{
struct ion_cma_buffer_info *info = buffer->priv_virt;
/* kernel memory mapping has been done at allocation time */
return info->cpu_addr;
}
static void ion_cma_unmap_kernel(struct ion_heap *heap,
struct ion_buffer *buffer)
{
}
#ifdef CONFIG_RK_IOMMU
static int ion_cma_map_iommu(struct ion_buffer *buffer,
struct device *iommu_dev,
struct ion_iommu_map *data,
unsigned long iova_length,
unsigned long flags)
{
int ret = 0;
struct ion_cma_buffer_info *info = buffer->priv_virt;
data->iova_addr = rockchip_iovmm_map(iommu_dev,
info->table->sgl,
0,
iova_length);
pr_debug("%s: map %pad -> %lx\n", __func__,
&info->table->sgl->dma_address,
data->iova_addr);
if (IS_ERR_VALUE(data->iova_addr)) {
pr_err("%s: failed: %lx\n", __func__, data->iova_addr);
ret = data->iova_addr;
goto out;
}
data->mapped_size = iova_length;
out:
return ret;
}
void ion_cma_unmap_iommu(struct device *iommu_dev, struct ion_iommu_map *data)
{
pr_debug("%s: unmap %x@%lx\n",
__func__,
data->mapped_size,
data->iova_addr);
rockchip_iovmm_unmap(iommu_dev, data->iova_addr);
}
#endif
static struct ion_heap_ops ion_cma_ops = {
.allocate = ion_cma_allocate,
.free = ion_cma_free,
.map_dma = ion_cma_heap_map_dma,
.unmap_dma = ion_cma_heap_unmap_dma,
.phys = ion_cma_phys,
.map_user = ion_cma_mmap,
.map_kernel = ion_cma_map_kernel,
.unmap_kernel = ion_cma_unmap_kernel,
#ifdef CONFIG_RK_IOMMU
.map_iommu = ion_cma_map_iommu,
.unmap_iommu = ion_cma_unmap_iommu,
#endif
};
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
{
struct ion_cma_heap *cma_heap;
cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL);
if (!cma_heap)
return ERR_PTR(-ENOMEM);
cma_heap->heap.ops = &ion_cma_ops;
/*
* get device from private heaps data, later it will be
* used to make the link with reserved CMA memory
*/
cma_heap->dev = data->priv;
cma_heap->heap.type = ION_HEAP_TYPE_DMA;
return &cma_heap->heap;
}
void ion_cma_heap_destroy(struct ion_heap *heap)
{
struct ion_cma_heap *cma_heap = to_cma_heap(heap);
kfree(cma_heap);
}


@@ -1,386 +0,0 @@
/*
* drivers/staging/android/ion/ion_heap.c
*
* Copyright (C) 2011 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <uapi/linux/sched/types.h>
#include "ion.h"
#include "ion_priv.h"
void *ion_heap_map_kernel_legency(struct ion_heap *heap,
struct ion_buffer *buffer)
{
struct scatterlist *sg;
int i, j;
void *vaddr;
pgprot_t pgprot;
struct sg_table *table = buffer->sg_table;
int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
struct page **pages = vmalloc(sizeof(struct page *) * npages);
struct page **tmp = pages;
if (!pages)
return ERR_PTR(-ENOMEM);
if (buffer->flags & ION_FLAG_CACHED)
pgprot = PAGE_KERNEL;
else
pgprot = pgprot_writecombine(PAGE_KERNEL);
for_each_sg(table->sgl, sg, table->nents, i) {
int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
struct page *page = sg_page(sg);
BUG_ON(i >= npages);
for (j = 0; j < npages_this_entry; j++)
*(tmp++) = page++;
}
vaddr = vmap(pages, npages, VM_MAP, pgprot);
vfree(pages);
if (!vaddr)
return ERR_PTR(-ENOMEM);
return vaddr;
}
void ion_heap_unmap_kernel_legency(struct ion_heap *heap,
struct ion_buffer *buffer)
{
vunmap(buffer->vaddr);
}
int ion_heap_map_user_legency(struct ion_heap *heap, struct ion_buffer *buffer,
struct vm_area_struct *vma)
{
struct sg_table *table = buffer->sg_table;
unsigned long addr = vma->vm_start;
unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
struct scatterlist *sg;
int i;
int ret;
for_each_sg(table->sgl, sg, table->nents, i) {
struct page *page = sg_page(sg);
unsigned long remainder = vma->vm_end - addr;
unsigned long len = sg->length;
if (offset >= sg->length) {
offset -= sg->length;
continue;
} else if (offset) {
page += offset / PAGE_SIZE;
len = sg->length - offset;
offset = 0;
}
len = min(len, remainder);
ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
vma->vm_page_prot);
if (ret)
return ret;
addr += len;
if (addr >= vma->vm_end)
return 0;
}
return 0;
}
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
void *addr = vm_map_ram(pages, num, -1, pgprot);
if (!addr)
return -ENOMEM;
memset(addr, 0, PAGE_SIZE * num);
vm_unmap_ram(addr, num);
return 0;
}
static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
pgprot_t pgprot)
{
int p = 0;
int ret = 0;
struct sg_page_iter piter;
struct page *pages[32];
for_each_sg_page(sgl, &piter, nents, 0) {
pages[p++] = sg_page_iter_page(&piter);
if (p == ARRAY_SIZE(pages)) {
ret = ion_heap_clear_pages(pages, p, pgprot);
if (ret)
return ret;
p = 0;
}
}
if (p)
ret = ion_heap_clear_pages(pages, p, pgprot);
return ret;
}
int ion_heap_buffer_zero_legency(struct ion_buffer *buffer)
{
struct sg_table *table = buffer->sg_table;
pgprot_t pgprot;
if (buffer->flags & ION_FLAG_CACHED)
pgprot = PAGE_KERNEL;
else
pgprot = pgprot_writecombine(PAGE_KERNEL);
return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
}
int ion_heap_pages_zero_legency(struct page *page, size_t size, pgprot_t pgprot)
{
struct scatterlist sg;
sg_init_table(&sg, 1);
sg_set_page(&sg, page, size, 0);
return ion_heap_sglist_zero(&sg, 1, pgprot);
}
void ion_heap_freelist_add_legency(struct ion_heap *heap, struct ion_buffer *buffer)
{
spin_lock(&heap->free_lock);
list_add(&buffer->list, &heap->free_list);
heap->free_list_size += buffer->size;
spin_unlock(&heap->free_lock);
wake_up(&heap->waitqueue);
}
size_t ion_heap_freelist_size_legency(struct ion_heap *heap)
{
size_t size;
spin_lock(&heap->free_lock);
size = heap->free_list_size;
spin_unlock(&heap->free_lock);
return size;
}
static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
bool skip_pools)
{
struct ion_buffer *buffer;
size_t total_drained = 0;
if (ion_heap_freelist_size_legency(heap) == 0)
return 0;
spin_lock(&heap->free_lock);
if (size == 0)
size = heap->free_list_size;
while (!list_empty(&heap->free_list)) {
if (total_drained >= size)
break;
buffer = list_first_entry(&heap->free_list, struct ion_buffer,
list);
list_del(&buffer->list);
heap->free_list_size -= buffer->size;
if (skip_pools)
buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
total_drained += buffer->size;
spin_unlock(&heap->free_lock);
ion_buffer_destroy_legency(buffer);
spin_lock(&heap->free_lock);
}
spin_unlock(&heap->free_lock);
return total_drained;
}
size_t ion_heap_freelist_drain_legency(struct ion_heap *heap, size_t size)
{
return _ion_heap_freelist_drain(heap, size, false);
}
size_t ion_heap_freelist_shrink_legency(struct ion_heap *heap, size_t size)
{
return _ion_heap_freelist_drain(heap, size, true);
}
static int ion_heap_deferred_free(void *data)
{
struct ion_heap *heap = data;
while (true) {
struct ion_buffer *buffer;
wait_event_freezable(heap->waitqueue,
ion_heap_freelist_size_legency(heap) > 0);
spin_lock(&heap->free_lock);
if (list_empty(&heap->free_list)) {
spin_unlock(&heap->free_lock);
continue;
}
buffer = list_first_entry(&heap->free_list, struct ion_buffer,
list);
list_del(&buffer->list);
heap->free_list_size -= buffer->size;
spin_unlock(&heap->free_lock);
ion_buffer_destroy_legency(buffer);
}
return 0;
}
int ion_heap_init_deferred_free_legency(struct ion_heap *heap)
{
struct sched_param param = { .sched_priority = 0 };
INIT_LIST_HEAD(&heap->free_list);
init_waitqueue_head(&heap->waitqueue);
heap->task = kthread_run(ion_heap_deferred_free, heap,
"%s", heap->name);
if (IS_ERR(heap->task)) {
pr_err("%s: creating thread for deferred free failed\n",
__func__);
return PTR_ERR_OR_ZERO(heap->task);
}
sched_setscheduler(heap->task, SCHED_IDLE, &param);
return 0;
}
static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
struct shrink_control *sc)
{
struct ion_heap *heap = container_of(shrinker, struct ion_heap,
shrinker);
int total = 0;
total = ion_heap_freelist_size_legency(heap) / PAGE_SIZE;
if (heap->ops->shrink)
total += heap->ops->shrink(heap, sc->gfp_mask, 0);
return total;
}
static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
struct shrink_control *sc)
{
struct ion_heap *heap = container_of(shrinker, struct ion_heap,
shrinker);
int freed = 0;
int to_scan = sc->nr_to_scan;
if (to_scan == 0)
return 0;
/*
* shrink the free list first, no point in zeroing the memory if we're
* just going to reclaim it. Also, skip any possible page pooling.
*/
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
freed = ion_heap_freelist_shrink_legency(heap, to_scan * PAGE_SIZE) /
PAGE_SIZE;
to_scan -= freed;
if (to_scan <= 0)
return freed;
if (heap->ops->shrink)
freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
return freed;
}
void ion_heap_init_shrinker_legency(struct ion_heap *heap)
{
heap->shrinker.count_objects = ion_heap_shrink_count;
heap->shrinker.scan_objects = ion_heap_shrink_scan;
heap->shrinker.seeks = DEFAULT_SEEKS;
heap->shrinker.batch = 0;
register_shrinker(&heap->shrinker);
}
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
struct ion_heap *heap = NULL;
switch (heap_data->type) {
case ION_HEAP_TYPE_SYSTEM_CONTIG:
pr_err("%s: Heap type is disabled: %d\n", __func__,
heap_data->type);
return ERR_PTR(-EINVAL);
case ION_HEAP_TYPE_SYSTEM:
heap = ion_system_heap_create_legency(heap_data);
break;
case ION_HEAP_TYPE_CARVEOUT:
heap = ion_carveout_heap_create_legency(heap_data);
break;
case ION_HEAP_TYPE_CHUNK:
heap = ion_chunk_heap_create_legency(heap_data);
break;
case ION_HEAP_TYPE_DMA:
heap = ion_cma_heap_create(heap_data);
break;
default:
pr_err("%s: Invalid heap type %d\n", __func__,
heap_data->type);
return ERR_PTR(-EINVAL);
}
if (IS_ERR_OR_NULL(heap)) {
pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
__func__, heap_data->name, heap_data->type,
heap_data->base, heap_data->size);
return ERR_PTR(-EINVAL);
}
heap->name = heap_data->name;
heap->id = heap_data->id;
return heap;
}
EXPORT_SYMBOL(ion_heap_create);
void ion_heap_destroy(struct ion_heap *heap)
{
if (!heap)
return;
switch (heap->type) {
case ION_HEAP_TYPE_SYSTEM_CONTIG:
pr_err("%s: Heap type is disabled: %d\n", __func__,
heap->type);
break;
case ION_HEAP_TYPE_SYSTEM:
ion_system_heap_destroy(heap);
break;
case ION_HEAP_TYPE_CARVEOUT:
ion_carveout_heap_destroy(heap);
break;
case ION_HEAP_TYPE_CHUNK:
ion_chunk_heap_destroy(heap);
break;
case ION_HEAP_TYPE_DMA:
ion_cma_heap_destroy(heap);
break;
default:
pr_err("%s: Invalid heap type %d\n", __func__,
heap->type);
}
}
EXPORT_SYMBOL(ion_heap_destroy);
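As a hedged sketch of how the dispatch above is normally driven, a platform probe (compare the rk_ion_probe hunk near the top of this diff) walks its struct ion_platform_data and registers each heap; idev and pdata are assumed to already exist.

#include <linux/err.h>
#include "ion.h"
#include "ion_priv.h"

static void example_register_heaps(struct ion_device *idev,
				   struct ion_platform_data *pdata)
{
	int i;

	for (i = 0; i < pdata->nr; i++) {
		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);

		if (IS_ERR_OR_NULL(heap))
			continue;	/* skip heaps that failed to create */
		ion_device_add_heap_legency(idev, heap);
	}
}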


@@ -1,185 +0,0 @@
/*
* drivers/staging/android/ion/ion_mem_pool.c
*
* Copyright (C) 2011 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include "ion_priv.h"
static void *ion_page_pool_alloc_legency_pages(struct ion_page_pool *pool)
{
struct page *page = alloc_pages(pool->gfp_mask, pool->order);
if (!page)
return NULL;
ion_page_pool_alloc_legency_set_cache_policy(pool, page);
ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
DMA_BIDIRECTIONAL);
return page;
}
static void ion_page_pool_free_legency_pages(struct ion_page_pool *pool,
struct page *page)
{
ion_page_pool_free_legency_set_cache_policy(pool, page);
__free_pages(page, pool->order);
}
static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
{
mutex_lock(&pool->mutex);
if (PageHighMem(page)) {
list_add_tail(&page->lru, &pool->high_items);
pool->high_count++;
} else {
list_add_tail(&page->lru, &pool->low_items);
pool->low_count++;
}
mutex_unlock(&pool->mutex);
return 0;
}
static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
{
struct page *page;
if (high) {
BUG_ON(!pool->high_count);
page = list_first_entry(&pool->high_items, struct page, lru);
pool->high_count--;
} else {
BUG_ON(!pool->low_count);
page = list_first_entry(&pool->low_items, struct page, lru);
pool->low_count--;
}
list_del(&page->lru);
return page;
}
struct page *ion_page_pool_alloc_legency(struct ion_page_pool *pool)
{
struct page *page = NULL;
BUG_ON(!pool);
mutex_lock(&pool->mutex);
if (pool->high_count)
page = ion_page_pool_remove(pool, true);
else if (pool->low_count)
page = ion_page_pool_remove(pool, false);
mutex_unlock(&pool->mutex);
if (!page)
page = ion_page_pool_alloc_legency_pages(pool);
return page;
}
void ion_page_pool_free_legency(struct ion_page_pool *pool, struct page *page)
{
int ret;
BUG_ON(pool->order != compound_order(page));
ret = ion_page_pool_add(pool, page);
if (ret)
ion_page_pool_free_legency_pages(pool, page);
}
void ion_page_pool_free_legency_immediate(struct ion_page_pool *pool, struct page *page)
{
ion_page_pool_free_legency_pages(pool, page);
}
static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
{
int count = pool->low_count;
if (high)
count += pool->high_count;
return count << pool->order;
}
int ion_page_pool_shrink_legency(struct ion_page_pool *pool, gfp_t gfp_mask,
int nr_to_scan)
{
int freed = 0;
bool high;
if (current_is_kswapd())
high = true;
else
high = !!(gfp_mask & __GFP_HIGHMEM);
if (nr_to_scan == 0)
return ion_page_pool_total(pool, high);
while (freed < nr_to_scan) {
struct page *page;
mutex_lock(&pool->mutex);
if (pool->low_count) {
page = ion_page_pool_remove(pool, false);
} else if (high && pool->high_count) {
page = ion_page_pool_remove(pool, true);
} else {
mutex_unlock(&pool->mutex);
break;
}
mutex_unlock(&pool->mutex);
ion_page_pool_free_legency_pages(pool, page);
freed += (1 << pool->order);
}
return freed;
}
struct ion_page_pool *ion_page_pool_create_legency(gfp_t gfp_mask, unsigned int order)
{
struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool),
GFP_KERNEL);
if (!pool)
return NULL;
pool->high_count = 0;
pool->low_count = 0;
INIT_LIST_HEAD(&pool->low_items);
INIT_LIST_HEAD(&pool->high_items);
pool->gfp_mask = gfp_mask | __GFP_COMP;
pool->order = order;
mutex_init(&pool->mutex);
plist_node_init(&pool->list, order);
return pool;
}
void ion_page_pool_destroy_legency(struct ion_page_pool *pool)
{
kfree(pool);
}
static int __init ion_page_pool_init(void)
{
return 0;
}
device_initcall(ion_page_pool_init);
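A hedged sketch of the pool life cycle implemented above, as a heap might use it for order-0 pages; draining through the shrink helper before destroy matters because ion_page_pool_destroy_legency() only frees the pool structure itself.

#include <linux/gfp.h>
#include <linux/kernel.h>
#include "ion_priv.h"

static void example_page_pool_use(void)
{
	struct ion_page_pool *pool;
	struct page *page;

	pool = ion_page_pool_create_legency(GFP_HIGHUSER | __GFP_ZERO, 0);
	if (!pool)
		return;

	page = ion_page_pool_alloc_legency(pool);	/* falls back to alloc_pages() when empty */
	if (page)
		ion_page_pool_free_legency(pool, page);	/* cache the page for reuse */

	/* release everything still cached, then free the pool itself */
	ion_page_pool_shrink_legency(pool, GFP_KERNEL, INT_MAX);
	ion_page_pool_destroy_legency(pool);
}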


@@ -1,478 +0,0 @@
/*
* drivers/staging/android/ion/ion_priv.h
*
* Copyright (C) 2011 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _ION_PRIV_H
#define _ION_PRIV_H
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/shrinker.h>
#include <linux/types.h>
#ifdef CONFIG_ION_POOL_CACHE_POLICY
#include <asm/cacheflush.h>
#endif
#include "ion.h"
struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
#ifdef CONFIG_RK_IOMMU
/**
* struct ion_iommu_map - represents a mapping of an ion buffer to an iommu
* @iova_addr - iommu virtual address
* @node - rb node to exist in the buffer's tree of iommu mappings
* @key - contains the iommu device info
* @ref - for reference counting this mapping
* @mapped_size - size of the iova space mapped
* (may not be the same as the buffer size)
*
* Represents a mapping of one ion buffer to a particular iommu domain
* and address range. There may exist other mappings of this buffer in
* different domains or address ranges. All mappings will have the same
* cacheability and security.
*/
struct ion_iommu_map {
unsigned long iova_addr;
struct rb_node node;
unsigned long key;
struct ion_buffer *buffer;
struct kref ref;
int mapped_size;
};
#endif
/**
* struct ion_buffer - metadata for a particular buffer
* @ref: reference count
* @node: node in the ion_device buffers tree
* @dev: back pointer to the ion_device
* @heap: back pointer to the heap the buffer came from
* @flags: buffer specific flags
* @private_flags: internal buffer specific flags
* @size: size of the buffer
* @priv_virt: private data to the buffer representable as
* a void *
* @priv_phys: private data to the buffer representable as
* an ion_phys_addr_t (and someday a phys_addr_t)
* @lock: protects the buffers cnt fields
* @kmap_cnt: number of times the buffer is mapped to the kernel
* @vaddr: the kernel mapping if kmap_cnt is not zero
* @dmap_cnt: number of times the buffer is mapped for dma
* @sg_table: the sg table for the buffer if dmap_cnt is not zero
* @pages: flat array of pages in the buffer -- used by fault
* handler and only valid for buffers that are faulted in
* @vmas: list of vma's mapping this buffer
* @handle_count: count of handles referencing this buffer
* @task_comm: taskcomm of last client to reference this buffer in a
* handle, used for debugging
* @pid: pid of last client to reference this buffer in a
* handle, used for debugging
*/
struct ion_buffer {
struct kref ref;
union {
struct rb_node node;
struct list_head list;
};
struct ion_device *dev;
struct ion_heap *heap;
unsigned long flags;
unsigned long private_flags;
size_t size;
union {
void *priv_virt;
ion_phys_addr_t priv_phys;
};
struct mutex lock;
int kmap_cnt;
void *vaddr;
int dmap_cnt;
struct sg_table *sg_table;
struct page **pages;
struct list_head vmas;
/* used to track orphaned buffers */
int handle_count;
char task_comm[TASK_COMM_LEN];
pid_t pid;
#ifdef CONFIG_RK_IOMMU
unsigned int iommu_map_cnt;
struct rb_root iommu_maps;
#endif
};
void ion_buffer_destroy_legency(struct ion_buffer *buffer);
/**
* struct ion_heap_ops - ops to operate on a given heap
* @allocate: allocate memory
* @free: free memory
* @phys get physical address of a buffer (only defined on
* physically contiguous heaps)
* @map_dma map the memory for dma to a scatterlist
* @unmap_dma unmap the memory for dma
* @map_kernel map memory to the kernel
* @unmap_kernel unmap memory to the kernel
* @map_user map memory to userspace
*
* allocate, phys, and map_user return 0 on success, -errno on error.
* map_dma and map_kernel return pointer on success, ERR_PTR on
* error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
* the buffer's private_flags when called from a shrinker. In that
* case, the pages being free'd must be truly free'd back to the
* system, not put in a page pool or otherwise cached.
*/
struct ion_heap_ops {
int (*allocate)(struct ion_heap *heap,
struct ion_buffer *buffer, unsigned long len,
unsigned long align, unsigned long flags);
void (*free)(struct ion_buffer *buffer);
int (*phys)(struct ion_heap *heap, struct ion_buffer *buffer,
ion_phys_addr_t *addr, size_t *len);
struct sg_table * (*map_dma)(struct ion_heap *heap,
struct ion_buffer *buffer);
void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer);
void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
struct vm_area_struct *vma);
int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
#ifdef CONFIG_RK_IOMMU
int (*map_iommu)(struct ion_buffer *buffer,
struct device *iommu_dev,
struct ion_iommu_map *map_data,
unsigned long iova_length,
unsigned long flags);
void (*unmap_iommu)(struct device *iommu_dev,
struct ion_iommu_map *data);
#endif
};
/**
* heap flags - flags between the heaps and core ion code
*/
#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
/**
* private flags - flags internal to ion
*/
/*
* Buffer is being freed from a shrinker function. Skip any possible
* heap-specific caching mechanism (e.g. page pools). Guarantees that
* any buffer storage that came from the system allocator will be
* returned to the system allocator.
*/
#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0)
/**
* struct ion_heap - represents a heap in the system
* @node: rb node to put the heap on the device's tree of heaps
* @dev: back pointer to the ion_device
* @type: type of heap
* @ops: ops struct as above
* @flags: flags
* @id: id of heap, also indicates priority of this heap when
* allocating. These are specified by platform data and
* MUST be unique
* @name: used for debugging
* @shrinker: a shrinker for the heap
* @free_list: free list head if deferred free is used
* @free_list_size size of the deferred free list in bytes
* @lock: protects the free list
* @waitqueue: queue to wait on from deferred free thread
* @task: task struct of deferred free thread
* @debug_show: called when heap debug file is read to add any
* heap specific debug info to output
*
* Represents a pool of memory from which buffers can be made. In some
* systems the only heap is regular system memory allocated via vmalloc.
* On others, some blocks might require large physically contiguous buffers
* that are allocated from a specially reserved heap.
*/
struct ion_heap {
struct plist_node node;
struct ion_device *dev;
enum ion_heap_type type;
struct ion_heap_ops *ops;
unsigned long flags;
unsigned int id;
const char *name;
struct shrinker shrinker;
struct list_head free_list;
size_t free_list_size;
spinlock_t free_lock;
wait_queue_head_t waitqueue;
struct task_struct *task;
int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
};
/**
* ion_buffer_cached - this ion buffer is cached
* @buffer: buffer
*
* indicates whether this ion buffer is cached
*/
bool ion_buffer_cached(struct ion_buffer *buffer);
/**
* ion_buffer_fault_user_mappings - fault in user mappings of this buffer
* @buffer: buffer
*
* indicates whether userspace mappings of this buffer will be faulted
* in, this can affect how buffers are allocated from the heap.
*/
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
/**
* ion_device_create - allocates and returns an ion device
* @custom_ioctl: arch specific ioctl function if applicable
*
* returns a valid device or -PTR_ERR
*/
struct ion_device *ion_device_create(long (*custom_ioctl)
(struct ion_client *client,
unsigned int cmd,
unsigned long arg));
/**
* ion_device_destroy - frees a device and its resources
* @dev: the device
*/
void ion_device_destroy(struct ion_device *dev);
/**
* ion_device_add_heap_legency - adds a heap to the ion device
* @dev: the device
* @heap: the heap to add
*/
void ion_device_add_heap_legency(struct ion_device *dev, struct ion_heap *heap);
/**
* some helpers for common operations on buffers using the sg_table
* and vaddr fields
*/
void *ion_heap_map_kernel_legency(struct ion_heap *, struct ion_buffer *);
void ion_heap_unmap_kernel_legency(struct ion_heap *, struct ion_buffer *);
int ion_heap_map_user_legency(struct ion_heap *, struct ion_buffer *,
struct vm_area_struct *);
int ion_heap_buffer_zero_legency(struct ion_buffer *buffer);
int ion_heap_pages_zero_legency(struct page *page, size_t size, pgprot_t pgprot);
/**
* ion_heap_init_shrinker_legency
* @heap: the heap
*
* If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
* this function will be called to setup a shrinker to shrink the freelists
* and call the heap's shrink op.
*/
void ion_heap_init_shrinker_legency(struct ion_heap *heap);
/**
* ion_heap_init_deferred_free_legency -- initialize deferred free functionality
* @heap: the heap
*
* If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
* be called to setup deferred frees. Calls to free the buffer will
* return immediately and the actual free will occur some time later
*/
int ion_heap_init_deferred_free_legency(struct ion_heap *heap);
/**
* ion_heap_freelist_add_legency - add a buffer to the deferred free list
* @heap: the heap
* @buffer: the buffer
*
* Adds an item to the deferred freelist.
*/
void ion_heap_freelist_add_legency(struct ion_heap *heap, struct ion_buffer *buffer);
/**
* ion_heap_freelist_drain_legency - drain the deferred free list
* @heap: the heap
* @size: amount of memory to drain in bytes
*
* Drains the indicated amount of memory from the deferred freelist immediately.
* Returns the total amount freed. The total freed may be higher depending
* on the size of the items in the list, or lower if there is insufficient
* total memory on the freelist.
*/
size_t ion_heap_freelist_drain_legency(struct ion_heap *heap, size_t size);
/**
* ion_heap_freelist_shrink_legency - drain the deferred free
* list, skipping any heap-specific
* pooling or caching mechanisms
*
* @heap: the heap
* @size: amount of memory to drain in bytes
*
* Drains the indicated amount of memory from the deferred freelist immediately.
* Returns the total amount freed. The total freed may be higher depending
* on the size of the items in the list, or lower if there is insufficient
* total memory on the freelist.
*
* Unlike with @ion_heap_freelist_drain_legency, don't put any pages back into
* page pools or otherwise cache the pages. Everything must be
* genuinely free'd back to the system. If you're free'ing from a
* shrinker you probably want to use this. Note that this relies on
* the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
* flag.
*/
size_t ion_heap_freelist_shrink_legency(struct ion_heap *heap,
size_t size);
/**
* ion_heap_freelist_size_legency - returns the size of the freelist in bytes
* @heap: the heap
*/
size_t ion_heap_freelist_size_legency(struct ion_heap *heap);
/**
* functions for creating and destroying the built in ion heaps.
* architectures can add their own custom architecture specific
* heaps as appropriate.
*/
struct ion_heap *ion_heap_create(struct ion_platform_heap *);
void ion_heap_destroy(struct ion_heap *);
struct ion_heap *ion_system_heap_create_legency(struct ion_platform_heap *);
void ion_system_heap_destroy(struct ion_heap *);
struct ion_heap *ion_system_contig_heap_create_legency(struct ion_platform_heap *);
void ion_system_contig_heap_destroy(struct ion_heap *);
struct ion_heap *ion_carveout_heap_create_legency(struct ion_platform_heap *);
void ion_carveout_heap_destroy(struct ion_heap *);
struct ion_heap *ion_chunk_heap_create_legency(struct ion_platform_heap *);
void ion_chunk_heap_destroy(struct ion_heap *);
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
void ion_cma_heap_destroy(struct ion_heap *);
/**
* kernel api to allocate/free from carveout -- used when carveout is
* used to back an architecture specific custom heap
*/
ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
unsigned long align);
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
unsigned long size);
/**
* The carveout heap returns physical addresses, since 0 may be a valid
* physical address, this is used to indicate allocation failed
*/
#define ION_CARVEOUT_ALLOCATE_FAIL -1
/**
* functions for creating and destroying a heap pool -- allows you
* to keep a pool of pre allocated memory to use from your heap. Keeping
* a pool of memory that is ready for dma, ie any cached mapping have been
* invalidated from the cache, provides a significant performance benefit on
* many systems
*/
/**
* struct ion_page_pool - pagepool struct
* @high_count: number of highmem items in the pool
* @low_count: number of lowmem items in the pool
* @high_items: list of highmem items
* @low_items: list of lowmem items
* @mutex: lock protecting this struct and especially the count
* item list
* @gfp_mask: gfp_mask to use from alloc
* @order: order of pages in the pool
* @list: plist node for list of pools
*
* Allows you to keep a pool of pre allocated pages to use from your heap.
* Keeping a pool of pages that is ready for dma, ie any cached mapping have
* been invalidated from the cache, provides a significant performance benefit
* on many systems
*/
struct ion_page_pool {
int high_count;
int low_count;
struct list_head high_items;
struct list_head low_items;
struct mutex mutex;
gfp_t gfp_mask;
unsigned int order;
struct plist_node list;
};
struct ion_page_pool *ion_page_pool_create_legency(gfp_t gfp_mask, unsigned int order);
void ion_page_pool_destroy_legency(struct ion_page_pool *);
struct page *ion_page_pool_alloc_legency(struct ion_page_pool *);
void ion_page_pool_free_legency(struct ion_page_pool *, struct page *);
void ion_page_pool_free_legency_immediate(struct ion_page_pool *, struct page *);
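/*
 * Editorial sketch, not part of the original header: a heap keeping a small
 * pool of order-0 pages ready for DMA. In the legacy implementation the pool
 * is expected to fall back to the page allocator when it is empty; the names
 * below are hypothetical.
 */
static struct ion_page_pool *example_pool;

static int example_pool_init(void)
{
	example_pool = ion_page_pool_create_legency(GFP_HIGHUSER | __GFP_ZERO, 0);
	return example_pool ? 0 : -ENOMEM;
}

static void example_pool_cycle(void)
{
	struct page *page = ion_page_pool_alloc_legency(example_pool);

	if (page)
		ion_page_pool_free_legency(example_pool, page); /* back to the pool, not the system */
}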
#ifdef CONFIG_ION_POOL_CACHE_POLICY
static inline void ion_page_pool_alloc_legency_set_cache_policy
(struct ion_page_pool *pool,
struct page *page){
void *va = page_address(page);
if (va)
set_memory_wc((unsigned long)va, 1 << pool->order);
}
static inline void ion_page_pool_free_legency_set_cache_policy
(struct ion_page_pool *pool,
struct page *page){
void *va = page_address(page);
if (va)
set_memory_wb((unsigned long)va, 1 << pool->order);
}
#else
static inline void ion_page_pool_alloc_legency_set_cache_policy
(struct ion_page_pool *pool,
struct page *page){ }
static inline void ion_page_pool_free_legency_set_cache_policy
(struct ion_page_pool *pool,
struct page *page){ }
#endif
/**
 * ion_page_pool_shrink_legency - shrinks the amount of memory cached in the pool
 * @pool: the pool
 * @gfp_mask: the memory type to reclaim
 * @nr_to_scan: number of pages to shrink
 *
 * Returns the number of pages freed.
*/
int ion_page_pool_shrink_legency(struct ion_page_pool *pool, gfp_t gfp_mask,
int nr_to_scan);
/**
* ion_pages_sync_for_device - cache flush pages for use with the specified
* device
* @dev: the device the pages will be used with
* @page: the first page to be flushed
* @size: size in bytes of region to be flushed
* @dir: direction of dma transfer
*/
void ion_pages_sync_for_device(struct device *dev, struct page *page,
size_t size, enum dma_data_direction dir);
#endif /* _ION_PRIV_H */


@@ -1,537 +0,0 @@
/*
* drivers/staging/android/ion/ion_system_heap.c
*
* Copyright (C) 2011 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rockchip-iovmm.h>
#include "ion.h"
#include "ion_priv.h"
static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
__GFP_NORETRY) & ~__GFP_RECLAIM;
static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);
static int order_to_index(unsigned int order)
{
int i;
for (i = 0; i < num_orders; i++)
if (order == orders[i])
return i;
BUG();
return -1;
}
static inline unsigned int order_to_size(int order)
{
return PAGE_SIZE << order;
}
struct ion_system_heap {
struct ion_heap heap;
struct ion_page_pool *pools[0];
};
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
struct ion_buffer *buffer,
unsigned long order)
{
bool cached = ion_buffer_cached(buffer);
struct ion_page_pool *pool = heap->pools[order_to_index(order)];
struct page *page;
if (!cached) {
page = ion_page_pool_alloc_legency(pool);
} else {
gfp_t gfp_flags = low_order_gfp_flags;
if (order > 4)
gfp_flags = high_order_gfp_flags;
page = alloc_pages(gfp_flags | __GFP_COMP, order);
if (!page)
return NULL;
ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
DMA_BIDIRECTIONAL);
}
return page;
}
static void free_buffer_page(struct ion_system_heap *heap,
struct ion_buffer *buffer, struct page *page)
{
unsigned int order = compound_order(page);
bool cached = ion_buffer_cached(buffer);
if (!cached) {
struct ion_page_pool *pool = heap->pools[order_to_index(order)];
if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
ion_page_pool_free_legency_immediate(pool, page);
else
ion_page_pool_free_legency(pool, page);
} else {
__free_pages(page, order);
}
}
static struct page *alloc_largest_available(struct ion_system_heap *heap,
struct ion_buffer *buffer,
unsigned long size,
unsigned int max_order)
{
struct page *page;
int i;
for (i = 0; i < num_orders; i++) {
if (size < order_to_size(orders[i]))
continue;
if (max_order < orders[i])
continue;
page = alloc_buffer_page(heap, buffer, orders[i]);
if (!page)
continue;
return page;
}
return NULL;
}
static int ion_system_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
unsigned long size, unsigned long align,
unsigned long flags)
{
struct ion_system_heap *sys_heap = container_of(heap,
struct ion_system_heap,
heap);
struct sg_table *table;
struct scatterlist *sg;
struct list_head pages;
struct page *page, *tmp_page;
int i = 0;
unsigned long size_remaining = PAGE_ALIGN(size);
unsigned int max_order = orders[0];
struct list_head lists[8];
unsigned int block_index[8] = {0};
unsigned int block_1M = 0;
unsigned int block_64K = 0;
unsigned int maximum;
int j;
if (align > PAGE_SIZE)
return -EINVAL;
if (size / PAGE_SIZE > totalram_pages / 2)
return -ENOMEM;
INIT_LIST_HEAD(&pages);
for (i = 0; i < 8; i++)
INIT_LIST_HEAD(&lists[i]);
i = 0;
while (size_remaining > 0) {
page = alloc_largest_available(sys_heap, buffer, size_remaining,
max_order);
if (!page)
goto free_pages;
size_remaining -= PAGE_SIZE << compound_order(page);
max_order = compound_order(page);
if (max_order) {
if (max_order == 8)
block_1M++;
if (max_order == 4)
block_64K++;
list_add_tail(&page->lru, &pages);
} else {
dma_addr_t phys = page_to_phys(page);
unsigned int bit12_14 = (phys >> 12) & 0x7;
list_add_tail(&page->lru, &lists[bit12_14]);
block_index[bit12_14]++;
}
i++;
}
pr_debug("%s, %d, i = %d, size = %ld\n", __func__, __LINE__, i, size);
table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!table)
goto free_pages;
if (sg_alloc_table(table, i, GFP_KERNEL))
goto free_table;
maximum = block_index[0];
for (i = 1; i < 8; i++)
maximum = max(maximum, block_index[i]);
pr_debug("%s, %d, maximum = %d, block_1M = %d, block_64K = %d\n",
__func__, __LINE__, maximum, block_1M, block_64K);
for (i = 0; i < 8; i++)
pr_debug("block_index[%d] = %d\n", i, block_index[i]);
sg = table->sgl;
list_for_each_entry_safe(page, tmp_page, &pages, lru) {
sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
sg = sg_next(sg);
list_del(&page->lru);
}
for (i = 0; i < maximum; i++) {
for (j = 0; j < 8; j++) {
if (!list_empty(&lists[j])) {
page = list_first_entry(&lists[j], struct page,
lru);
sg_set_page(sg, page, PAGE_SIZE, 0);
sg = sg_next(sg);
list_del(&page->lru);
}
}
}
buffer->priv_virt = table;
return 0;
free_table:
kfree(table);
free_pages:
list_for_each_entry_safe(page, tmp_page, &pages, lru)
free_buffer_page(sys_heap, buffer, page);
for (i = 0; i < 8; i++) {
list_for_each_entry_safe(page, tmp_page, &lists[i], lru)
free_buffer_page(sys_heap, buffer, page);
}
return -ENOMEM;
}
static void ion_system_heap_free(struct ion_buffer *buffer)
{
struct ion_system_heap *sys_heap = container_of(buffer->heap,
struct ion_system_heap,
heap);
struct sg_table *table = buffer->sg_table;
bool cached = ion_buffer_cached(buffer);
struct scatterlist *sg;
int i;
/*
* uncached pages come from the page pools, zero them before returning
	 * for security purposes (other allocations are zeroed at
	 * alloc time)
*/
if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
ion_heap_buffer_zero_legency(buffer);
for_each_sg(table->sgl, sg, table->nents, i)
free_buffer_page(sys_heap, buffer, sg_page(sg));
sg_free_table(table);
kfree(table);
}
static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
return buffer->priv_virt;
}
static void ion_system_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
}
static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
int nr_to_scan)
{
struct ion_system_heap *sys_heap;
int nr_total = 0;
int i, nr_freed;
int only_scan = 0;
sys_heap = container_of(heap, struct ion_system_heap, heap);
if (!nr_to_scan)
only_scan = 1;
for (i = 0; i < num_orders; i++) {
struct ion_page_pool *pool = sys_heap->pools[i];
nr_freed = ion_page_pool_shrink_legency(pool, gfp_mask, nr_to_scan);
nr_total += nr_freed;
if (!only_scan) {
nr_to_scan -= nr_freed;
/* shrink completed */
if (nr_to_scan <= 0)
break;
}
}
return nr_total;
}
#ifdef CONFIG_RK_IOMMU
static int ion_system_map_iommu(struct ion_buffer *buffer,
struct device *iommu_dev,
struct ion_iommu_map *data,
unsigned long iova_length,
unsigned long flags)
{
int ret = 0;
struct sg_table *table = (struct sg_table *)buffer->priv_virt;
data->iova_addr = rockchip_iovmm_map(iommu_dev,
table->sgl, 0,
iova_length);
pr_debug("%s: map %lx -> %lx\n", __func__,
(unsigned long)table->sgl->dma_address,
data->iova_addr);
if (IS_ERR_VALUE(data->iova_addr)) {
pr_err("%s: rockchip_iovmm_map() failed: 0x%lx\n",
__func__, data->iova_addr);
ret = data->iova_addr;
goto out;
}
data->mapped_size = iova_length;
out:
return ret;
}
void ion_system_unmap_iommu(struct device *iommu_dev,
struct ion_iommu_map *data)
{
pr_debug("%s: unmap 0x%x@0x%lx\n", __func__,
data->mapped_size, data->iova_addr);
rockchip_iovmm_unmap(iommu_dev, data->iova_addr);
}
#endif
static struct ion_heap_ops system_heap_ops = {
.allocate = ion_system_heap_allocate,
.free = ion_system_heap_free,
.map_dma = ion_system_heap_map_dma,
.unmap_dma = ion_system_heap_unmap_dma,
.map_kernel = ion_heap_map_kernel_legency,
.unmap_kernel = ion_heap_unmap_kernel_legency,
.map_user = ion_heap_map_user_legency,
.shrink = ion_system_heap_shrink,
#ifdef CONFIG_RK_IOMMU
.map_iommu = ion_system_map_iommu,
.unmap_iommu = ion_system_unmap_iommu,
#endif
};
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
void *unused)
{
struct ion_system_heap *sys_heap = container_of(heap,
struct ion_system_heap,
heap);
int i;
for (i = 0; i < num_orders; i++) {
struct ion_page_pool *pool = sys_heap->pools[i];
seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
pool->high_count, pool->order,
(PAGE_SIZE << pool->order) * pool->high_count);
seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
pool->low_count, pool->order,
(PAGE_SIZE << pool->order) * pool->low_count);
}
return 0;
}
struct ion_heap *ion_system_heap_create_legency(struct ion_platform_heap *unused)
{
struct ion_system_heap *heap;
int i;
heap = kzalloc(sizeof(struct ion_system_heap) +
sizeof(struct ion_page_pool *) * num_orders,
GFP_KERNEL);
if (!heap)
return ERR_PTR(-ENOMEM);
heap->heap.ops = &system_heap_ops;
heap->heap.type = ION_HEAP_TYPE_SYSTEM;
heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
for (i = 0; i < num_orders; i++) {
struct ion_page_pool *pool;
gfp_t gfp_flags = low_order_gfp_flags;
if (orders[i] > 4)
gfp_flags = high_order_gfp_flags;
pool = ion_page_pool_create_legency(gfp_flags, orders[i]);
if (!pool)
goto destroy_pools;
heap->pools[i] = pool;
}
heap->heap.debug_show = ion_system_heap_debug_show;
return &heap->heap;
destroy_pools:
while (i--)
ion_page_pool_destroy_legency(heap->pools[i]);
kfree(heap);
return ERR_PTR(-ENOMEM);
}
void ion_system_heap_destroy(struct ion_heap *heap)
{
struct ion_system_heap *sys_heap = container_of(heap,
struct ion_system_heap,
heap);
int i;
for (i = 0; i < num_orders; i++)
ion_page_pool_destroy_legency(sys_heap->pools[i]);
kfree(sys_heap);
}
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
unsigned long len,
unsigned long align,
unsigned long flags)
{
int order = get_order(len);
struct page *page;
struct sg_table *table;
unsigned long i;
int ret;
if (align > (PAGE_SIZE << order))
return -EINVAL;
page = alloc_pages(low_order_gfp_flags, order);
if (!page)
return -ENOMEM;
split_page(page, order);
len = PAGE_ALIGN(len);
for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
__free_page(page + i);
table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!table) {
ret = -ENOMEM;
goto free_pages;
}
ret = sg_alloc_table(table, 1, GFP_KERNEL);
if (ret)
goto free_table;
sg_set_page(table->sgl, page, len, 0);
buffer->priv_virt = table;
ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
return 0;
free_table:
kfree(table);
free_pages:
for (i = 0; i < len >> PAGE_SHIFT; i++)
__free_page(page + i);
return ret;
}
static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
struct sg_table *table = buffer->priv_virt;
struct page *page = sg_page(table->sgl);
unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
unsigned long i;
for (i = 0; i < pages; i++)
__free_page(page + i);
sg_free_table(table);
kfree(table);
}
static int ion_system_contig_heap_phys(struct ion_heap *heap,
struct ion_buffer *buffer,
ion_phys_addr_t *addr, size_t *len)
{
struct sg_table *table = buffer->priv_virt;
struct page *page = sg_page(table->sgl);
*addr = page_to_phys(page);
*len = buffer->size;
return 0;
}
static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
return buffer->priv_virt;
}
static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
}
static struct ion_heap_ops kmalloc_ops = {
.allocate = ion_system_contig_heap_allocate,
.free = ion_system_contig_heap_free,
.phys = ion_system_contig_heap_phys,
.map_dma = ion_system_contig_heap_map_dma,
.unmap_dma = ion_system_contig_heap_unmap_dma,
.map_kernel = ion_heap_map_kernel_legency,
.unmap_kernel = ion_heap_unmap_kernel_legency,
.map_user = ion_heap_map_user_legency,
};
struct ion_heap *ion_system_contig_heap_create_legency(struct ion_platform_heap *unused)
{
struct ion_heap *heap;
heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
if (!heap)
return ERR_PTR(-ENOMEM);
heap->ops = &kmalloc_ops;
heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
return heap;
}
void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
kfree(heap);
}
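/*
 * Editorial sketch, not part of the original file: creating the legacy system
 * heap and tearing it down again. Registering the heap with an ion device is
 * assumed to happen elsewhere (for example in a platform driver's probe).
 */
static struct ion_heap *example_sys_heap;

static int example_sys_heap_init(void)
{
	example_sys_heap = ion_system_heap_create_legency(NULL);
	if (IS_ERR(example_sys_heap))
		return PTR_ERR(example_sys_heap);
	return 0;
}

static void example_sys_heap_exit(void)
{
	ion_system_heap_destroy(example_sys_heap);
}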


@@ -1,203 +0,0 @@
/*
* drivers/staging/android/uapi/ion.h
*
* Copyright (C) 2011 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _UAPI_LINUX_ION_H
#define _UAPI_LINUX_ION_H
#include <linux/ioctl.h>
#include <linux/types.h>
typedef int ion_user_handle_t;
/**
* enum ion_heap_types - list of all possible types of heaps
* @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc
* @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
* @ION_HEAP_TYPE_CARVEOUT: memory allocated from a prereserved
* carveout heap, allocations are physically
* contiguous
* @ION_HEAP_TYPE_DMA: memory allocated via DMA API
* @ION_NUM_HEAPS: helper for iterating over heaps, a bit mask
* is used to identify the heaps, so only 32
* total heap types are supported
*/
enum ion_heap_type {
ION_HEAP_TYPE_SYSTEM,
ION_HEAP_TYPE_SYSTEM_CONTIG,
ION_HEAP_TYPE_CARVEOUT,
ION_HEAP_TYPE_CHUNK,
ION_HEAP_TYPE_DMA,
ION_HEAP_TYPE_CUSTOM, /*
* must be last so device specific heaps always
* are at the end of this enum
*/
ION_NUM_HEAPS = 16,
};
#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA)
#define ION_NUM_HEAP_IDS (sizeof(unsigned int) * 8)
/**
* allocation flags - the lower 16 bits are used by core ion, the upper 16
* bits are reserved for use by the heaps themselves.
*/
#define ION_FLAG_CACHED 1 /*
* mappings of this buffer should be
* cached, ion will do cache
* maintenance when the buffer is
* mapped for dma
*/
#define ION_FLAG_CACHED_NEEDS_SYNC 2 /*
 * mappings of this buffer will be created
 * at mmap time; if this is set,
* caches must be managed
* manually
*/
/**
* DOC: Ion Userspace API
*
* create a client by opening /dev/ion
* most operations handled via following ioctls
*
*/
/**
* struct ion_allocation_data - metadata passed from userspace for allocations
* @len: size of the allocation
* @align: required alignment of the allocation
* @heap_id_mask: mask of heap ids to allocate from
* @flags: flags passed to heap
* @handle: pointer that will be populated with a cookie to use to
* refer to this allocation
*
* Provided by userspace as an argument to the ioctl
*/
struct ion_allocation_data {
size_t len;
size_t align;
unsigned int heap_id_mask;
unsigned int flags;
ion_user_handle_t handle;
};
/**
* struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
* @handle: a handle
* @fd: a file descriptor representing that handle
*
* For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
* the handle returned from ion alloc, and the kernel returns the file
* descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace
* provides the file descriptor and the kernel returns the handle.
*/
struct ion_fd_data {
ion_user_handle_t handle;
int fd;
};
/**
* struct ion_handle_data - a handle passed to/from the kernel
* @handle: a handle
*/
struct ion_handle_data {
ion_user_handle_t handle;
};
/**
* struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
* @cmd: the custom ioctl function to call
* @arg: additional data to pass to the custom ioctl, typically a user
* pointer to a predefined structure
*
* This works just like the regular cmd and arg fields of an ioctl.
*/
struct ion_custom_data {
unsigned int cmd;
unsigned long arg;
};
#define ION_IOC_MAGIC 'I'
/**
* DOC: ION_IOC_ALLOC - allocate memory
*
* Takes an ion_allocation_data struct and returns it with the handle field
* populated with the opaque handle for the allocation.
*/
#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
struct ion_allocation_data)
/**
* DOC: ION_IOC_FREE - free memory
*
* Takes an ion_handle_data struct and frees the handle.
*/
#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
/**
* DOC: ION_IOC_MAP - get a file descriptor to mmap
*
* Takes an ion_fd_data struct with the handle field populated with a valid
* opaque handle. Returns the struct with the fd field set to a file
* descriptor open in the current address space. This file descriptor
* can then be used as an argument to mmap.
*/
#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
/**
* DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
*
* Takes an ion_fd_data struct with the handle field populated with a valid
* opaque handle. Returns the struct with the fd field set to a file
* descriptor open in the current address space. This file descriptor
* can then be passed to another process. The corresponding opaque handle can
* be retrieved via ION_IOC_IMPORT.
*/
#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
/**
* DOC: ION_IOC_IMPORT - imports a shared file descriptor
*
* Takes an ion_fd_data struct with the fd field populated with a valid file
* descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
 * field set to the corresponding opaque handle.
*/
#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
/**
 * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory
*
* Deprecated in favor of using the dma_buf api's correctly (syncing
* will happen automatically when the buffer is mapped to a device).
 * If necessary, it should be used after touching a cached buffer from the CPU;
 * this will make the buffer in memory coherent.
*/
#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
/**
* DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
*
* Takes the argument of the architecture specific ioctl to call and
* passes appropriate userdata for that ioctl
*/
#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
#endif /* _UAPI_LINUX_ION_H */
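/*
 * Editorial sketch, not part of the original header: the legacy userspace
 * flow described by the DOC blocks above -- allocate, obtain a shareable fd,
 * mmap it, then drop the handle. Error handling is omitted and the heap mask
 * chosen here is only an example.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

int example_ion_alloc(size_t len)
{
	int ion_fd = open("/dev/ion", O_RDWR);
	struct ion_allocation_data alloc = {
		.len = len,
		.align = 0,
		.heap_id_mask = ION_HEAP_SYSTEM_MASK,
		.flags = ION_FLAG_CACHED,
	};
	struct ion_fd_data share;
	struct ion_handle_data done;
	void *ptr;

	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);		/* opaque handle returned in alloc.handle */

	share.handle = alloc.handle;
	ioctl(ion_fd, ION_IOC_SHARE, &share);		/* shareable, mmap-able fd in share.fd */

	ptr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, share.fd, 0);
	/* ... use ptr ... */
	munmap(ptr, len);

	done.handle = alloc.handle;
	ioctl(ion_fd, ION_IOC_FREE, &done);		/* release the kernel-side handle */
	close(share.fd);
	close(ion_fd);
	return 0;
}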


@@ -1,32 +0,0 @@
/*
* Copyright (C) 2012 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _UAPI_LINUX_SW_SYNC_H
#define _UAPI_LINUX_SW_SYNC_H
#include <linux/types.h>
struct sw_sync_create_fence_data {
__u32 value;
char name[32];
__s32 fence; /* fd of new fence */
};
#define SW_SYNC_IOC_MAGIC 'W'
#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
struct sw_sync_create_fence_data)
#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
#endif /* _UAPI_LINUX_SW_SYNC_H */
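/*
 * Editorial sketch, not part of the original header: driving a sw_sync
 * timeline from userspace. The device node path is an assumption --
 * historically /dev/sw_sync, with a debugfs equivalent on later kernels.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int example_sw_sync(void)
{
	int timeline = open("/dev/sw_sync", O_RDWR);
	struct sw_sync_create_fence_data data;
	__u32 inc = 1;

	memset(&data, 0, sizeof(data));
	data.value = 1;				/* fence signals once the timeline reaches 1 */
	strncpy(data.name, "example-fence", sizeof(data.name) - 1);
	ioctl(timeline, SW_SYNC_IOC_CREATE_FENCE, &data);	/* new fence fd in data.fence */

	ioctl(timeline, SW_SYNC_IOC_INC, &inc);	/* advance the timeline, signalling the fence */

	close(data.fence);
	close(timeline);
	return 0;
}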


@@ -1,97 +0,0 @@
/*
* Copyright (C) 2012 Google, Inc.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _UAPI_LINUX_SYNC_H
#define _UAPI_LINUX_SYNC_H
#include <linux/ioctl.h>
#include <linux/types.h>
/**
* struct sync_merge_data - data passed to merge ioctl
* @fd2: file descriptor of second fence
* @name: name of new fence
* @fence: returns the fd of the new fence to userspace
*/
struct sync_merge_data {
__s32 fd2; /* fd of second fence */
char name[32]; /* name of new fence */
__s32 fence; /* fd on newly created fence */
};
/**
* struct sync_pt_info - detailed sync_pt information
* @len: length of sync_pt_info including any driver_data
* @obj_name: name of parent sync_timeline
* @driver_name: name of driver implementing the parent
* @status: status of the sync_pt 0:active 1:signaled <0:error
* @timestamp_ns: timestamp of status change in nanoseconds
* @driver_data: any driver dependent data
*/
struct sync_pt_info {
__u32 len;
char obj_name[32];
char driver_name[32];
__s32 status;
__u64 timestamp_ns;
__u8 driver_data[0];
};
/**
* struct sync_fence_info_data - data returned from fence info ioctl
 * @len:	ioctl caller writes the size of the buffer it is passing in.
 *		ioctl returns the length of sync_fence_info_data returned to userspace
* including pt_info.
* @name: name of fence
* @status: status of fence. 1: signaled 0:active <0:error
* @pt_info: a sync_pt_info struct for every sync_pt in the fence
*/
struct sync_fence_info_data {
__u32 len;
char name[32];
__s32 status;
__u8 pt_info[0];
};
#define SYNC_IOC_MAGIC '>'
/**
* DOC: SYNC_IOC_WAIT - wait for a fence to signal
*
 * Pass timeout in milliseconds. Waits indefinitely if timeout < 0.
*/
#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32)
/**
* DOC: SYNC_IOC_MERGE - merge two fences
*
* Takes a struct sync_merge_data. Creates a new fence containing copies of
* the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
* new fence's fd in sync_merge_data.fence
*/
#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
/**
* DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
*
* Takes a struct sync_fence_info_data with extra space allocated for pt_info.
* Caller should write the size of the buffer into len. On return, len is
* updated to reflect the total size of the sync_fence_info_data including
* pt_info.
*
* pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
* To iterate over the sync_pt_infos, use the sync_pt_info.len field.
*/
#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\
struct sync_fence_info_data)
#endif /* _UAPI_LINUX_SYNC_H */
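/*
 * Editorial sketch, not part of the original header: waiting on one fence and
 * merging two, using the ioctls above. fd1 and fd2 are assumed to be fence
 * file descriptors obtained elsewhere (for example from sw_sync).
 */
#include <string.h>
#include <sys/ioctl.h>

int example_wait_and_merge(int fd1, int fd2)
{
	__s32 timeout_ms = 1000;	/* a negative timeout would wait indefinitely */
	struct sync_merge_data merge;

	ioctl(fd1, SYNC_IOC_WAIT, &timeout_ms);

	memset(&merge, 0, sizeof(merge));
	merge.fd2 = fd2;
	strncpy(merge.name, "merged", sizeof(merge.name) - 1);
	ioctl(fd1, SYNC_IOC_MERGE, &merge);	/* fd of the new merged fence in merge.fence */

	return merge.fence;
}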


@@ -17,7 +17,7 @@
#define _LINUX_ROCKCHIP_ION_H
#ifdef __KERNEL__
#include "../../drivers/staging/android/ion_legency/ion.h"
#include "../../drivers/staging/android/ion/ion.h"
#else
#include <linux/ion.h>
#endif