linux/drivers/infiniband/hw/hns/hns_roce_alloc.c
Wei Hu (Xavier) 5e6ff78a22 IB/hns: Change qpn allocation to round-robin mode.
When using CM to establish connections, a qp number that was just
freed will be rejected by the ib core. To fix this problem, we
change qpn allocation to round-robin mode: we add a round-robin
mode for allocating resources from the bitmap, use it for qp
numbers, and keep the non-round-robin mode for other resources
such as cq numbers, pd numbers, etc.

Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
2016-12-03 14:20:42 -05:00
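
Why round-robin helps is easiest to see in isolation. Below is a minimal
userspace sketch of the two free modes, not the driver's API (the demo_*
names are invented for illustration): non-round-robin free rewinds the
search cursor so a freed number is reused at once, while round-robin free
leaves the cursor alone, so a just-freed qpn does not come back until the
whole range wraps.

/* Illustration only; not part of the driver. */
#include <stdio.h>

#define DEMO_MAX 8                      /* number of allocatable objects */

struct demo_bitmap {
        unsigned int last;              /* next search position */
        unsigned char used[DEMO_MAX];
};

/* Allocate the first free number at or after 'last', wrapping once. */
static int demo_alloc(struct demo_bitmap *bm)
{
        unsigned int i, obj;

        for (i = 0; i < DEMO_MAX; i++) {
                obj = (bm->last + i) % DEMO_MAX;
                if (!bm->used[obj]) {
                        bm->used[obj] = 1;
                        bm->last = (obj + 1) % DEMO_MAX;
                        return obj;
                }
        }
        return -1;                      /* exhausted */
}

static void demo_free(struct demo_bitmap *bm, unsigned int obj, int rr)
{
        bm->used[obj] = 0;
        /*
         * Non-round-robin: rewind the cursor so the freed number is
         * reused immediately.  Round-robin: leave the cursor alone, so
         * the freed number only returns after the range wraps.
         */
        if (!rr && obj < bm->last)
                bm->last = obj;
}

int main(void)
{
        struct demo_bitmap a = { 0 }, b = { 0 };
        int obj;

        obj = demo_alloc(&a);                                   /* 0 */
        demo_free(&a, obj, 0);
        printf("non-rr reallocates: %d\n", demo_alloc(&a));     /* 0 again */

        obj = demo_alloc(&b);                                   /* 0 */
        demo_free(&b, obj, 1);
        printf("rr advances:        %d\n", demo_alloc(&b));     /* 1 */
        return 0;
}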

/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include "hns_roce_device.h"

int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
{
        int ret = 0;

        spin_lock(&bitmap->lock);

        /* Start searching just past the last allocated number so that
         * numbers are handed out in round-robin order.
         */
        *obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
        if (*obj >= bitmap->max) {
                /* Wrapped around: bump the generation held in the top
                 * bits and retry from the start of the table.
                 */
                bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
                               & bitmap->mask;
                *obj = find_first_zero_bit(bitmap->table, bitmap->max);
        }

        if (*obj < bitmap->max) {
                set_bit(*obj, bitmap->table);
                bitmap->last = (*obj + 1);
                if (bitmap->last == bitmap->max)
                        bitmap->last = 0;
                /* Fold the generation into the returned number */
                *obj |= bitmap->top;
        } else {
                ret = -1;
        }

        spin_unlock(&bitmap->lock);

        return ret;
}

void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
                          int rr)
{
        hns_roce_bitmap_free_range(bitmap, obj, 1, rr);
}

int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
                                int align, unsigned long *obj)
{
        int ret = 0;
        int i;

        if (likely(cnt == 1 && align == 1))
                return hns_roce_bitmap_alloc(bitmap, obj);

        spin_lock(&bitmap->lock);

        *obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
                                          bitmap->last, cnt, align - 1);
        if (*obj >= bitmap->max) {
                bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
                               & bitmap->mask;
                *obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, 0,
                                                  cnt, align - 1);
        }

        if (*obj < bitmap->max) {
                for (i = 0; i < cnt; i++)
                        set_bit(*obj + i, bitmap->table);

                if (*obj == bitmap->last) {
                        bitmap->last = (*obj + cnt);
                        if (bitmap->last >= bitmap->max)
                                bitmap->last = 0;
                }
                *obj |= bitmap->top;
        } else {
                ret = -1;
        }

        spin_unlock(&bitmap->lock);

        return ret;
}

void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
                                unsigned long obj, int cnt,
                                int rr)
{
        int i;

        /* Strip the generation (top) bits to recover the table index */
        obj &= bitmap->max + bitmap->reserved_top - 1;

        spin_lock(&bitmap->lock);
        for (i = 0; i < cnt; i++)
                clear_bit(obj + i, bitmap->table);

        /* Only non-round-robin mode rewinds 'last' so that the freed
         * number can be reused immediately; round-robin mode leaves
         * the search cursor where it is.
         */
        if (!rr)
                bitmap->last = min(bitmap->last, obj);
        bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
                       & bitmap->mask;
        spin_unlock(&bitmap->lock);
}

int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
                         u32 reserved_bot, u32 reserved_top)
{
        u32 i;

        if (num != roundup_pow_of_two(num))
                return -EINVAL;

        bitmap->last = 0;
        bitmap->top = 0;
        bitmap->max = num - reserved_top;
        bitmap->mask = mask;
        bitmap->reserved_top = reserved_top;
        spin_lock_init(&bitmap->lock);
        bitmap->table = kcalloc(BITS_TO_LONGS(bitmap->max), sizeof(long),
                                GFP_KERNEL);
        if (!bitmap->table)
                return -ENOMEM;

        /* Mark the reserved low-numbered objects as permanently in use */
        for (i = 0; i < reserved_bot; ++i)
                set_bit(i, bitmap->table);

        return 0;
}

void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap)
{
        kfree(bitmap->table);
}

void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
                       struct hns_roce_buf *buf)
{
        int i;
        struct device *dev = &hr_dev->pdev->dev;
        u32 bits_per_long = BITS_PER_LONG;

        if (buf->nbufs == 1) {
                dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
        } else {
                if (bits_per_long == 64)
                        vunmap(buf->direct.buf);

                for (i = 0; i < buf->nbufs; ++i)
                        if (buf->page_list[i].buf)
                                dma_free_coherent(&hr_dev->pdev->dev, PAGE_SIZE,
                                                  buf->page_list[i].buf,
                                                  buf->page_list[i].map);
                kfree(buf->page_list);
        }
}

int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
                       struct hns_roce_buf *buf)
{
        int i = 0;
        dma_addr_t t;
        struct page **pages;
        struct device *dev = &hr_dev->pdev->dev;
        u32 bits_per_long = BITS_PER_LONG;

        /* SQ/RQ buf less than one page, SQ + RQ = 8K */
        if (size <= max_direct) {
                buf->nbufs = 1;
                /* Npages calculated by page_size */
                buf->npages = 1 << get_order(size);
                buf->page_shift = PAGE_SHIFT;
                /* MTT PA must be recorded in 4k alignment, t is 4k aligned */
                buf->direct.buf = dma_alloc_coherent(dev, size, &t, GFP_KERNEL);
                if (!buf->direct.buf)
                        return -ENOMEM;

                buf->direct.map = t;

                /* Shrink the page size until it matches the alignment
                 * of the DMA address actually returned.
                 */
                while (t & ((1 << buf->page_shift) - 1)) {
                        --buf->page_shift;
                        buf->npages *= 2;
                }

                memset(buf->direct.buf, 0, size);
        } else {
                buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
                buf->npages = buf->nbufs;
                buf->page_shift = PAGE_SHIFT;
                buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
                                         GFP_KERNEL);
                if (!buf->page_list)
                        return -ENOMEM;

                for (i = 0; i < buf->nbufs; ++i) {
                        buf->page_list[i].buf = dma_alloc_coherent(dev,
                                                                   PAGE_SIZE, &t,
                                                                   GFP_KERNEL);
                        if (!buf->page_list[i].buf)
                                goto err_free;

                        buf->page_list[i].map = t;
                        memset(buf->page_list[i].buf, 0, PAGE_SIZE);
                }
                if (bits_per_long == 64) {
                        /* Stitch the scattered pages into one contiguous
                         * kernel virtual mapping reachable via
                         * buf->direct.buf.
                         */
                        pages = kmalloc_array(buf->nbufs, sizeof(*pages),
                                              GFP_KERNEL);
                        if (!pages)
                                goto err_free;

                        for (i = 0; i < buf->nbufs; ++i)
                                pages[i] = virt_to_page(buf->page_list[i].buf);

                        buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP,
                                               PAGE_KERNEL);
                        kfree(pages);
                        if (!buf->direct.buf)
                                goto err_free;
                }
        }

        return 0;

err_free:
        hns_roce_buf_free(hr_dev, size, buf);
        return -ENOMEM;
}

void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
{
        hns_roce_cleanup_qp_table(hr_dev);
        hns_roce_cleanup_cq_table(hr_dev);
        hns_roce_cleanup_mr_table(hr_dev);
        hns_roce_cleanup_pd_table(hr_dev);
        hns_roce_cleanup_uar_table(hr_dev);
}
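
For reference, a minimal sketch of how a consumer might drive this
allocator end to end. The function name example_qpn_lifecycle and all
sizes below are placeholders invented for illustration; real table
sizes, masks, and reserved counts come from the device capabilities
(hr_dev->caps), and the driver passes the round-robin flag only when
freeing qp numbers.

/* Hypothetical caller; sizes are placeholders, not driver values. */
static int example_qpn_lifecycle(void)
{
        struct hns_roce_bitmap qpn_bitmap;
        unsigned long qpn;
        int ret;

        /* 256 numbers (power of two), mask 255, first 8 reserved */
        ret = hns_roce_bitmap_init(&qpn_bitmap, 256, 255, 8, 0);
        if (ret)
                return ret;

        ret = hns_roce_bitmap_alloc(&qpn_bitmap, &qpn);
        if (ret)
                goto out;

        /* rr = 1: do not hand this qpn out again until the bitmap wraps */
        hns_roce_bitmap_free(&qpn_bitmap, qpn, 1);
out:
        hns_roce_bitmap_cleanup(&qpn_bitmap);
        return ret;
}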