drm/vmwgfx: Fix NULL pointer comparison
Replace direct comparisons to NULL, i.e. 'x == NULL', with '!x', as per the coding standard.

Signed-off-by: Ravikant B Sharma <ravikant.s2@samsung.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Signed-off-by: Sinclair Yeh <syeh@vmware.com>
commit 1a4adb0563
parent 9ff1beb1d1
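For illustration, a minimal sketch of the transformation applied throughout the hunks below (the struct name 'foo' and variable 'ptr' are placeholders, not identifiers from the driver):

	struct foo *ptr;

	/* Before: explicit comparison against NULL. */
	ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
	if (unlikely(ptr == NULL))
		return -ENOMEM;

	/* After: logical negation, as checkpatch.pl suggests
	 * ("Comparison to NULL could be written '!ptr'").
	 * The behaviour is identical; only the notation changes.
	 */
	ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
	if (unlikely(!ptr))
		return -ENOMEM;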
@@ -205,7 +205,7 @@ int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
 	int ret;
 
 	cres = kzalloc(sizeof(*cres), GFP_KERNEL);
-	if (unlikely(cres == NULL))
+	if (unlikely(!cres))
 		return -ENOMEM;
 
 	cres->hash.key = user_key | (res_type << 24);
@@ -291,7 +291,7 @@ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
 	int ret;
 
 	man = kzalloc(sizeof(*man), GFP_KERNEL);
-	if (man == NULL)
+	if (!man)
 		return ERR_PTR(-ENOMEM);
 
 	man->dev_priv = dev_priv;
@@ -776,7 +776,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
 	}
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-	if (unlikely(ctx == NULL)) {
+	if (unlikely(!ctx)) {
 		ttm_mem_global_free(vmw_mem_glob(dev_priv),
 				    vmw_user_context_size);
 		ret = -ENOMEM;
@@ -583,7 +583,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
 		return ERR_PTR(ret);
 
 	vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
-	if (unlikely(vcotbl == NULL)) {
+	if (unlikely(!vcotbl)) {
 		ret = -ENOMEM;
 		goto out_no_alloc;
 	}
@@ -630,7 +630,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	char host_log[100] = {0};
 
 	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
-	if (unlikely(dev_priv == NULL)) {
+	if (unlikely(!dev_priv)) {
 		DRM_ERROR("Failed allocating a device private struct.\n");
 		return -ENOMEM;
 	}
@@ -1035,7 +1035,7 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 	int ret = -ENOMEM;
 
 	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
-	if (unlikely(vmw_fp == NULL))
+	if (unlikely(!vmw_fp))
 		return ret;
 
 	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
@@ -1196,7 +1196,7 @@ static int vmw_master_create(struct drm_device *dev,
 	struct vmw_master *vmaster;
 
 	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
-	if (unlikely(vmaster == NULL))
+	if (unlikely(!vmaster))
 		return -ENOMEM;
 
 	vmw_master_init(vmaster);
@@ -264,7 +264,7 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
 	}
 
 	node = kzalloc(sizeof(*node), GFP_KERNEL);
-	if (unlikely(node == NULL)) {
+	if (unlikely(!node)) {
 		DRM_ERROR("Failed to allocate a resource validation "
 			  "entry.\n");
 		return -ENOMEM;
@@ -452,7 +452,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
 	struct vmw_resource_relocation *rel;
 
 	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
-	if (unlikely(rel == NULL)) {
+	if (unlikely(!rel)) {
 		DRM_ERROR("Failed to allocate a resource relocation.\n");
 		return -ENOMEM;
 	}
@@ -284,7 +284,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
 {
 	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
 
-	if (unlikely(fman == NULL))
+	if (unlikely(!fman))
 		return NULL;
 
 	fman->dev_priv = dev_priv;
@@ -541,7 +541,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
 	int ret;
 
 	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
-	if (unlikely(fence == NULL))
+	if (unlikely(!fence))
 		return -ENOMEM;
 
 	ret = vmw_fence_obj_init(fman, fence, seqno,
@@ -606,7 +606,7 @@ int vmw_user_fence_create(struct drm_file *file_priv,
 		return ret;
 
 	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
-	if (unlikely(ufence == NULL)) {
+	if (unlikely(!ufence)) {
 		ret = -ENOMEM;
 		goto out_no_object;
 	}
@@ -966,7 +966,7 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
 	struct vmw_fence_manager *fman = fman_from_fence(fence);
 
 	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
-	if (unlikely(eaction == NULL))
+	if (unlikely(!eaction))
 		return -ENOMEM;
 
 	eaction->event = event;
@@ -1002,7 +1002,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
 	int ret;
 
 	event = kzalloc(sizeof(*event), GFP_KERNEL);
-	if (unlikely(event == NULL)) {
+	if (unlikely(!event)) {
 		DRM_ERROR("Failed to allocate an event.\n");
 		ret = -ENOMEM;
 		goto out_no_space;
@@ -121,7 +121,7 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
 	struct vmwgfx_gmrid_man *gman =
 		kzalloc(sizeof(*gman), GFP_KERNEL);
 
-	if (unlikely(gman == NULL))
+	if (unlikely(!gman))
 		return -ENOMEM;
 
 	spin_lock_init(&gman->lock);
@@ -320,14 +320,14 @@ int vmw_otables_setup(struct vmw_private *dev_priv)
 
 	if (dev_priv->has_dx) {
 		*otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL);
-		if (*otables == NULL)
+		if (!(*otables))
 			return -ENOMEM;
 
 		dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables);
 	} else {
 		*otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables),
 				   GFP_KERNEL);
-		if (*otables == NULL)
+		if (!(*otables))
 			return -ENOMEM;
 
 		dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables);
@@ -407,7 +407,7 @@ struct vmw_mob *vmw_mob_create(unsigned long data_pages)
 {
 	struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);
 
-	if (unlikely(mob == NULL))
+	if (unlikely(!mob))
 		return NULL;
 
 	mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);
@@ -244,7 +244,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 
 	reply_len = ebx;
 	reply = kzalloc(reply_len + 1, GFP_KERNEL);
-	if (reply == NULL) {
+	if (!reply) {
 		DRM_ERROR("Cannot allocate memory for reply\n");
 		return -ENOMEM;
 	}
@@ -340,7 +340,7 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
 
 	msg_len = strlen(guest_info_param) + strlen("info-get ") + 1;
 	msg = kzalloc(msg_len, GFP_KERNEL);
-	if (msg == NULL) {
+	if (!msg) {
 		DRM_ERROR("Cannot allocate memory to get %s", guest_info_param);
 		return -ENOMEM;
 	}
@@ -400,7 +400,7 @@ int vmw_host_log(const char *log)
 
 	msg_len = strlen(log) + strlen("log ") + 1;
 	msg = kzalloc(msg_len, GFP_KERNEL);
-	if (msg == NULL) {
+	if (!msg) {
 		DRM_ERROR("Cannot allocate memory for log message\n");
 		return -ENOMEM;
 	}
@@ -446,7 +446,7 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 	int ret;
 
 	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
-	if (unlikely(user_bo == NULL)) {
+	if (unlikely(!user_bo)) {
 		DRM_ERROR("Failed to allocate a buffer.\n");
 		return -ENOMEM;
 	}
@@ -836,7 +836,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
 	}
 
 	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
-	if (unlikely(backup == NULL))
+	if (unlikely(!backup))
 		return -ENOMEM;
 
 	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
@@ -750,7 +750,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
 	}
 
 	ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
-	if (unlikely(ushader == NULL)) {
+	if (unlikely(!ushader)) {
 		ttm_mem_global_free(vmw_mem_glob(dev_priv),
 				    vmw_user_shader_size);
 		ret = -ENOMEM;
@@ -820,7 +820,7 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
 	}
 
 	shader = kzalloc(sizeof(*shader), GFP_KERNEL);
-	if (unlikely(shader == NULL)) {
+	if (unlikely(!shader)) {
 		ttm_mem_global_free(vmw_mem_glob(dev_priv),
 				    vmw_shader_size);
 		ret = -ENOMEM;
@@ -980,7 +980,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
 
 	/* Allocate and pin a DMA buffer */
 	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
-	if (unlikely(buf == NULL))
+	if (unlikely(!buf))
 		return -ENOMEM;
 
 	ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,