
commit d0bd99299b
Author: Jeff Garzik
Date: 2005-09-05 05:20:33 -04:00

    /spare/repo/libata-dev branch 'iomap-try3'

188 changed files with 20720 additions and 4173 deletions

View File

@ -0,0 +1,352 @@
Chelsio N210 10Gb Ethernet Network Controller
Driver Release Notes for Linux
Version 2.1.1
June 20, 2005
CONTENTS
========
INTRODUCTION
FEATURES
PERFORMANCE
DRIVER MESSAGES
KNOWN ISSUES
SUPPORT
INTRODUCTION
============
This document describes the Linux driver for the Chelsio 10Gb Ethernet Network
Controller. This driver supports the Chelsio N210 NIC and is backward
compatible with the Chelsio N110 model 10Gb NICs.
FEATURES
========
Adaptive Interrupts (adaptive-rx)
---------------------------------
This feature provides an adaptive algorithm that adjusts the interrupt
coalescing parameters, allowing the driver to dynamically adapt the latency
settings to achieve the highest performance during various types of network
load.
The interface used to control this feature is ethtool. Please see the
ethtool manpage for additional usage information.
By default, adaptive-rx is disabled.
To enable adaptive-rx:
ethtool -C <interface> adaptive-rx on
To disable adaptive-rx, use ethtool:
ethtool -C <interface> adaptive-rx off
After disabling adaptive-rx, the timer latency value will be set to 50us.
You may then set the timer latency to a different value:
ethtool -C <interface> rx-usecs <microseconds>
An example to set the timer latency value to 100us on eth0:
ethtool -C eth0 rx-usecs 100
You may also provide a timer latency value while disabling adaptive-rx:
ethtool -C <interface> adaptive-rx off rx-usecs <microseconds>
If adaptive-rx is disabled and a timer latency value is specified, the timer
will be set to the specified value until changed by the user or until
adaptive-rx is enabled.
To view the status of the adaptive-rx and timer latency values:
ethtool -c <interface>
TCP Segmentation Offloading (TSO) Support
-----------------------------------------
This feature, also known as "large send", enables a system's protocol stack
to offload portions of outbound TCP processing to a network interface card,
thereby reducing system CPU utilization and enhancing performance.
The interface used to control this feature is ethtool version 1.8 or higher.
Please see the ethtool manpage for additional usage information.
By default, TSO is enabled.
To disable TSO:
ethtool -K <interface> tso off
To enable TSO:
ethtool -K <interface> tso on
To view the status of TSO:
ethtool -k <interface>
PERFORMANCE
===========
The following information is provided as an example of how to change system
parameters for "performance tuning" and what values to use. You may or may not
want to change these system parameters, depending on your server/workstation
application. Doing so is not warranted in any way by Chelsio Communications,
and is done at "YOUR OWN RISK". Chelsio will not be held responsible for loss
of data or damage to equipment.
Your distribution may have a different way of doing things, or you may prefer
a different method. These commands are shown only to provide an example of
what to do and are by no means definitive.
Making any of the following system changes will only last until you reboot
your system. You may want to write a script that runs at boot-up and applies
the optimal settings for your system; a sketch of such a script follows the
list of settings below.
Setting PCI Latency Timer:
setpci -d 1425:* 0x0c.l=0x0000F800
Disabling TCP timestamp:
sysctl -w net.ipv4.tcp_timestamps=0
Disabling SACK:
sysctl -w net.ipv4.tcp_sack=0
Setting large number of incoming connection requests:
sysctl -w net.ipv4.tcp_max_syn_backlog=3000
Setting maximum receive socket buffer size:
sysctl -w net.core.rmem_max=1024000
Setting maximum send socket buffer size:
sysctl -w net.core.wmem_max=1024000
Set smp_affinity (on a multiprocessor system) to a single CPU:
echo 1 > /proc/irq/<interrupt_number>/smp_affinity
Setting default receive socket buffer size:
sysctl -w net.core.rmem_default=524287
Setting default send socket buffer size:
sysctl -w net.core.wmem_default=524287
Setting maximum option memory buffers:
sysctl -w net.core.optmem_max=524287
Setting maximum backlog (# of unprocessed packets before the kernel drops them):
sysctl -w net.core.netdev_max_backlog=300000
Setting TCP read buffers (min/default/max):
sysctl -w net.ipv4.tcp_rmem="10000000 10000000 10000000"
Setting TCP write buffers (min/pressure/max):
sysctl -w net.ipv4.tcp_wmem="10000000 10000000 10000000"
Setting TCP buffer space (min/pressure/max):
sysctl -w net.ipv4.tcp_mem="10000000 10000000 10000000"
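For example, a boot-time script collecting some of the settings above might
look like the following sketch. The file path and the chosen subset of values
are illustrative assumptions, not mandated by Chelsio; hook the script into
your distribution's init mechanism (e.g., rc.local) as appropriate:
    #!/bin/sh
    # Example N110/N210 performance tuning script -- adjust to taste.
    setpci -d 1425:* 0x0c.l=0x0000F800          # PCI latency timer
    sysctl -w net.ipv4.tcp_timestamps=0         # disable TCP timestamps
    sysctl -w net.ipv4.tcp_sack=0               # disable SACK
    sysctl -w net.ipv4.tcp_max_syn_backlog=3000 # incoming connection backlog
    sysctl -w net.core.rmem_max=1024000         # max receive socket buffer
    sysctl -w net.core.wmem_max=1024000         # max send socket buffer
    sysctl -w net.core.netdev_max_backlog=300000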
TCP window size for single connections:
The receive buffer (RX_WINDOW) size must be at least as large as the
Bandwidth-Delay Product of the communication link between the sender and
receiver. Due to the variations of RTT, you may want to increase the buffer
size up to 2 times the Bandwidth-Delay Product. Reference page 289 of
"TCP/IP Illustrated, Volume 1, The Protocols" by W. Richard Stevens.
At 10Gb speeds, use the following formula:
RX_WINDOW >= 1.25MBytes * RTT(in milliseconds)
Example for RTT with 100us: RX_WINDOW = (1,250,000 * 0.1) = 125,000
RX_WINDOW sizes of 256KB - 512KB should be sufficient.
Setting the min, max, and default receive buffer (RX_WINDOW) size:
sysctl -w net.ipv4.tcp_rmem="<min> <default> <max>"
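For example, assuming a target maximum window of 512KB with a 256KB default
(illustrative values only, derived from the formula above):
    sysctl -w net.ipv4.tcp_rmem="8192 262144 524288"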
TCP window size for multiple connections:
The receive buffer (RX_WINDOW) size may be calculated the same as single
connections, but should be divided by the number of connections. The
smaller window prevents congestion and facilitates better pacing,
especially if/when MAC level flow control does not work well or when it is
not supported on the machine. Experimentation may be necessary to attain
the correct value. This method is provided as a starting point for the
correct receive buffer size.
Setting the min, max, and default receive buffer (RX_WINDOW) size is
performed in the same manner as for a single connection.
DRIVER MESSAGES
===============
The following messages are the most common messages logged by syslog. These
may be found in /var/log/messages.
Driver up:
Chelsio Network Driver - version 2.1.1
NIC detected:
eth#: Chelsio N210 1x10GBaseX NIC (rev #), PCIX 133MHz/64-bit
Link up:
eth#: link is up at 10 Gbps, full duplex
Link down:
eth#: link is down
KNOWN ISSUES
============
These issues have been identified during testing. The following information
is provided as workarounds to these problems. In some cases, a problem is
inherent to Linux, or to a particular Linux distribution and/or hardware
platform.
1. Large number of TCP retransmits on a multiprocessor (SMP) system.
On a system with multiple CPUs, the interrupt (IRQ) for the network
controller may be bound to more than one CPU. This can cause TCP
retransmits if packet data is split across different CPUs and
reassembled in a different order than expected.
To eliminate the TCP retransmits, set smp_affinity on the particular
interrupt to a single CPU. You can locate the interrupt (IRQ) used on
the N110/N210 by using ifconfig:
ifconfig <dev_name> | grep Interrupt
Set the smp_affinity to a single CPU:
echo 1 > /proc/irq/<interrupt_number>/smp_affinity
It is highly suggested that you do not run the irqbalance daemon on your
system, as this will change any smp_affinity setting you have applied.
The irqbalance daemon runs on a 10-second interval and binds interrupts
to the least-loaded CPU as determined by the daemon. To disable this daemon:
chkconfig --level 2345 irqbalance off
By default, some Linux distributions enable the kernel feature,
irqbalance, which performs the same function as the daemon. To disable
this feature, add the following line to your bootloader:
noirqbalance
Example using the Grub bootloader:
title Red Hat Enterprise Linux AS (2.4.21-27.ELsmp)
root (hd0,0)
kernel /vmlinuz-2.4.21-27.ELsmp ro root=/dev/hda3 noirqbalance
initrd /initrd-2.4.21-27.ELsmp.img
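Note that the smp_affinity file takes a hexadecimal CPU bitmask rather than
a CPU index, so echo 1 above selects the first CPU; to bind the interrupt to
the second or third CPU instead:
    echo 2 > /proc/irq/<interrupt_number>/smp_affinity
    echo 4 > /proc/irq/<interrupt_number>/smp_affinity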
2. After running insmod, the driver is loaded and the incorrect network
interface is brought up without running ifup.
When using 2.4.x kernels, including RHEL kernels, the Linux kernel
invokes a script named "hotplug". This script is primarily used to
automatically bring up USB devices when they are plugged in, however,
the script also attempts to automatically bring up a network interface
after loading the kernel module. The hotplug script does this by scanning
the ifcfg-eth# config files in /etc/sysconfig/network-scripts, looking
for HWADDR=<mac_address>.
If the hotplug script does not find the HWADDR within any of the
ifcfg-eth# files, it will bring up the device with the next available
interface name. If this interface is already configured for a different
network card, your new interface will have an incorrect IP address and
network settings.
To solve this issue, you can add the HWADDR=<mac_address> key to the
interface config file of your network controller; a minimal example of such
a file appears at the end of this issue.
To disable this "hotplug" feature, you may add the driver (module name)
to the "blacklist" file located in /etc/hotplug. It has been noted that
this does not work for network devices because the net.agent script
does not use the blacklist file. Simply remove, or rename, the net.agent
script located in /etc/hotplug to disable this feature.
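A minimal sketch of such an interface config file (the device name, MAC
address, and addressing scheme below are placeholders for your own values):
    # /etc/sysconfig/network-scripts/ifcfg-eth0
    DEVICE=eth0
    HWADDR=00:07:43:12:34:56
    ONBOOT=yes
    BOOTPROTO=dhcp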
3. Transport Protocol (TP) hangs when running heavy multi-connection traffic
on an AMD Opteron system with HyperTransport PCI-X Tunnel chipset.
If your AMD Opteron system uses the AMD-8131 HyperTransport PCI-X Tunnel
chipset, you may experience the "133-MHz Mode Split Completion Data
Corruption" bug identified by AMD while using a 133MHz PCI-X card on the
PCI-X bus.
AMD states, "Under highly specific conditions, the AMD-8131 PCI-X Tunnel
can provide stale data via split completion cycles to a PCI-X card that
is operating at 133 Mhz", causing data corruption.
AMD provides three workarounds for this problem; however, Chelsio
recommends the first option for best performance with this bug:
For 133Mhz secondary bus operation, limit the transaction length and
the number of outstanding transactions, via BIOS configuration
programming of the PCI-X card, to the following:
Data Length (bytes): 1k
Total allowed outstanding transactions: 2
Please refer to AMD 8131-HT/PCI-X Errata 26310, Rev 3.08, August 2004,
section 56, "133-MHz Mode Split Completion Data Corruption", for more
details on this bug and the workarounds suggested by AMD.
It may be possible to work outside AMD's recommended PCI-X settings; try
increasing the Data Length to 2K bytes for increased performance. If you
have issues with these settings, please revert to the "safe" settings
and reproduce the problem before submitting a bug or asking for support.
NOTE: The default setting on most systems is 8 outstanding transactions
and 2k bytes data length.
4. On multiprocessor systems, it has been noted that an application which
is handling 10Gb networking can switch between CPUs, causing degraded
and/or unstable performance.
If running on an SMP system and taking performance measurements, it
is suggested you either run the latest netperf-2.4.0+ or use a binding
tool such as Tim Hockin's procstate utilities (runon)
<http://www.hockin.org/~thockin/procstate/>.
Binding netserver and netperf (or other applications) to particular
CPUs can make a significant difference in performance measurements.
You may need to experiment to determine which CPU to bind the application
to in order to achieve the best performance for your system.
If you are developing an application designed for 10Gb networking,
keep in mind that you may want to use the kernel functions
sched_setaffinity & sched_getaffinity to bind your application to
specific CPUs; a minimal sketch appears at the end of this section.
If you are just running user-space applications such as ftp, telnet,
etc., you may want to try the runon tool provided by Tim Hockin's
procstate utility. You could also try binding the interface to a
particular CPU: runon 0 ifup eth0
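As a minimal sketch of binding an application from C (this assumes the
glibc wrapper for sched_setaffinity; the CPU number and error handling are
illustrative only):
    /* pin.c - pin the calling process to CPU 0 before doing network I/O */
    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
        cpu_set_t mask;

        CPU_ZERO(&mask);              /* start with an empty CPU set */
        CPU_SET(0, &mask);            /* allow CPU 0 only */

        /* pid 0 means the calling process */
        if (sched_setaffinity(0, sizeof(mask), &mask) != 0) {
            perror("sched_setaffinity");
            return 1;
        }

        /* ... open sockets and run 10Gb traffic here ... */
        return 0;
    }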
SUPPORT
=======
If you have problems with the software or hardware, please contact our
customer support team via email at support@chelsio.com or check our website
at http://www.chelsio.com
===============================================================================
Chelsio Communications
370 San Aleso Ave.
Suite 100
Sunnyvale, CA 94085
http://www.chelsio.com
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2, as
published by the Free Software Foundation.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
Copyright (c) 2003-2005 Chelsio Communications. All rights reserved.
===============================================================================

View File

@ -132,6 +132,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
     mpu_irq	- IRQ # for MPU-401 UART (PnP setup)
     dma1	- first DMA # for AD1816A chip (PnP setup)
     dma2	- second DMA # for AD1816A chip (PnP setup)
+    clockfreq	- Clock frequency for AD1816A chip (default = 0, 33000Hz)
 
     Module supports up to 8 cards, autoprobe and PnP.

View File

@ -3422,10 +3422,17 @@ struct _snd_pcm_runtime {
   <para>
     The <structfield>iface</structfield> field specifies the type of
-    the control,
-    <constant>SNDRV_CTL_ELEM_IFACE_XXX</constant>. There are
-    <constant>MIXER</constant>, <constant>PCM</constant>,
-    <constant>CARD</constant>, etc.
+    the control, <constant>SNDRV_CTL_ELEM_IFACE_XXX</constant>, which
+    is usually <constant>MIXER</constant>.
+    Use <constant>CARD</constant> for global controls that are not
+    logically part of the mixer.
+    If the control is closely associated with some specific device on
+    the sound card, use <constant>HWDEP</constant>,
+    <constant>PCM</constant>, <constant>RAWMIDI</constant>,
+    <constant>TIMER</constant>, or <constant>SEQUENCER</constant>, and
+    specify the device number with the
+    <structfield>device</structfield> and
+    <structfield>subdevice</structfield> fields.
   </para>
   <para>

View File

@ -2092,6 +2092,12 @@ M: support@simtec.co.uk
 W: http://www.simtec.co.uk/products/EB2410ITX/
 S: Supported
 
+SIS 190 ETHERNET DRIVER
+P: Francois Romieu
+M: romieu@fr.zoreil.com
+L: netdev@vger.kernel.org
+S: Maintained
+
 SIS 5513 IDE CONTROLLER DRIVER
 P: Lionel Bouton
 M: Lionel.Bouton@inet6.fr

View File

@ -165,7 +165,6 @@ static int __init pcibios_init(void)
 	if ((pci_probe & PCI_BIOS_SORT) && !(pci_probe & PCI_NO_SORT))
 		pcibios_sort();
 #endif
-	pci_assign_unassigned_resources();
 	return 0;
 }

View File

@ -170,43 +170,26 @@ static void __init pcibios_allocate_resources(int pass)
 static int __init pcibios_assign_resources(void)
 {
 	struct pci_dev *dev = NULL;
-	int idx;
-	struct resource *r;
-
-	for_each_pci_dev(dev) {
-		int class = dev->class >> 8;
-
-		/* Don't touch classless devices and host bridges */
-		if (!class || class == PCI_CLASS_BRIDGE_HOST)
-			continue;
-
-		for(idx=0; idx<6; idx++) {
-			r = &dev->resource[idx];
-
-			/*
-			 * Don't touch IDE controllers and I/O ports of video cards!
-			 */
-			if ((class == PCI_CLASS_STORAGE_IDE && idx < 4) ||
-			    (class == PCI_CLASS_DISPLAY_VGA && (r->flags & IORESOURCE_IO)))
-				continue;
-
-			/*
-			 * We shall assign a new address to this resource, either because
-			 * the BIOS forgot to do so or because we have decided the old
-			 * address was unusable for some reason.
-			 */
-			if (!r->start && r->end)
-				pci_assign_resource(dev, idx);
-		}
-
-		if (pci_probe & PCI_ASSIGN_ROMS) {
-			r = &dev->resource[PCI_ROM_RESOURCE];
-			r->end -= r->start;
-			r->start = 0;
-			if (r->end)
-				pci_assign_resource(dev, PCI_ROM_RESOURCE);
-		}
-	}
+	struct resource *r, *pr;
+
+	if (!(pci_probe & PCI_ASSIGN_ROMS)) {
+		/* Try to use BIOS settings for ROMs, otherwise let
+		   pci_assign_unassigned_resources() allocate the new
+		   addresses. */
+		for_each_pci_dev(dev) {
+			r = &dev->resource[PCI_ROM_RESOURCE];
+			if (!r->flags || !r->start)
+				continue;
+			pr = pci_find_parent_resource(dev, r);
+			if (!pr || request_resource(pr, r) < 0) {
+				r->end -= r->start;
+				r->start = 0;
+			}
+		}
+	}
+
+	pci_assign_unassigned_resources();
+
 	return 0;
 }

View File

@ -57,7 +57,7 @@ unsigned char __res[sizeof(bd_t)];
 extern void m8xx_ide_init(void);
 extern unsigned long find_available_memory(void);
-extern void m8xx_cpm_reset();
+extern void m8xx_cpm_reset(void);
 extern void m8xx_wdt_handler_install(bd_t *bp);
 extern void rpxfb_alloc_pages(void);
 extern void cpm_interrupt_init(void);
@ -266,8 +266,8 @@ m8xx_show_percpuinfo(struct seq_file *m, int i)
 	bp = (bd_t *)__res;
 
-	seq_printf(m, "clock\t\t: %ldMHz\n"
-		   "bus clock\t: %ldMHz\n",
+	seq_printf(m, "clock\t\t: %uMHz\n"
+		   "bus clock\t: %uMHz\n",
 		   bp->bi_intfreq / 1000000,
 		   bp->bi_busfreq / 1000000);

View File

@ -23,13 +23,6 @@ config DRM_TDFX
 	  Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
 	  graphics card.  If M is selected, the module will be called tdfx.
 
-config DRM_GAMMA
-	tristate "3dlabs GMX 2000"
-	depends on DRM && BROKEN
-	help
-	  This is the old gamma driver, please tell me if it might actually
-	  work.
-
 config DRM_R128
 	tristate "ATI Rage 128"
 	depends on DRM && PCI
@ -82,7 +75,7 @@ endchoice
 config DRM_MGA
 	tristate "Matrox g200/g400"
-	depends on DRM && AGP
+	depends on DRM
 	help
 	  Choose this option if you have a Matrox G200, G400 or G450 graphics
 	  card.  If M is selected, the module will be called mga.  AGP
@ -103,3 +96,10 @@ config DRM_VIA
 	  Choose this option if you have a Via unichrome or compatible video
 	  chipset. If M is selected the module will be called via.
 
+config DRM_SAVAGE
+	tristate "Savage video cards"
+	depends on DRM
+	help
+	  Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
+	  chipset. If M is selected the module will be called savage.

View File

@ -8,16 +8,16 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
 		drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
 		drm_sysfs.o
 
-gamma-objs  := gamma_drv.o gamma_dma.o
 tdfx-objs   := tdfx_drv.o
 r128-objs   := r128_drv.o r128_cce.o r128_state.o r128_irq.o
 mga-objs    := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
 i810-objs   := i810_drv.o i810_dma.o
 i830-objs   := i830_drv.o i830_dma.o i830_irq.o
 i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
-radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o
+radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
 ffb-objs    := ffb_drv.o ffb_context.o
 sis-objs    := sis_drv.o sis_ds.o sis_mm.o
+savage-objs := savage_drv.o savage_bci.o savage_state.o
 via-objs    := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o
 
 ifeq ($(CONFIG_COMPAT),y)
@ -29,7 +29,6 @@ i915-objs += i915_ioc32.o
 endif
 
 obj-$(CONFIG_DRM)	+= drm.o
-obj-$(CONFIG_DRM_GAMMA) += gamma.o
 obj-$(CONFIG_DRM_TDFX)	+= tdfx.o
 obj-$(CONFIG_DRM_R128)	+= r128.o
 obj-$(CONFIG_DRM_RADEON)+= radeon.o
@ -39,5 +38,7 @@ obj-$(CONFIG_DRM_I830) += i830.o
 obj-$(CONFIG_DRM_I915)	+= i915.o
 obj-$(CONFIG_DRM_FFB)	+= ffb.o
 obj-$(CONFIG_DRM_SIS)	+= sis.o
+obj-$(CONFIG_DRM_SAVAGE)+= savage.o
 obj-$(CONFIG_DRM_VIA)	+=via.o

View File

@ -98,7 +98,7 @@
 #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
 
-typedef unsigned long drm_handle_t;
+typedef unsigned int drm_handle_t;
 typedef unsigned int drm_context_t;
 typedef unsigned int drm_drawable_t;
 typedef unsigned int drm_magic_t;
@ -209,7 +209,8 @@ typedef enum drm_map_type {
 	_DRM_REGISTERS = 1,	  /**< no caching, no core dump */
 	_DRM_SHM = 2,		  /**< shared, cached */
 	_DRM_AGP = 3,		  /**< AGP/GART */
-	_DRM_SCATTER_GATHER = 4	  /**< Scatter/gather memory for PCI DMA */
+	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
+	_DRM_CONSISTENT = 5,	  /**< Consistent memory for PCI DMA */
 } drm_map_type_t;
@ -368,7 +369,8 @@ typedef struct drm_buf_desc {
 	enum {
 		_DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
 		_DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
-		_DRM_SG_BUFFER  = 0x04  /**< Scatter/gather memory buffer */
+		_DRM_SG_BUFFER  = 0x04, /**< Scatter/gather memory buffer */
+		_DRM_FB_BUFFER  = 0x08  /**< Buffer is in frame buffer */
 	} flags;
 	unsigned long agp_start; /**<
 				  * Start address of where the AGP buffers are

View File

@ -53,7 +53,6 @@
 #include <linux/init.h>
 #include <linux/file.h>
 #include <linux/pci.h>
-#include <linux/version.h>
 #include <linux/jiffies.h>
 #include <linux/smp_lock.h>	/* For (un)lock_kernel */
 #include <linux/mm.h>
@ -96,6 +95,7 @@
 #define DRIVER_IRQ_SHARED  0x80
 #define DRIVER_IRQ_VBL     0x100
 #define DRIVER_DMA_QUEUE   0x200
+#define DRIVER_FB_DMA      0x400
 
 /***********************************************************************/
 /** \name Begin the DRM... */
@ -160,36 +160,7 @@
 #define pte_unmap(pte)
 #endif
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19)
-static inline struct page * vmalloc_to_page(void * vmalloc_addr)
-{
-	unsigned long addr = (unsigned long) vmalloc_addr;
-	struct page *page = NULL;
-	pgd_t *pgd = pgd_offset_k(addr);
-	pmd_t *pmd;
-	pte_t *ptep, pte;
-
-	if (!pgd_none(*pgd)) {
-		pmd = pmd_offset(pgd, addr);
-		if (!pmd_none(*pmd)) {
-			preempt_disable();
-			ptep = pte_offset_map(pmd, addr);
-			pte = *ptep;
-			if (pte_present(pte))
-				page = pte_page(pte);
-			pte_unmap(ptep);
-			preempt_enable();
-		}
-	}
-	return page;
-}
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-#define DRM_RPR_ARG(vma)
-#else
 #define DRM_RPR_ARG(vma) vma,
-#endif
 
 #define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
@ -474,7 +445,8 @@ typedef struct drm_device_dma {
 	unsigned long byte_count;
 	enum {
 		_DRM_DMA_USE_AGP = 0x01,
-		_DRM_DMA_USE_SG = 0x02
+		_DRM_DMA_USE_SG = 0x02,
+		_DRM_DMA_USE_FB = 0x04
 	} flags;
 
 } drm_device_dma_t;
@ -525,12 +497,19 @@ typedef struct drm_sigdata {
 	drm_hw_lock_t *lock;
 } drm_sigdata_t;
 
+typedef struct drm_dma_handle {
+	dma_addr_t busaddr;
+	void *vaddr;
+	size_t size;
+} drm_dma_handle_t;
+
 /**
  * Mappings list
  */
 typedef struct drm_map_list {
 	struct list_head head;	/**< list head */
 	drm_map_t *map;		/**< mapping */
+	unsigned int user_token;
 } drm_map_list_t;
 
 typedef drm_map_t drm_local_map_t;
@ -578,7 +557,22 @@ struct drm_driver {
 	int (*kernel_context_switch)(struct drm_device *dev, int old, int new);
 	void (*kernel_context_switch_unlock)(struct drm_device *dev, drm_lock_t *lock);
 	int (*vblank_wait)(struct drm_device *dev, unsigned int *sequence);
+
+	/**
+	 * Called by \c drm_device_is_agp.  Typically used to determine if a
+	 * card is really attached to AGP or not.
+	 *
+	 * \param dev  DRM device handle
+	 *
+	 * \returns
+	 * One of three values is returned depending on whether or not the
+	 * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
+	 * (return of 1), or may or may not be AGP (return of 2).
+	 */
+	int (*device_is_agp) (struct drm_device * dev);
+
 	/* these have to be filled in */
 	int (*postinit)(struct drm_device *, unsigned long flags);
 	irqreturn_t (*irq_handler)( DRM_IRQ_ARGS );
 	void (*irq_preinstall)(struct drm_device *dev);
@ -722,11 +716,7 @@ typedef struct drm_device {
 	int pci_slot;		/**< PCI slot number */
 	int pci_func;		/**< PCI function number */
 #ifdef __alpha__
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3)
-	struct pci_controler *hose;
-#else
 	struct pci_controller *hose;
-#endif
 #endif
 	drm_sg_mem_t *sg;	/**< Scatter gather memory */
 	unsigned long *ctx_bitmap;	/**< context bitmap */
@ -736,6 +726,7 @@ typedef struct drm_device {
 	struct drm_driver *driver;
 	drm_local_map_t *agp_buffer_map;
+	unsigned int agp_buffer_token;
 	drm_head_t primary;		/**< primary screen head */
 } drm_device_t;
@ -806,7 +797,7 @@ extern void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
 				 drm_device_t *dev);
 extern void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *dev);
 
-extern DRM_AGP_MEM *drm_alloc_agp(struct agp_bridge_data *bridge, int pages, u32 type);
+extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type);
 extern int drm_free_agp(DRM_AGP_MEM *handle, int pages);
 extern int drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start);
 extern int drm_unbind_agp(DRM_AGP_MEM *handle);
@ -881,11 +872,19 @@ extern int drm_lock_free(drm_device_t *dev,
 			 unsigned int context);
 
 /* Buffer management support (drm_bufs.h) */
+extern int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request);
+extern int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request);
+extern int drm_addmap(drm_device_t *dev, unsigned int offset,
+		      unsigned int size, drm_map_type_t type,
+		      drm_map_flags_t flags, drm_local_map_t **map_ptr);
+extern int drm_addmap_ioctl(struct inode *inode, struct file *filp,
+			    unsigned int cmd, unsigned long arg);
+extern int drm_rmmap(drm_device_t *dev, drm_local_map_t *map);
+extern int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map);
+extern int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
+			   unsigned int cmd, unsigned long arg);
+
 extern int drm_order( unsigned long size );
-extern int drm_addmap( struct inode *inode, struct file *filp,
-		       unsigned int cmd, unsigned long arg );
-extern int drm_rmmap( struct inode *inode, struct file *filp,
-		      unsigned int cmd, unsigned long arg );
 extern int drm_addbufs( struct inode *inode, struct file *filp,
 			unsigned int cmd, unsigned long arg );
 extern int drm_infobufs( struct inode *inode, struct file *filp,
@ -896,6 +895,10 @@ extern int drm_freebufs( struct inode *inode, struct file *filp,
 			 unsigned int cmd, unsigned long arg );
 extern int drm_mapbufs( struct inode *inode, struct file *filp,
 			unsigned int cmd, unsigned long arg );
+extern unsigned long drm_get_resource_start(drm_device_t *dev,
+					    unsigned int resource);
+extern unsigned long drm_get_resource_len(drm_device_t *dev,
+					  unsigned int resource);
 
 /* DMA support (drm_dma.h) */
 extern int drm_dma_setup(drm_device_t *dev);
@ -919,15 +922,18 @@ extern void drm_vbl_send_signals( drm_device_t *dev );
 /* AGP/GART support (drm_agpsupport.h) */
 extern drm_agp_head_t *drm_agp_init(drm_device_t *dev);
-extern int drm_agp_acquire(struct inode *inode, struct file *filp,
-			   unsigned int cmd, unsigned long arg);
-extern void drm_agp_do_release(drm_device_t *dev);
-extern int drm_agp_release(struct inode *inode, struct file *filp,
-			   unsigned int cmd, unsigned long arg);
-extern int drm_agp_enable(struct inode *inode, struct file *filp,
-			  unsigned int cmd, unsigned long arg);
-extern int drm_agp_info(struct inode *inode, struct file *filp,
-			unsigned int cmd, unsigned long arg);
+extern int drm_agp_acquire(drm_device_t * dev);
+extern int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
+				 unsigned int cmd, unsigned long arg);
+extern int drm_agp_release(drm_device_t *dev);
+extern int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
+				 unsigned int cmd, unsigned long arg);
+extern int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode);
+extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
+				unsigned int cmd, unsigned long arg);
+extern int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info);
+extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
+			      unsigned int cmd, unsigned long arg);
 extern int drm_agp_alloc(struct inode *inode, struct file *filp,
 			 unsigned int cmd, unsigned long arg);
 extern int drm_agp_free(struct inode *inode, struct file *filp,
@ -976,12 +982,10 @@ extern int drm_ati_pcigart_cleanup(drm_device_t *dev,
 				   unsigned long addr,
 				   dma_addr_t bus_addr);
 
-extern void *drm_pci_alloc(drm_device_t * dev, size_t size,
-			   size_t align, dma_addr_t maxaddr,
-			   dma_addr_t * busaddr);
-extern void drm_pci_free(drm_device_t * dev, size_t size,
-			 void *vaddr, dma_addr_t busaddr);
+extern drm_dma_handle_t *drm_pci_alloc(drm_device_t *dev, size_t size,
+				       size_t align, dma_addr_t maxaddr);
+extern void __drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah);
+extern void drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah);
 
 /* sysfs support (drm_sysfs.c) */
 struct drm_sysfs_class;
@ -1012,17 +1016,26 @@ static __inline__ void drm_core_ioremapfree(struct drm_map *map, struct drm_devi
 		drm_ioremapfree( map->handle, map->size, dev );
 }
 
-static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, unsigned long offset)
+static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, unsigned int token)
 {
-	struct list_head *_list;
-	list_for_each( _list, &dev->maplist->head ) {
-		drm_map_list_t *_entry = list_entry( _list, drm_map_list_t, head );
-		if ( _entry->map &&
-		     _entry->map->offset == offset ) {
+	drm_map_list_t *_entry;
+	list_for_each_entry(_entry, &dev->maplist->head, head)
+		if (_entry->user_token == token)
 			return _entry->map;
+	return NULL;
+}
+
+static __inline__ int drm_device_is_agp(drm_device_t *dev)
+{
+	if ( dev->driver->device_is_agp != NULL ) {
+		int err = (*dev->driver->device_is_agp)( dev );
+		if (err != 2) {
+			return err;
 		}
 	}
-	return NULL;
+
+	return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
 }
 
 static __inline__ void drm_core_dropmap(struct drm_map *map)

View File

@ -37,7 +37,7 @@
 #if __OS_HAS_AGP
 
 /**
- * AGP information ioctl.
+ * Get AGP information.
  *
  * \param inode device inode.
  * \param filp file pointer.
@ -48,51 +48,56 @@
  * Verifies the AGP device has been initialized and acquired and fills in the
  * drm_agp_info structure with the information in drm_agp_head::agp_info.
  */
-int drm_agp_info(struct inode *inode, struct file *filp,
-		 unsigned int cmd, unsigned long arg)
+int drm_agp_info(drm_device_t *dev, drm_agp_info_t *info)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
 	DRM_AGP_KERN *kern;
-	drm_agp_info_t info;
 
 	if (!dev->agp || !dev->agp->acquired)
 		return -EINVAL;
 
 	kern = &dev->agp->agp_info;
-	info.agp_version_major = kern->version.major;
-	info.agp_version_minor = kern->version.minor;
-	info.mode = kern->mode;
-	info.aperture_base = kern->aper_base;
-	info.aperture_size = kern->aper_size * 1024 * 1024;
-	info.memory_allowed = kern->max_memory << PAGE_SHIFT;
-	info.memory_used = kern->current_memory << PAGE_SHIFT;
-	info.id_vendor = kern->device->vendor;
-	info.id_device = kern->device->device;
+	info->agp_version_major = kern->version.major;
+	info->agp_version_minor = kern->version.minor;
+	info->mode = kern->mode;
+	info->aperture_base = kern->aper_base;
+	info->aperture_size = kern->aper_size * 1024 * 1024;
+	info->memory_allowed = kern->max_memory << PAGE_SHIFT;
+	info->memory_used = kern->current_memory << PAGE_SHIFT;
+	info->id_vendor = kern->device->vendor;
+	info->id_device = kern->device->device;
 
-	if (copy_to_user((drm_agp_info_t __user *)arg, &info, sizeof(info)))
+	return 0;
+}
+EXPORT_SYMBOL(drm_agp_info);
+
+int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
+		       unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_agp_info_t info;
+	int err;
+
+	err = drm_agp_info(dev, &info);
+	if (err)
+		return err;
+
+	if (copy_to_user((drm_agp_info_t __user *) arg, &info, sizeof(info)))
 		return -EFAULT;
 	return 0;
 }
 
 /**
- * Acquire the AGP device (ioctl).
+ * Acquire the AGP device.
  *
- * \param inode device inode.
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument.
+ * \param dev DRM device that is to acquire AGP
  * \return zero on success or a negative number on failure.
  *
  * Verifies the AGP device hasn't been acquired before and calls
- * agp_acquire().
+ * \c agp_backend_acquire.
  */
-int drm_agp_acquire(struct inode *inode, struct file *filp,
-		    unsigned int cmd, unsigned long arg)
+int drm_agp_acquire(drm_device_t *dev)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-
 	if (!dev->agp)
 		return -ENODEV;
 	if (dev->agp->acquired)
@ -102,9 +107,10 @@ int drm_agp_acquire(struct inode *inode, struct file *filp,
 	dev->agp->acquired = 1;
 	return 0;
 }
+EXPORT_SYMBOL(drm_agp_acquire);
 
 /**
- * Release the AGP device (ioctl).
+ * Acquire the AGP device (ioctl).
  *
  * \param inode device inode.
  * \param filp file pointer.
@ -112,63 +118,80 @@ int drm_agp_acquire(struct inode *inode, struct file *filp,
  * \param arg user argument.
  * \return zero on success or a negative number on failure.
  *
- * Verifies the AGP device has been acquired and calls agp_backend_release().
+ * Verifies the AGP device hasn't been acquired before and calls
+ * \c agp_backend_acquire.
  */
-int drm_agp_release(struct inode *inode, struct file *filp,
-		    unsigned int cmd, unsigned long arg)
+int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
+			  unsigned int cmd, unsigned long arg)
 {
 	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
 
-	if (!dev->agp || !dev->agp->acquired)
-		return -EINVAL;
-	agp_backend_release(dev->agp->bridge);
-	dev->agp->acquired = 0;
-	return 0;
+	return drm_agp_acquire( (drm_device_t *) priv->head->dev );
 }
 
 /**
  * Release the AGP device.
  *
- * Calls agp_backend_release().
+ * \param dev DRM device that is to release AGP
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been acquired and calls \c agp_backend_release.
  */
-void drm_agp_do_release(drm_device_t *dev)
+int drm_agp_release(drm_device_t *dev)
 {
-	agp_backend_release(dev->agp->bridge);
+	if (!dev->agp || !dev->agp->acquired)
+		return -EINVAL;
+	agp_backend_release(dev->agp->bridge);
+	dev->agp->acquired = 0;
+	return 0;
+}
+EXPORT_SYMBOL(drm_agp_release);
+
+int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
+			  unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+
+	return drm_agp_release(dev);
 }
 
 /**
  * Enable the AGP bus.
  *
- * \param inode device inode.
- * \param filp file pointer.
- * \param cmd command.
- * \param arg pointer to a drm_agp_mode structure.
+ * \param dev DRM device that has previously acquired AGP.
+ * \param mode Requested AGP mode.
  * \return zero on success or a negative number on failure.
  *
  * Verifies the AGP device has been acquired but not enabled, and calls
- * agp_enable().
+ * \c agp_enable.
  */
-int drm_agp_enable(struct inode *inode, struct file *filp,
-		   unsigned int cmd, unsigned long arg)
+int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
-	drm_agp_mode_t mode;
-
 	if (!dev->agp || !dev->agp->acquired)
 		return -EINVAL;
 
-	if (copy_from_user(&mode, (drm_agp_mode_t __user *)arg, sizeof(mode)))
-		return -EFAULT;
-
 	dev->agp->mode = mode.mode;
 	agp_enable(dev->agp->bridge, mode.mode);
 	dev->agp->base = dev->agp->agp_info.aper_base;
 	dev->agp->enabled = 1;
 	return 0;
 }
+EXPORT_SYMBOL(drm_agp_enable);
+
+int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
+			 unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_agp_mode_t mode;
+
+	if (copy_from_user(&mode, (drm_agp_mode_t __user *) arg, sizeof(mode)))
+		return -EFAULT;
+
+	return drm_agp_enable(dev, mode);
+}
@ -206,7 +229,7 @@ int drm_agp_alloc(struct inode *inode, struct file *filp,
 	pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
 	type = (u32) request.type;
 
-	if (!(memory = drm_alloc_agp(dev->agp->bridge, pages, type))) {
+	if (!(memory = drm_alloc_agp(dev, pages, type))) {
 		drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
 		return -ENOMEM;
 	}
@ -403,13 +426,8 @@ drm_agp_head_t *drm_agp_init(drm_device_t *dev)
 		return NULL;
 	}
 	head->memory = NULL;
-#if LINUX_VERSION_CODE <= 0x020408
-	head->cant_use_aperture = 0;
-	head->page_mask = ~(0xfff);
-#else
 	head->cant_use_aperture = head->agp_info.cant_use_aperture;
 	head->page_mask = head->agp_info.page_mask;
-#endif
 
 	return head;
 }
@ -436,6 +454,7 @@ int drm_agp_bind_memory(DRM_AGP_MEM *handle, off_t start)
 		return -EINVAL;
 	return agp_bind_memory(handle, start);
 }
+EXPORT_SYMBOL(drm_agp_bind_memory);
 
 /** Calls agp_unbind_memory() */
 int drm_agp_unbind_memory(DRM_AGP_MEM *handle)

View File

@ -36,37 +36,69 @@
 #include <linux/vmalloc.h>
 #include "drmP.h"
 
-/**
- * Compute size order.  Returns the exponent of the smaller power of two which
- * is greater or equal to given number.
- *
- * \param size size.
- * \return order.
- *
- * \todo Can be made faster.
- */
-int drm_order( unsigned long size )
+unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
 {
-	int order;
-	unsigned long tmp;
-
-	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
-		;
-
-	if (size & (size - 1))
-		++order;
-
-	return order;
+	return pci_resource_start(dev->pdev, resource);
 }
-EXPORT_SYMBOL(drm_order);
+EXPORT_SYMBOL(drm_get_resource_start);
+
+unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
+{
+	return pci_resource_len(dev->pdev, resource);
+}
+EXPORT_SYMBOL(drm_get_resource_len);
+
+static drm_local_map_t *drm_find_matching_map(drm_device_t *dev,
+					      drm_local_map_t *map)
+{
+	struct list_head *list;
+
+	list_for_each(list, &dev->maplist->head) {
+		drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
+		if (entry->map && map->type == entry->map->type &&
+		    entry->map->offset == map->offset) {
+			return entry->map;
+		}
+	}
+
+	return NULL;
+}
 
-#ifdef CONFIG_COMPAT
 /*
- * Used to allocate 32-bit handles for _DRM_SHM regions
- * The 0x10000000 value is chosen to be out of the way of
- * FB/register and GART physical addresses.
+ * Used to allocate 32-bit handles for mappings.
  */
-static unsigned int map32_handle = 0x10000000;
+#define START_RANGE 0x10000000
+#define END_RANGE 0x40000000
+
+#ifdef _LP64
+static __inline__ unsigned int HandleID(unsigned long lhandle, drm_device_t *dev)
+{
+	static unsigned int map32_handle = START_RANGE;
+	unsigned int hash;
+
+	if (lhandle & 0xffffffff00000000) {
+		hash = map32_handle;
+		map32_handle += PAGE_SIZE;
+		if (map32_handle > END_RANGE)
+			map32_handle = START_RANGE;
+	} else
+		hash = lhandle;
+
+	while (1) {
+		drm_map_list_t *_entry;
+		list_for_each_entry(_entry, &dev->maplist->head,head) {
+			if (_entry->user_token == hash)
+				break;
+		}
+		if (&_entry->head == &dev->maplist->head)
+			return hash;
+
+		hash += PAGE_SIZE;
+		map32_handle += PAGE_SIZE;
+	}
+}
+#else
+# define HandleID(x,dev) (unsigned int)(x)
 #endif
 
 /**
@ -82,25 +114,23 @@ static unsigned int map32_handle = 0x10000000;
  * type.  Adds the map to the map list drm_device::maplist. Adds MTRR's where
  * applicable and if supported by the kernel.
  */
-int drm_addmap( struct inode *inode, struct file *filp,
-		unsigned int cmd, unsigned long arg )
+int drm_addmap(drm_device_t * dev, unsigned int offset,
+	       unsigned int size, drm_map_type_t type,
+	       drm_map_flags_t flags, drm_local_map_t ** map_ptr)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
 	drm_map_t *map;
-	drm_map_t __user *argp = (void __user *)arg;
 	drm_map_list_t *list;
-
-	if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */
+	drm_dma_handle_t *dmah;
+	drm_local_map_t *found_map;
 
 	map = drm_alloc( sizeof(*map), DRM_MEM_MAPS );
 	if ( !map )
 		return -ENOMEM;
 
-	if ( copy_from_user( map, argp, sizeof(*map) ) ) {
-		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
-		return -EFAULT;
-	}
+	map->offset = offset;
+	map->size = size;
+	map->flags = flags;
+	map->type = type;
 
 	/* Only allow shared memory to be removable since we only keep enough
 	 * book keeping information about shared memory to allow for removal
@ -122,7 +152,7 @@ int drm_addmap( struct inode *inode, struct file *filp,
 	switch ( map->type ) {
 	case _DRM_REGISTERS:
 	case _DRM_FRAME_BUFFER:
-#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
+#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
 		if ( map->offset + map->size < map->offset ||
 		     map->offset < virt_to_phys(high_memory) ) {
 			drm_free( map, sizeof(*map), DRM_MEM_MAPS );
@ -132,6 +162,24 @@ int drm_addmap( struct inode *inode, struct file *filp,
 #ifdef __alpha__
 		map->offset += dev->hose->mem_space->start;
 #endif
+		/* Some drivers preinitialize some maps, without the X Server
+		 * needing to be aware of it.  Therefore, we just return success
+		 * when the server tries to create a duplicate map.
+		 */
+		found_map = drm_find_matching_map(dev, map);
+		if (found_map != NULL) {
+			if (found_map->size != map->size) {
+				DRM_DEBUG("Matching maps of type %d with "
+					  "mismatched sizes, (%ld vs %ld)\n",
+					  map->type, map->size, found_map->size);
+				found_map->size = map->size;
+			}
+
+			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			*map_ptr = found_map;
+			return 0;
+		}
+
 		if (drm_core_has_MTRR(dev)) {
 			if ( map->type == _DRM_FRAME_BUFFER ||
 			     (map->flags & _DRM_WRITE_COMBINING) ) {
@ -178,9 +226,22 @@ int drm_addmap( struct inode *inode, struct file *filp,
 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 			return -EINVAL;
 		}
-		map->offset += dev->sg->handle;
+		map->offset += (unsigned long)dev->sg->virtual;
+		break;
+	case _DRM_CONSISTENT:
+		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
+		 * As we're limiting the address to 2^32-1 (or less),
+		 * casting it down to 32 bits is no problem, but we
+		 * need to point to a 64bit variable first. */
+		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
+		if (!dmah) {
+			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			return -ENOMEM;
+		}
+		map->handle = dmah->vaddr;
+		map->offset = (unsigned long)dmah->busaddr;
+		kfree(dmah);
 		break;
 	default:
 		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
 		return -EINVAL;
@ -196,17 +257,56 @@ int drm_addmap( struct inode *inode, struct file *filp,
 	down(&dev->struct_sem);
 	list_add(&list->head, &dev->maplist->head);
-#ifdef CONFIG_COMPAT
-	/* Assign a 32-bit handle for _DRM_SHM mappings */
+	/* Assign a 32-bit handle */
 	/* We do it here so that dev->struct_sem protects the increment */
-	if (map->type == _DRM_SHM)
-		map->offset = map32_handle += PAGE_SIZE;
-#endif
+	list->user_token = HandleID(map->type==_DRM_SHM
+				    ? (unsigned long)map->handle
+				    : map->offset, dev);
 	up(&dev->struct_sem);
 
-	if ( copy_to_user( argp, map, sizeof(*map) ) )
-		return -EFAULT;
-	if (copy_to_user(&argp->handle, &map->offset, sizeof(map->offset)))
-		return -EFAULT;
+	*map_ptr = map;
 	return 0;
 }
+EXPORT_SYMBOL(drm_addmap);
+
+int drm_addmap_ioctl(struct inode *inode, struct file *filp,
+		     unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_map_t map;
+	drm_map_t *map_ptr;
+	drm_map_t __user *argp = (void __user *)arg;
+	int err;
+	unsigned long handle = 0;
+
+	if (!(filp->f_mode & 3))
+		return -EACCES;	/* Require read/write */
+
+	if (copy_from_user(& map, argp, sizeof(map))) {
+		return -EFAULT;
+	}
+
+	err = drm_addmap(dev, map.offset, map.size, map.type, map.flags,
+			 &map_ptr);
+	if (err) {
+		return err;
+	}
+
+	{
+		drm_map_list_t *_entry;
+		list_for_each_entry(_entry, &dev->maplist->head, head) {
+			if (_entry->map == map_ptr)
+				handle = _entry->user_token;
+		}
+		if (!handle)
+			return -EFAULT;
+	}
+
+	if (copy_to_user(argp, map_ptr, sizeof(*map_ptr)))
+		return -EFAULT;
+	if (put_user(handle, &argp->handle))
+		return -EFAULT;
+	return 0;
+}
@ -226,81 +326,138 @@ int drm_addmap( struct inode *inode, struct file *filp,
  * its being used, and free any associate resource (such as MTRR's) if it's not
  * being on use.
  *
- * \sa addmap().
+ * \sa drm_addmap
  */
-int drm_rmmap(struct inode *inode, struct file *filp,
-	      unsigned int cmd, unsigned long arg)
+int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
 	struct list_head *list;
 	drm_map_list_t *r_list = NULL;
-	drm_vma_entry_t *pt, *prev;
-	drm_map_t *map;
-	drm_map_t request;
-	int found_maps = 0;
+	drm_dma_handle_t dmah;
 
-	if (copy_from_user(&request, (drm_map_t __user *)arg,
-			   sizeof(request))) {
-		return -EFAULT;
-	}
-
-	down(&dev->struct_sem);
-	list = &dev->maplist->head;
+	/* Find the list entry for the map and remove it */
 	list_for_each(list, &dev->maplist->head) {
 		r_list = list_entry(list, drm_map_list_t, head);
 
-		if(r_list->map &&
-		   r_list->map->offset == (unsigned long) request.handle &&
-		   r_list->map->flags & _DRM_REMOVABLE) break;
+		if (r_list->map == map) {
+			list_del(list);
+			drm_free(list, sizeof(*list), DRM_MEM_MAPS);
+			break;
+		}
 	}
 
-	/* List has wrapped around to the head pointer, or its empty we didn't
-	 * find anything.
+	/* List has wrapped around to the head pointer, or it's empty and we
+	 * didn't find anything.
 	 */
-	if(list == (&dev->maplist->head)) {
-		up(&dev->struct_sem);
+	if (list == (&dev->maplist->head)) {
 		return -EINVAL;
 	}
-	map = r_list->map;
-	list_del(list);
-	drm_free(list, sizeof(*list), DRM_MEM_MAPS);
 
-	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
-		if (pt->vma->vm_private_data == map) found_maps++;
+	switch (map->type) {
+	case _DRM_REGISTERS:
+		drm_ioremapfree(map->handle, map->size, dev);
+		/* FALLTHROUGH */
+	case _DRM_FRAME_BUFFER:
+		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
+			int retcode;
+			retcode = mtrr_del(map->mtrr, map->offset,
+					   map->size);
+			DRM_DEBUG ("mtrr_del=%d\n", retcode);
+		}
+		break;
+	case _DRM_SHM:
+		vfree(map->handle);
+		break;
+	case _DRM_AGP:
+	case _DRM_SCATTER_GATHER:
+		break;
+	case _DRM_CONSISTENT:
+		dmah.vaddr = map->handle;
+		dmah.busaddr = map->offset;
+		dmah.size = map->size;
+		__drm_pci_free(dev, &dmah);
+		break;
 	}
+	drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 
-	if(!found_maps) {
-		switch (map->type) {
-		case _DRM_REGISTERS:
-		case _DRM_FRAME_BUFFER:
-			if (drm_core_has_MTRR(dev)) {
-				if (map->mtrr >= 0) {
-					int retcode;
-					retcode = mtrr_del(map->mtrr,
-							   map->offset,
-							   map->size);
-					DRM_DEBUG("mtrr_del = %d\n", retcode);
-				}
-			}
-			drm_ioremapfree(map->handle, map->size, dev);
-			break;
-		case _DRM_SHM:
-			vfree(map->handle);
-			break;
-		case _DRM_AGP:
-		case _DRM_SCATTER_GATHER:
-			break;
-		}
-		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
-	}
-	up(&dev->struct_sem);
 	return 0;
 }
+EXPORT_SYMBOL(drm_rmmap_locked);
+
+int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
+{
+	int ret;
+
+	down(&dev->struct_sem);
+	ret = drm_rmmap_locked(dev, map);
+	up(&dev->struct_sem);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_rmmap);
+
+/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
+ * the last close of the device, and this is necessary for cleanup when things
+ * exit uncleanly.  Therefore, having userland manually remove mappings seems
+ * like a pointless exercise since they're going away anyway.
+ *
+ * One use case might be after addmap is allowed for normal users for SHM and
+ * gets used by drivers that the server doesn't need to care about.  This seems
+ * unlikely.
+ */
+int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
+		    unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_map_t request;
+	drm_local_map_t *map = NULL;
+	struct list_head *list;
+	int ret;
+
+	if (copy_from_user(&request, (drm_map_t __user *)arg, sizeof(request))) {
+		return -EFAULT;
+	}
+
+	down(&dev->struct_sem);
+	list_for_each(list, &dev->maplist->head) {
+		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
+
+		if (r_list->map &&
+		    r_list->user_token == (unsigned long) request.handle &&
+		    r_list->map->flags & _DRM_REMOVABLE) {
+			map = r_list->map;
+			break;
+		}
+	}
+
+	/* List has wrapped around to the head pointer, or its empty we didn't
+	 * find anything.
+	 */
+	if (list == (&dev->maplist->head)) {
+		up(&dev->struct_sem);
+		return -EINVAL;
+	}
+
+	if (!map)
+		return -EINVAL;
+
+	/* Register and framebuffer maps are permanent */
+	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
+		up(&dev->struct_sem);
+		return 0;
+	}
+
+	ret = drm_rmmap_locked(dev, map);
+
+	up(&dev->struct_sem);
+
+	return ret;
+}
 
 /**
  * Cleanup after an error on one of the addbufs() functions.
  *
+ * \param dev DRM device.
  * \param entry buffer entry where the error occurred.
  *
  * Frees any pages and buffers associated with the given entry.
@ -344,25 +501,19 @@ static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
 #if __OS_HAS_AGP
 /**
- * Add AGP buffers for DMA transfers (ioctl).
+ * Add AGP buffers for DMA transfers.
  *
- * \param inode device inode.
- * \param filp file pointer.
- * \param cmd command.
- * \param arg pointer to a drm_buf_desc_t request.
+ * \param dev drm_device_t to which the buffers are to be added.
+ * \param request pointer to a drm_buf_desc_t describing the request.
  * \return zero on success or a negative number on failure.
  *
  * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
  */
-static int drm_addbufs_agp( struct inode *inode, struct file *filp,
-			    unsigned int cmd, unsigned long arg )
+int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
 	drm_device_dma_t *dma = dev->dma;
-	drm_buf_desc_t request;
 	drm_buf_entry_t *entry;
 	drm_buf_t *buf;
 	unsigned long offset;
@ -376,25 +527,20 @@ static int drm_addbufs_agp( struct inode *inode, struct file *filp,
int byte_count; int byte_count;
int i; int i;
drm_buf_t **temp_buflist; drm_buf_t **temp_buflist;
drm_buf_desc_t __user *argp = (void __user *)arg;
if ( !dma ) return -EINVAL; if ( !dma ) return -EINVAL;
if ( copy_from_user( &request, argp, count = request->count;
sizeof(request) ) ) order = drm_order(request->size);
return -EFAULT;
count = request.count;
order = drm_order( request.size );
size = 1 << order; size = 1 << order;
alignment = (request.flags & _DRM_PAGE_ALIGN) alignment = (request->flags & _DRM_PAGE_ALIGN)
? PAGE_ALIGN(size) : size; ? PAGE_ALIGN(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0; page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order; total = PAGE_SIZE << page_order;
byte_count = 0; byte_count = 0;
agp_offset = dev->agp->base + request.agp_start; agp_offset = dev->agp->base + request->agp_start;
DRM_DEBUG( "count: %d\n", count ); DRM_DEBUG( "count: %d\n", count );
DRM_DEBUG( "order: %d\n", order ); DRM_DEBUG( "order: %d\n", order );
@ -508,26 +654,20 @@ static int drm_addbufs_agp( struct inode *inode, struct file *filp,
up( &dev->struct_sem ); up( &dev->struct_sem );
request.count = entry->buf_count; request->count = entry->buf_count;
request.size = size; request->size = size;
if ( copy_to_user( argp, &request, sizeof(request) ) )
return -EFAULT;
dma->flags = _DRM_DMA_USE_AGP; dma->flags = _DRM_DMA_USE_AGP;
atomic_dec( &dev->buf_alloc ); atomic_dec( &dev->buf_alloc );
return 0; return 0;
} }
EXPORT_SYMBOL(drm_addbufs_agp);
#endif /* __OS_HAS_AGP */ #endif /* __OS_HAS_AGP */
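
With the ioctl marshalling pulled out into drm_addbufs(), drm_addbufs_agp() is now
exported and callable directly from driver code with a kernel-resident request.
A minimal sketch of such a caller, assuming an already-initialized dev->agp; the
function name example_create_dma_bufs and the chosen sizes are illustrative, not
part of this patch:

    /* Hypothetical driver-side caller: ask for 32 page-aligned AGP
     * buffers of 64KB each.  On success drm_addbufs_agp() writes the
     * actual count and (power-of-two) size back into the request. */
    static int example_create_dma_bufs(drm_device_t *dev)
    {
        drm_buf_desc_t request;

        memset(&request, 0, sizeof(request));
        request.count = 32;
        request.size = 65536;              /* rounded via drm_order() */
        request.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
        request.agp_start = 0;             /* byte offset into the aperture */

        return drm_addbufs_agp(dev, &request);
    }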
-static int drm_addbufs_pci( struct inode *inode, struct file *filp,
-			    unsigned int cmd, unsigned long arg )
+int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
{
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
-	drm_buf_desc_t request;
	int count;
	int order;
	int size;

@@ -543,26 +683,22 @@ static int drm_addbufs_pci( struct inode *inode, struct file *filp,
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;
-	drm_buf_desc_t __user *argp = (void __user *)arg;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
	if ( !dma ) return -EINVAL;

-	if ( copy_from_user( &request, argp, sizeof(request) ) )
-		return -EFAULT;
-
-	count = request.count;
-	order = drm_order( request.size );
+	count = request->count;
+	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
-		   request.count, request.size, size,
+		   request->count, request->size, size,
		   order, dev->queue_count );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */

-	alignment = (request.flags & _DRM_PAGE_ALIGN)
+	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

@@ -740,25 +876,18 @@ static int drm_addbufs_pci( struct inode *inode, struct file *filp,
	up( &dev->struct_sem );

-	request.count = entry->buf_count;
-	request.size = size;
-
-	if ( copy_to_user( argp, &request, sizeof(request) ) )
-		return -EFAULT;
+	request->count = entry->buf_count;
+	request->size = size;

	atomic_dec( &dev->buf_alloc );
	return 0;
}
+EXPORT_SYMBOL(drm_addbufs_pci);
-static int drm_addbufs_sg( struct inode *inode, struct file *filp,
-			   unsigned int cmd, unsigned long arg )
+static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
-	drm_buf_desc_t __user *argp = (void __user *)arg;
-	drm_buf_desc_t request;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;

@@ -777,20 +906,17 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp,
	if ( !dma ) return -EINVAL;

-	if ( copy_from_user( &request, argp, sizeof(request) ) )
-		return -EFAULT;
-
-	count = request.count;
-	order = drm_order( request.size );
+	count = request->count;
+	order = drm_order(request->size);
	size = 1 << order;

-	alignment = (request.flags & _DRM_PAGE_ALIGN)
+	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
-	agp_offset = request.agp_start;
+	agp_offset = request->agp_start;

	DRM_DEBUG( "count:      %d\n", count );
	DRM_DEBUG( "order:      %d\n", order );

@@ -848,7 +974,8 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp,
		buf->offset  = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
-		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
+		buf->address = (void *)(agp_offset + offset
+					+ (unsigned long)dev->sg->virtual);
		buf->next    = NULL;
		buf->waiting = 0;
		buf->pending = 0;

@@ -905,11 +1032,8 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp,
	up( &dev->struct_sem );

-	request.count = entry->buf_count;
-	request.size = size;
-
-	if ( copy_to_user( argp, &request, sizeof(request) ) )
-		return -EFAULT;
+	request->count = entry->buf_count;
+	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

@@ -917,6 +1041,161 @@ static int drm_addbufs_sg( struct inode *inode, struct file *filp,
	return 0;
}
int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request)
{
drm_device_dma_t *dma = dev->dma;
drm_buf_entry_t *entry;
drm_buf_t *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
int order;
int size;
int alignment;
int page_order;
int total;
int byte_count;
int i;
drm_buf_t **temp_buflist;
if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
return -EINVAL;
if (!dma)
return -EINVAL;
count = request->count;
order = drm_order(request->size);
size = 1 << order;
alignment = (request->flags & _DRM_PAGE_ALIGN)
? PAGE_ALIGN(size) : size;
page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
total = PAGE_SIZE << page_order;
byte_count = 0;
agp_offset = request->agp_start;
DRM_DEBUG("count: %d\n", count);
DRM_DEBUG("order: %d\n", order);
DRM_DEBUG("size: %d\n", size);
DRM_DEBUG("agp_offset: %lu\n", agp_offset);
DRM_DEBUG("alignment: %d\n", alignment);
DRM_DEBUG("page_order: %d\n", page_order);
DRM_DEBUG("total: %d\n", total);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
if (dev->queue_count)
return -EBUSY; /* Not while in use */
spin_lock(&dev->count_lock);
if (dev->buf_use) {
spin_unlock(&dev->count_lock);
return -EBUSY;
}
atomic_inc(&dev->buf_alloc);
spin_unlock(&dev->count_lock);
down(&dev->struct_sem);
entry = &dma->bufs[order];
if (entry->buf_count) {
up(&dev->struct_sem);
atomic_dec(&dev->buf_alloc);
return -ENOMEM; /* May only call once for each order */
}
if (count < 0 || count > 4096) {
up(&dev->struct_sem);
atomic_dec(&dev->buf_alloc);
return -EINVAL;
}
entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
if (!entry->buflist) {
up(&dev->struct_sem);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
memset(entry->buflist, 0, count * sizeof(*entry->buflist));
entry->buf_size = size;
entry->page_order = page_order;
offset = 0;
while (entry->buf_count < count) {
buf = &entry->buflist[entry->buf_count];
buf->idx = dma->buf_count + entry->buf_count;
buf->total = alignment;
buf->order = order;
buf->used = 0;
buf->offset = (dma->byte_count + offset);
buf->bus_address = agp_offset + offset;
buf->address = (void *)(agp_offset + offset);
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
init_waitqueue_head(&buf->dma_wait);
buf->filp = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
if (!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
drm_cleanup_buf_error(dev, entry);
up(&dev->struct_sem);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
memset(buf->dev_private, 0, buf->dev_priv_size);
DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
offset += alignment;
entry->buf_count++;
byte_count += PAGE_SIZE << page_order;
}
DRM_DEBUG("byte_count: %d\n", byte_count);
temp_buflist = drm_realloc(dma->buflist,
dma->buf_count * sizeof(*dma->buflist),
(dma->buf_count + entry->buf_count)
* sizeof(*dma->buflist), DRM_MEM_BUFS);
if (!temp_buflist) {
/* Free the entry because it isn't valid */
drm_cleanup_buf_error(dev, entry);
up(&dev->struct_sem);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
dma->buflist = temp_buflist;
for (i = 0; i < entry->buf_count; i++) {
dma->buflist[i + dma->buf_count] = &entry->buflist[i];
}
dma->buf_count += entry->buf_count;
dma->byte_count += byte_count;
DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
up(&dev->struct_sem);
request->count = entry->buf_count;
request->size = size;
dma->flags = _DRM_DMA_USE_FB;
atomic_dec(&dev->buf_alloc);
return 0;
}
/**
 * Add buffers for DMA transfers (ioctl).
 *

@@ -937,6 +1216,7 @@ int drm_addbufs( struct inode *inode, struct file *filp,
	drm_buf_desc_t request;
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
+	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

@@ -947,13 +1227,23 @@ int drm_addbufs( struct inode *inode, struct file *filp,
#if __OS_HAS_AGP
	if ( request.flags & _DRM_AGP_BUFFER )
-		return drm_addbufs_agp( inode, filp, cmd, arg );
+		ret=drm_addbufs_agp(dev, &request);
	else
#endif
	if ( request.flags & _DRM_SG_BUFFER )
-		return drm_addbufs_sg( inode, filp, cmd, arg );
+		ret=drm_addbufs_sg(dev, &request);
+	else if ( request.flags & _DRM_FB_BUFFER)
+		ret=drm_addbufs_fb(dev, &request);
	else
-		return drm_addbufs_pci( inode, filp, cmd, arg );
+		ret=drm_addbufs_pci(dev, &request);

+	if (ret==0) {
+		if (copy_to_user((void __user *)arg, &request,
+				 sizeof(request))) {
+			ret = -EFAULT;
+		}
+	}
+	return ret;
}
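
The dispatcher above now does the copy_from_user()/copy_to_user() exactly once,
so every drm_addbufs_*() helper sees the same kernel-space drm_buf_desc_t. Seen
from userspace this is still the single DRM_IOCTL_ADD_BUFS ioctl; a hedged
sketch follows (device path and required privileges vary by system, error
handling elided):

    /* Illustrative userspace use of DRM_IOCTL_ADD_BUFS: request 16
     * scatter/gather buffers of 4KB.  The kernel may round the size up
     * to a power of two and return fewer buffers; both values come back
     * in the same struct. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>            /* header location depends on the install */

    int main(void)
    {
        drm_buf_desc_t req;
        int fd = open("/dev/dri/card0", O_RDWR);   /* path is system-specific */

        if (fd < 0)
            return 1;
        memset(&req, 0, sizeof(req));
        req.count = 16;
        req.size = 4096;
        req.flags = _DRM_SG_BUFFER;
        if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &req) == 0)
            printf("got %d buffers of %d bytes\n", req.count, req.size);
        return 0;
    }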
@@ -1196,43 +1486,31 @@ int drm_mapbufs( struct inode *inode, struct file *filp,
		return -EFAULT;

	if ( request.count >= dma->buf_count ) {
-		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
-		    (drm_core_check_feature(dev, DRIVER_SG) && (dma->flags & _DRM_DMA_USE_SG)) ) {
+		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
+		    || (drm_core_check_feature(dev, DRIVER_SG)
+			&& (dma->flags & _DRM_DMA_USE_SG))
+		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
+			&& (dma->flags & _DRM_DMA_USE_FB))) {
			drm_map_t *map = dev->agp_buffer_map;
+			unsigned long token = dev->agp_buffer_token;

			if ( !map ) {
				retcode = -EINVAL;
				goto done;
			}

-#if LINUX_VERSION_CODE <= 0x020402
-			down( &current->mm->mmap_sem );
-#else
			down_write( &current->mm->mmap_sem );
-#endif
			virtual = do_mmap( filp, 0, map->size,
					   PROT_READ | PROT_WRITE,
					   MAP_SHARED,
-					   (unsigned long)map->offset );
+					   token );
-#if LINUX_VERSION_CODE <= 0x020402
-			up( &current->mm->mmap_sem );
-#else
			up_write( &current->mm->mmap_sem );
-#endif
		} else {
-#if LINUX_VERSION_CODE <= 0x020402
-			down( &current->mm->mmap_sem );
-#else
			down_write( &current->mm->mmap_sem );
-#endif
			virtual = do_mmap( filp, 0, dma->byte_count,
					   PROT_READ | PROT_WRITE,
					   MAP_SHARED, 0 );
-#if LINUX_VERSION_CODE <= 0x020402
-			up( &current->mm->mmap_sem );
-#else
			up_write( &current->mm->mmap_sem );
-#endif
		}

		if ( virtual > -1024UL ) {
			/* Real error */

@@ -1279,3 +1557,26 @@ int drm_mapbufs( struct inode *inode, struct file *filp,
	return retcode;
}
/**
* Compute size order. Returns the exponent of the smaller power of two which
* is greater or equal to given number.
*
* \param size size.
* \return order.
*
* \todo Can be made faster.
*/
int drm_order( unsigned long size )
{
int order;
unsigned long tmp;
for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
;
if (size & (size - 1))
++order;
return order;
}
EXPORT_SYMBOL(drm_order);
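
drm_order() is a ceiling base-2 logarithm: the loop counts how often size can be
halved, and the final test adds one when size is not already a power of two. A
stand-alone copy of the same logic, for illustration only, with a few checked
values:

    #include <assert.h>

    /* Userspace re-statement of the drm_order() loop above. */
    static int order_of(unsigned long size)
    {
        int order;
        unsigned long tmp;

        for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
            ;
        if (size & (size - 1))
            ++order;
        return order;
    }

    int main(void)
    {
        assert(order_of(1) == 0);       /* 2^0 */
        assert(order_of(4096) == 12);   /* already a power of two */
        assert(order_of(4097) == 13);   /* rounds up to the next power */
        assert(order_of(65536) == 16);  /* a typical buffer size */
        return 0;
    }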
@@ -212,6 +212,7 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
	drm_ctx_priv_map_t __user *argp = (void __user *)arg;
	drm_ctx_priv_map_t request;
	drm_map_t *map;
+	drm_map_list_t *_entry;

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

@@ -225,7 +226,17 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
	map = dev->context_sareas[request.ctx_id];
	up(&dev->struct_sem);

-	request.handle = (void *) map->offset;
+	request.handle = 0;
+	list_for_each_entry(_entry, &dev->maplist->head, head) {
+		if (_entry->map == map) {
+			request.handle = (void *)(unsigned long)_entry->user_token;
+			break;
+		}
+	}
+	if (request.handle == 0)
+		return -EINVAL;
+
	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;
	return 0;

@@ -262,7 +273,7 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);
		if (r_list->map
-		    && r_list->map->offset == (unsigned long) request.handle)
+		    && r_list->user_token == (unsigned long) request.handle)
			goto found;
	}
bad:

@@ -369,7 +380,7 @@ int drm_resctx( struct inode *inode, struct file *filp,
	for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
		ctx.handle = i;
		if ( copy_to_user( &res.contexts[i],
-				   &i, sizeof(i) ) )
+				   &ctx, sizeof(ctx) ) )
			return -EFAULT;
	}
}
@@ -70,8 +70,8 @@ static drm_ioctl_desc_t drm_ioctls[] = {
	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_noop, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },

-	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { drm_rmmap, 1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap_ioctl, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { drm_rmmap_ioctl, 1, 0 },

	[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { drm_setsareactx, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { drm_getsareactx, 1, 0 },

@@ -102,10 +102,10 @@ static drm_ioctl_desc_t drm_ioctls[] = {
	[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { drm_control, 1, 1 },

#if __OS_HAS_AGP
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire_ioctl, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release_ioctl, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable_ioctl, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info_ioctl, 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
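
The drm_ioctls[] rows renamed here use designated array initializers, so each
entry lives at the index of its command number and a handler rename touches
exactly one line; the two trailing integers are the auth_needed and root_only
flags of drm_ioctl_desc_t. A reduced stand-alone sketch of the same table
pattern (all names here are illustrative):

    #include <stdio.h>

    typedef int (*handler_t)(int arg);

    typedef struct {
        handler_t func;
        int auth_needed;
        int root_only;
    } desc_t;

    static int do_reset(int arg) { return arg + 1; }

    /* Row placed at its command number, as in
     * [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap_ioctl, 1, 1 } */
    static desc_t table[8] = {
        [3] = { do_reset, 1, 1 },
    };

    int main(void)
    {
        int nr = 3;
        if (table[nr].func)
            printf("handler returned %d\n", table[nr].func(41));
        return 0;
    }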
@@ -127,14 +127,12 @@ static drm_ioctl_desc_t drm_ioctls[] = {
 *
 * Frees every resource in \p dev.
 *
- * \sa drm_device and setup().
+ * \sa drm_device
 */
int drm_takedown( drm_device_t *dev )
{
	drm_magic_entry_t *pt, *next;
-	drm_map_t *map;
	drm_map_list_t *r_list;
-	struct list_head *list, *list_next;
	drm_vma_entry_t *vma, *vma_next;
	int i;

@@ -142,6 +140,7 @@ int drm_takedown( drm_device_t *dev )
	if (dev->driver->pretakedown)
		dev->driver->pretakedown(dev);
+	DRM_DEBUG("driver pretakedown completed\n");

	if (dev->unique) {
		drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);

@@ -178,11 +177,16 @@ int drm_takedown( drm_device_t *dev )
		}
		dev->agp->memory = NULL;

-		if ( dev->agp->acquired ) drm_agp_do_release(dev);
+		if (dev->agp->acquired)
+			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled = 0;
	}
+	if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
+		drm_sg_cleanup(dev->sg);
+		dev->sg = NULL;
+	}

	/* Clear vma list (only built for debugging) */
	if ( dev->vmalist ) {

@@ -194,48 +198,11 @@ int drm_takedown( drm_device_t *dev )
	}

	if( dev->maplist ) {
-		list_for_each_safe( list, list_next, &dev->maplist->head ) {
-			r_list = (drm_map_list_t *)list;
-
-			if ( ( map = r_list->map ) ) {
-				switch ( map->type ) {
-				case _DRM_REGISTERS:
-				case _DRM_FRAME_BUFFER:
-					if (drm_core_has_MTRR(dev)) {
-						if ( map->mtrr >= 0 ) {
-							int retcode;
-							retcode = mtrr_del( map->mtrr,
-									    map->offset,
-									    map->size );
-							DRM_DEBUG( "mtrr_del=%d\n", retcode );
-						}
-					}
-					drm_ioremapfree( map->handle, map->size, dev );
-					break;
-				case _DRM_SHM:
-					vfree(map->handle);
-					break;
-				case _DRM_AGP:
-					/* Do nothing here, because this is all
-					 * handled in the AGP/GART driver.
-					 */
-					break;
-				case _DRM_SCATTER_GATHER:
-					/* Handle it */
-					if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
-						drm_sg_cleanup(dev->sg);
-						dev->sg = NULL;
-					}
-					break;
-				}
-				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
-			}
-			list_del( list );
-			drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
-		}
-		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
-		dev->maplist = NULL;
+		while (!list_empty(&dev->maplist->head)) {
+			struct list_head *list = dev->maplist->head.next;
+			r_list = list_entry(list, drm_map_list_t, head);
+			drm_rmmap_locked(dev, r_list->map);
+		}
	}

	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist ) {

@@ -264,6 +231,7 @@ int drm_takedown( drm_device_t *dev )
	}

	up( &dev->struct_sem );
+	DRM_DEBUG("takedown completed\n");

	return 0;
}
@@ -312,7 +280,7 @@ EXPORT_SYMBOL(drm_init);
 *
 * Cleans up all DRM device, calling takedown().
 *
- * \sa drm_init().
+ * \sa drm_init
 */
static void drm_cleanup( drm_device_t *dev )
{

@@ -325,6 +293,11 @@ static void drm_cleanup( drm_device_t *dev )
	drm_takedown( dev );

+	if (dev->maplist) {
+		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
+		dev->maplist = NULL;
+	}
+
	drm_ctxbitmap_cleanup( dev );

	if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
@@ -71,12 +71,6 @@ static int drm_setup( drm_device_t *dev )
		dev->magiclist[i].tail = NULL;
	}

-	dev->maplist = drm_alloc(sizeof(*dev->maplist),
-				 DRM_MEM_MAPS);
-	if(dev->maplist == NULL) return -ENOMEM;
-	memset(dev->maplist, 0, sizeof(*dev->maplist));
-	INIT_LIST_HEAD(&dev->maplist->head);
-
	dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist),
				 DRM_MEM_CTXLIST);
	if(dev->ctxlist == NULL) return -ENOMEM;
@@ -208,7 +208,7 @@ int drm_getmap( struct inode *inode, struct file *filp,
	map.size = r_list->map->size;
	map.type = r_list->map->type;
	map.flags = r_list->map->flags;
-	map.handle = r_list->map->handle;
+	map.handle = (void *)(unsigned long) r_list->user_token;
	map.mtrr = r_list->map->mtrr;

	up(&dev->struct_sem);
@@ -142,27 +142,31 @@ void drm_free_pages(unsigned long address, int order, int area)
#if __OS_HAS_AGP
/** Wrapper around agp_allocate_memory() */
-DRM_AGP_MEM *drm_alloc_agp(struct agp_bridge_data *bridge, int pages, u32 type)
+DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type)
{
-	return drm_agp_allocate_memory(bridge, pages, type);
+	return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
}
+EXPORT_SYMBOL(drm_alloc_agp);

/** Wrapper around agp_free_memory() */
int drm_free_agp(DRM_AGP_MEM *handle, int pages)
{
	return drm_agp_free_memory(handle) ? 0 : -EINVAL;
}
+EXPORT_SYMBOL(drm_free_agp);

/** Wrapper around agp_bind_memory() */
int drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start)
{
	return drm_agp_bind_memory(handle, start);
}
+EXPORT_SYMBOL(drm_bind_agp);

/** Wrapper around agp_unbind_memory() */
int drm_unbind_agp(DRM_AGP_MEM *handle)
{
	return drm_agp_unbind_memory(handle);
}
+EXPORT_SYMBOL(drm_unbind_agp);
#endif /* agp */
#endif /* debug_memory */
@@ -46,11 +46,11 @@
/**
 * \brief Allocate a PCI consistent memory block, for DMA.
 */
-void *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
-		    dma_addr_t maxaddr, dma_addr_t * busaddr)
+drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
+				dma_addr_t maxaddr)
{
-	void *address;
-#if DRM_DEBUG_MEMORY
+	drm_dma_handle_t *dmah;
+#ifdef DRM_DEBUG_MEMORY
	int area = DRM_MEM_DMA;

	spin_lock(&drm_mem_lock);

@@ -74,13 +74,19 @@ void *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
		return NULL;
	}

-	address = pci_alloc_consistent(dev->pdev, size, busaddr);
+	dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
+	if (!dmah)
+		return NULL;
+
+	dmah->size = size;
+	dmah->vaddr = pci_alloc_consistent(dev->pdev, size, &dmah->busaddr);

-#if DRM_DEBUG_MEMORY
-	if (address == NULL) {
+#ifdef DRM_DEBUG_MEMORY
+	if (dmah->vaddr == NULL) {
		spin_lock(&drm_mem_lock);
		++drm_mem_stats[area].fail_count;
		spin_unlock(&drm_mem_lock);
+		kfree(dmah);
		return NULL;
	}

@@ -90,37 +96,42 @@ void *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
	drm_ram_used += size;
	spin_unlock(&drm_mem_lock);
#else
-	if (address == NULL)
+	if (dmah->vaddr == NULL) {
+		kfree(dmah);
		return NULL;
+	}
#endif

-	memset(address, 0, size);
+	memset(dmah->vaddr, 0, size);

-	return address;
+	return dmah;
}
EXPORT_SYMBOL(drm_pci_alloc);

/**
- * \brief Free a PCI consistent memory block.
+ * \brief Free a PCI consistent memory block with freeing its descriptor.
+ *
+ * This function is for internal use in the Linux-specific DRM core code.
 */
void
-drm_pci_free(drm_device_t * dev, size_t size, void *vaddr, dma_addr_t busaddr)
+__drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah)
{
-#if DRM_DEBUG_MEMORY
+#ifdef DRM_DEBUG_MEMORY
	int area = DRM_MEM_DMA;
	int alloc_count;
	int free_count;
#endif

-	if (!vaddr) {
-#if DRM_DEBUG_MEMORY
+	if (!dmah->vaddr) {
+#ifdef DRM_DEBUG_MEMORY
		DRM_MEM_ERROR(area, "Attempt to free address 0\n");
#endif
	} else {
-		pci_free_consistent(dev->pdev, size, vaddr, busaddr);
+		pci_free_consistent(dev->pdev, dmah->size, dmah->vaddr,
+				    dmah->busaddr);
	}

-#if DRM_DEBUG_MEMORY
+#ifdef DRM_DEBUG_MEMORY
	spin_lock(&drm_mem_lock);
	free_count = ++drm_mem_stats[area].free_count;
	alloc_count = drm_mem_stats[area].succeed_count;

@@ -135,6 +146,16 @@ drm_pci_free(drm_device_t * dev, size_t size, void *vaddr, dma_addr_t busaddr)
#endif
}

+/**
+ * \brief Free a PCI consistent memory block
+ */
+void
+drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah)
+{
+	__drm_pci_free(dev, dmah);
+	kfree(dmah);
+}
EXPORT_SYMBOL(drm_pci_free);

/*@}*/
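
The reworked interface returns a drm_dma_handle_t that keeps vaddr, busaddr and
size together, which is what lets the new _DRM_CONSISTENT teardown path in
drm_vm.c rebuild a handle and call __drm_pci_free(). A hedged kernel-side sketch
of the intended usage; the surrounding function and example_write_ring_base()
are hypothetical:

    /* Hypothetical driver code: allocate a page-aligned 64KB consistent
     * block below 4GB, hand the bus address to the device, free it again.
     * drm_pci_alloc() returns NULL on failure. */
    static int example_setup_ring(drm_device_t *dev)
    {
        drm_dma_handle_t *dmah;

        dmah = drm_pci_alloc(dev, 0x10000, 0x1000, 0xffffffffUL);
        if (!dmah)
            return -ENOMEM;

        /* dmah->vaddr is the CPU mapping, dmah->busaddr is what the
         * device sees; dmah->size is remembered for the free. */
        example_write_ring_base(dev, dmah->busaddr);    /* hypothetical */

        drm_pci_free(dev, dmah);    /* frees the block and the handle */
        return 0;
    }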
@@ -25,6 +25,8 @@
	{0x1002, 0x4965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \
	{0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \
	{0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \
+	{0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \
+	{0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \
	{0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \
	{0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \
	{0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|CHIP_IS_MOBILITY}, \

@@ -33,7 +35,17 @@
	{0x1002, 0x4C65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \
	{0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \
	{0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \
+	{0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+	{0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+	{0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+	{0x1002, 0x4E47, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+	{0x1002, 0x4E48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+	{0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+	{0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+	{0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
	{0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
+	{0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
+	{0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
	{0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
	{0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
	{0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \

@@ -56,6 +68,7 @@
	{0x1002, 0x516A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
	{0x1002, 0x516B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
	{0x1002, 0x516C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
+	{0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
	{0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \
	{0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \
	{0x1002, 0x5836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \

@@ -116,9 +129,10 @@
	{0, 0, 0}

#define mga_PCI_IDS \
-	{0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+	{0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
+	{0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
+	{0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \
+	{0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
	{0, 0, 0}

#define mach64_PCI_IDS \

@@ -162,9 +176,10 @@
#define viadrv_PCI_IDS \
	{0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+	{0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
	{0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
	{0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+	{0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
	{0, 0, 0}

#define i810_PCI_IDS \

@@ -181,33 +196,30 @@
	{0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
	{0, 0, 0}

-#define gamma_PCI_IDS \
-	{0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0, 0, 0}
-
#define savage_PCI_IDS \
-	{0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+	{0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
+	{0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
+	{0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
+	{0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
+	{0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+	{0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+	{0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+	{0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+	{0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
+	{0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
+	{0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
+	{0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
+	{0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
+	{0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
	{0, 0, 0}

#define ffb_PCI_IDS \

@@ -223,10 +235,3 @@
	{0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
	{0, 0, 0}
-
-#define viadrv_PCI_IDS \
-	{0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0, 0, 0}
@@ -210,8 +210,8 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
	/* Hardcoded from _DRM_FRAME_BUFFER,
	   _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
-	   _DRM_SCATTER_GATHER. */
-	const char *types[] = { "FB", "REG", "SHM", "AGP", "SG" };
+	   _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
+	const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
	const char *type;
	int i;

@@ -229,16 +229,19 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
	if (dev->maplist != NULL) list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);
		map = r_list->map;
-		if(!map) continue;
-		if (map->type < 0 || map->type > 4) type = "??";
-		else type = types[map->type];
-		DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
+		if(!map)
+			continue;
+		if (map->type < 0 || map->type > 5)
+			type = "??";
+		else
+			type = types[map->type];
+		DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08x ",
			       i,
			       map->offset,
			       map->size,
			       type,
			       map->flags,
-			       (unsigned long)map->handle);
+			       r_list->user_token);
		if (map->mtrr < 0) {
			DRM_PROC_PRINT("none\n");
		} else {
@@ -61,6 +61,12 @@ void drm_sg_cleanup( drm_sg_mem_t *entry )
		    DRM_MEM_SGLISTS );
}

+#ifdef _LP64
+# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
+#else
+# define ScatterHandle(x) (unsigned int)(x)
+#endif
+
int drm_sg_alloc( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{

@@ -133,12 +139,13 @@ int drm_sg_alloc( struct inode *inode, struct file *filp,
	 */
	memset( entry->virtual, 0, pages << PAGE_SHIFT );

-	entry->handle = (unsigned long)entry->virtual;
+	entry->handle = ScatterHandle((unsigned long)entry->virtual);

	DRM_DEBUG( "sg alloc handle  = %08lx\n", entry->handle );
	DRM_DEBUG( "sg alloc virtual = %p\n", entry->virtual );

-	for ( i = entry->handle, j = 0 ; j < pages ; i += PAGE_SIZE, j++ ) {
+	for (i = (unsigned long)entry->virtual, j = 0; j < pages;
+	     i += PAGE_SIZE, j++) {
		entry->pagelist[j] = vmalloc_to_page((void *)i);
		if (!entry->pagelist[j])
			goto failed;
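
ScatterHandle() exists because on 64-bit kernels the vmalloc address used as the
map handle no longer fits in the 32-bit token userspace gets back, so the
page-walking loop above now iterates over the real virtual address while only
the handle is folded. A stand-alone illustration of the fold, assuming an LP64
build as in the _LP64 guard (the sample address is made up):

    #include <stdio.h>

    /* Fold a 64-bit value into 32 bits by summing its halves, mirroring
     * the kernel's ScatterHandle() macro.  The result is a compact token,
     * not a reversible encoding. */
    static unsigned int scatter_handle(unsigned long x)
    {
        return (unsigned int)((x >> 32) + (x & ((1UL << 32) - 1)));
    }

    int main(void)
    {
        unsigned long vaddr = 0xffffc20000153000UL;  /* made-up vmalloc address */
        printf("handle = %08x\n", scatter_handle(vaddr));
        return 0;
    }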
@@ -75,6 +75,11 @@ static int drm_fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct
	dev->pci_func = PCI_FUNC(pdev->devfn);
	dev->irq = pdev->irq;

+	dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
+	if (dev->maplist == NULL)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&dev->maplist->head);
+
	/* the DRM has 6 basic counters */
	dev->counters = 6;
	dev->types[0] = _DRM_STAT_LOCK;

@@ -91,7 +96,8 @@ static int drm_fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct
		goto error_out_unreg;

	if (drm_core_has_AGP(dev)) {
-		dev->agp = drm_agp_init(dev);
+		if (drm_device_is_agp(dev))
+			dev->agp = drm_agp_init(dev);
		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) && (dev->agp == NULL)) {
			DRM_ERROR( "Cannot initialize the agpgart module.\n" );
			retcode = -EINVAL;
@@ -73,12 +73,13 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
		r_list = list_entry(list, drm_map_list_t, head);
		map = r_list->map;
		if (!map) continue;
-		if (map->offset == VM_OFFSET(vma)) break;
+		if (r_list->user_token == VM_OFFSET(vma))
+			break;
	}

	if (map && map->type == _DRM_AGP) {
		unsigned long offset = address - vma->vm_start;
-		unsigned long baddr = VM_OFFSET(vma) + offset;
+		unsigned long baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

@@ -210,6 +211,8 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
	}

	if(!found_maps) {
+		drm_dma_handle_t dmah;
+
		switch (map->type) {
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:

@@ -228,6 +231,12 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
		case _DRM_AGP:
		case _DRM_SCATTER_GATHER:
			break;
+		case _DRM_CONSISTENT:
+			dmah.vaddr = map->handle;
+			dmah.busaddr = map->offset;
+			dmah.size = map->size;
+			__drm_pci_free(dev, &dmah);
+			break;
		}
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
	}

@@ -296,7 +305,7 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
	offset = address - vma->vm_start;
-	map_offset = map->offset - dev->sg->handle;
+	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);

@@ -305,8 +314,6 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
}

-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
-
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address,
				  int *type) {

@@ -335,35 +342,6 @@ static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
	return drm_do_vm_sg_nopage(vma, address);
}

-#else	/* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
-
-static struct page *drm_vm_nopage(struct vm_area_struct *vma,
-				  unsigned long address,
-				  int unused) {
-	return drm_do_vm_nopage(vma, address);
-}
-
-static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
-				      unsigned long address,
-				      int unused) {
-	return drm_do_vm_shm_nopage(vma, address);
-}
-
-static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
-				      unsigned long address,
-				      int unused) {
-	return drm_do_vm_dma_nopage(vma, address);
-}
-
-static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
-				     unsigned long address,
-				     int unused) {
-	return drm_do_vm_sg_nopage(vma, address);
-}
-
-#endif
-
/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
	.nopage = drm_vm_nopage,

@@ -487,11 +465,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
	vma->vm_ops = &drm_vm_dma_ops;

-#if LINUX_VERSION_CODE <= 0x02040e	/* KERNEL_VERSION(2,4,14) */
-	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
-#else
	vma->vm_flags |= VM_RESERVED; /* Don't swap */
-#endif

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open(vma);

@@ -560,13 +534,12 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
				   for performance, even if the list was a
				   bit longer. */
	list_for_each(list, &dev->maplist->head) {
-		unsigned long off;
-
		r_list = list_entry(list, drm_map_list_t, head);
		map = r_list->map;
		if (!map) continue;
-		off = dev->driver->get_map_ofs(map);
-		if (off == VM_OFFSET(vma)) break;
+		if (r_list->user_token == VM_OFFSET(vma))
+			break;
	}

	if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))

@@ -605,17 +578,17 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
		/* fall through to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
-		if (VM_OFFSET(vma) >= __pa(high_memory)) {
#if defined(__i386__) || defined(__x86_64__)
		if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
			pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
			pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
		}
#elif defined(__powerpc__)
-		pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;
+		pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+		if (map->type == _DRM_REGISTERS)
+			pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
#endif
		vma->vm_flags |= VM_IO;	/* not in core dump */
-		}
#if defined(__ia64__)
		if (efi_range_is_wc(vma->vm_start, vma->vm_end -
				    vma->vm_start))

@@ -628,12 +601,12 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
		offset = dev->driver->get_reg_ofs(dev);
#ifdef __sparc__
		if (io_remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start,
-				       (VM_OFFSET(vma) + offset) >> PAGE_SHIFT,
+				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
#else
		if (io_remap_pfn_range(vma, vma->vm_start,
-				       (VM_OFFSET(vma) + offset) >> PAGE_SHIFT,
+				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
#endif

@@ -641,37 +614,28 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%lx\n",
			  map->type,
-			  vma->vm_start, vma->vm_end, VM_OFFSET(vma) + offset);
+			  vma->vm_start, vma->vm_end, map->offset + offset);
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_SHM:
+	case _DRM_CONSISTENT:
+		/* Consistent memory is really like shared memory. It's only
+		 * allocate in a different way */
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap.  Change when
		   DRM_KERNEL advisory is supported. */
-#if LINUX_VERSION_CODE <= 0x02040e	/* KERNEL_VERSION(2,4,14) */
-		vma->vm_flags |= VM_LOCKED;
-#else
		vma->vm_flags |= VM_RESERVED;
-#endif
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
-#if LINUX_VERSION_CODE <= 0x02040e	/* KERNEL_VERSION(2,4,14) */
-		vma->vm_flags |= VM_LOCKED;
-#else
		vma->vm_flags |= VM_RESERVED;
-#endif
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
-#if LINUX_VERSION_CODE <= 0x02040e	/* KERNEL_VERSION(2,4,14) */
-	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
-#else
	vma->vm_flags |= VM_RESERVED; /* Don't swap */
-#endif

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open(vma);
@@ -152,14 +152,11 @@ static drm_map_t *ffb_find_map(struct file *filp, unsigned long off)
		return NULL;

	list_for_each(list, &dev->maplist->head) {
-		unsigned long uoff;
-
		r_list = (drm_map_list_t *)list;
		map = r_list->map;
		if (!map)
			continue;
-		uoff = (map->offset & 0xffffffff);
-		if (uoff == off)
+		if (r_list->user_token == off)
			return map;
	}
@@ -1,492 +0,0 @@
/* drm_context.h -- IOCTLs for generic contexts -*- linux-c -*-
* Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
* ChangeLog:
* 2001-11-16 Torsten Duwe <duwe@caldera.de>
* added context constructor/destructor hooks,
* needed by SiS driver's memory management.
*/
/* ================================================================
* Old-style context support -- only used by gamma.
*/
/* The drm_read and drm_write_string code (especially that which manages
the circular buffer), is based on Alessandro Rubini's LINUX DEVICE
DRIVERS (Cambridge: O'Reilly, 1998), pages 111-113. */
ssize_t gamma_fops_read(struct file *filp, char __user *buf, size_t count, loff_t *off)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int left;
int avail;
int send;
int cur;
DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp);
while (dev->buf_rp == dev->buf_wp) {
DRM_DEBUG(" sleeping\n");
if (filp->f_flags & O_NONBLOCK) {
return -EAGAIN;
}
interruptible_sleep_on(&dev->buf_readers);
if (signal_pending(current)) {
DRM_DEBUG(" interrupted\n");
return -ERESTARTSYS;
}
DRM_DEBUG(" awake\n");
}
left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
avail = DRM_BSZ - left;
send = DRM_MIN(avail, count);
while (send) {
if (dev->buf_wp > dev->buf_rp) {
cur = DRM_MIN(send, dev->buf_wp - dev->buf_rp);
} else {
cur = DRM_MIN(send, dev->buf_end - dev->buf_rp);
}
if (copy_to_user(buf, dev->buf_rp, cur))
return -EFAULT;
dev->buf_rp += cur;
if (dev->buf_rp == dev->buf_end) dev->buf_rp = dev->buf;
send -= cur;
}
wake_up_interruptible(&dev->buf_writers);
return DRM_MIN(avail, count);
}
/* In an incredibly convoluted setup, the kernel module actually calls
* back into the X server to perform context switches on behalf of the
* 3d clients.
*/
int DRM(write_string)(drm_device_t *dev, const char *s)
{
int left = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
int send = strlen(s);
int count;
DRM_DEBUG("%d left, %d to send (%p, %p)\n",
left, send, dev->buf_rp, dev->buf_wp);
if (left == 1 || dev->buf_wp != dev->buf_rp) {
DRM_ERROR("Buffer not empty (%d left, wp = %p, rp = %p)\n",
left,
dev->buf_wp,
dev->buf_rp);
}
while (send) {
if (dev->buf_wp >= dev->buf_rp) {
count = DRM_MIN(send, dev->buf_end - dev->buf_wp);
if (count == left) --count; /* Leave a hole */
} else {
count = DRM_MIN(send, dev->buf_rp - dev->buf_wp - 1);
}
strncpy(dev->buf_wp, s, count);
dev->buf_wp += count;
if (dev->buf_wp == dev->buf_end) dev->buf_wp = dev->buf;
send -= count;
}
if (dev->buf_async) kill_fasync(&dev->buf_async, SIGIO, POLL_IN);
DRM_DEBUG("waking\n");
wake_up_interruptible(&dev->buf_readers);
return 0;
}
unsigned int gamma_fops_poll(struct file *filp, struct poll_table_struct *wait)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
poll_wait(filp, &dev->buf_readers, wait);
if (dev->buf_wp != dev->buf_rp) return POLLIN | POLLRDNORM;
return 0;
}
int DRM(context_switch)(drm_device_t *dev, int old, int new)
{
char buf[64];
drm_queue_t *q;
if (test_and_set_bit(0, &dev->context_flag)) {
DRM_ERROR("Reentering -- FIXME\n");
return -EBUSY;
}
DRM_DEBUG("Context switch from %d to %d\n", old, new);
if (new >= dev->queue_count) {
clear_bit(0, &dev->context_flag);
return -EINVAL;
}
if (new == dev->last_context) {
clear_bit(0, &dev->context_flag);
return 0;
}
q = dev->queuelist[new];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
atomic_dec(&q->use_count);
clear_bit(0, &dev->context_flag);
return -EINVAL;
}
/* This causes the X server to wake up & do a bunch of hardware
* interaction to actually effect the context switch.
*/
sprintf(buf, "C %d %d\n", old, new);
DRM(write_string)(dev, buf);
atomic_dec(&q->use_count);
return 0;
}
int DRM(context_switch_complete)(drm_device_t *dev, int new)
{
drm_device_dma_t *dma = dev->dma;
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_switch = jiffies;
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
}
if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
if (DRM(lock_free)(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("Cannot free lock\n");
}
}
clear_bit(0, &dev->context_flag);
wake_up_interruptible(&dev->context_wait);
return 0;
}
static int DRM(init_queue)(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
{
DRM_DEBUG("\n");
if (atomic_read(&q->use_count) != 1
|| atomic_read(&q->finalization)
|| atomic_read(&q->block_count)) {
DRM_ERROR("New queue is already in use: u%d f%d b%d\n",
atomic_read(&q->use_count),
atomic_read(&q->finalization),
atomic_read(&q->block_count));
}
atomic_set(&q->finalization, 0);
atomic_set(&q->block_count, 0);
atomic_set(&q->block_read, 0);
atomic_set(&q->block_write, 0);
atomic_set(&q->total_queued, 0);
atomic_set(&q->total_flushed, 0);
atomic_set(&q->total_locks, 0);
init_waitqueue_head(&q->write_queue);
init_waitqueue_head(&q->read_queue);
init_waitqueue_head(&q->flush_queue);
q->flags = ctx->flags;
DRM(waitlist_create)(&q->waitlist, dev->dma->buf_count);
return 0;
}
/* drm_alloc_queue:
PRE: 1) dev->queuelist[0..dev->queue_count] is allocated and will not
disappear (so all deallocation must be done after IOCTLs are off)
2) dev->queue_count < dev->queue_slots
3) dev->queuelist[i].use_count == 0 and
dev->queuelist[i].finalization == 0 if i not in use
POST: 1) dev->queuelist[i].use_count == 1
2) dev->queue_count < dev->queue_slots */
static int DRM(alloc_queue)(drm_device_t *dev)
{
int i;
drm_queue_t *queue;
int oldslots;
int newslots;
/* Check for a free queue */
for (i = 0; i < dev->queue_count; i++) {
atomic_inc(&dev->queuelist[i]->use_count);
if (atomic_read(&dev->queuelist[i]->use_count) == 1
&& !atomic_read(&dev->queuelist[i]->finalization)) {
DRM_DEBUG("%d (free)\n", i);
return i;
}
atomic_dec(&dev->queuelist[i]->use_count);
}
/* Allocate a new queue */
down(&dev->struct_sem);
queue = DRM(alloc)(sizeof(*queue), DRM_MEM_QUEUES);
memset(queue, 0, sizeof(*queue));
atomic_set(&queue->use_count, 1);
++dev->queue_count;
if (dev->queue_count >= dev->queue_slots) {
oldslots = dev->queue_slots * sizeof(*dev->queuelist);
if (!dev->queue_slots) dev->queue_slots = 1;
dev->queue_slots *= 2;
newslots = dev->queue_slots * sizeof(*dev->queuelist);
dev->queuelist = DRM(realloc)(dev->queuelist,
oldslots,
newslots,
DRM_MEM_QUEUES);
if (!dev->queuelist) {
up(&dev->struct_sem);
DRM_DEBUG("out of memory\n");
return -ENOMEM;
}
}
dev->queuelist[dev->queue_count-1] = queue;
up(&dev->struct_sem);
DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
return dev->queue_count - 1;
}
int DRM(resctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_ctx_res_t __user *argp = (void __user *)arg;
drm_ctx_res_t res;
drm_ctx_t ctx;
int i;
DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
if (copy_from_user(&res, argp, sizeof(res)))
return -EFAULT;
if (res.count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
if (copy_to_user(&res.contexts[i],
&i,
sizeof(i)))
return -EFAULT;
}
}
res.count = DRM_RESERVED_CONTEXTS;
if (copy_to_user(argp, &res, sizeof(res)))
return -EFAULT;
return 0;
}
int DRM(addctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
drm_ctx_t __user *argp = (void __user *)arg;
if (copy_from_user(&ctx, argp, sizeof(ctx)))
return -EFAULT;
if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) {
/* Init kernel's context and get a new one. */
DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
ctx.handle = DRM(alloc_queue)(dev);
}
DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
DRM_DEBUG("%d\n", ctx.handle);
if (copy_to_user(argp, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
int DRM(modctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
drm_queue_t *q;
if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle < 0 || ctx.handle >= dev->queue_count) return -EINVAL;
q = dev->queuelist[ctx.handle];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
/* No longer in use */
atomic_dec(&q->use_count);
return -EINVAL;
}
if (DRM_BUFCOUNT(&q->waitlist)) {
atomic_dec(&q->use_count);
return -EBUSY;
}
q->flags = ctx.flags;
atomic_dec(&q->use_count);
return 0;
}
int DRM(getctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t __user *argp = (void __user *)arg;
drm_ctx_t ctx;
drm_queue_t *q;
if (copy_from_user(&ctx, argp, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle >= dev->queue_count) return -EINVAL;
q = dev->queuelist[ctx.handle];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
/* No longer in use */
atomic_dec(&q->use_count);
return -EINVAL;
}
ctx.flags = q->flags;
atomic_dec(&q->use_count);
if (copy_to_user(argp, &ctx, sizeof(ctx)))
return -EFAULT;
return 0;
}
int DRM(switchctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
return DRM(context_switch)(dev, dev->last_context, ctx.handle);
}
int DRM(newctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
DRM(context_switch_complete)(dev, ctx.handle);
return 0;
}
int DRM(rmctx)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_t ctx;
drm_queue_t *q;
drm_buf_t *buf;
if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
return -EFAULT;
DRM_DEBUG("%d\n", ctx.handle);
if (ctx.handle >= dev->queue_count) return -EINVAL;
q = dev->queuelist[ctx.handle];
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) == 1) {
/* No longer in use */
atomic_dec(&q->use_count);
return -EINVAL;
}
atomic_inc(&q->finalization); /* Mark queue in finalization state */
atomic_sub(2, &q->use_count); /* Mark queue as unused (pending
finalization) */
while (test_and_set_bit(0, &dev->interrupt_flag)) {
schedule();
if (signal_pending(current)) {
clear_bit(0, &dev->interrupt_flag);
return -EINTR;
}
}
/* Remove queued buffers */
while ((buf = DRM(waitlist_get)(&q->waitlist))) {
DRM(free_buffer)(dev, buf);
}
clear_bit(0, &dev->interrupt_flag);
/* Wakeup blocked processes */
wake_up_interruptible(&q->read_queue);
wake_up_interruptible(&q->write_queue);
wake_up_interruptible(&q->flush_queue);
/* Finalization over. Queue is made
available when both use_count and
finalization become 0, which won't
happen until all the waiting processes
stop waiting. */
atomic_dec(&q->finalization);
return 0;
}


@ -1,946 +0,0 @@
/* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*-
* Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
*
*/
#include "gamma.h"
#include "drmP.h"
#include "drm.h"
#include "gamma_drm.h"
#include "gamma_drv.h"
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
unsigned long length)
{
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
mb();
while ( GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
cpu_relax();
GAMMA_WRITE(GAMMA_DMAADDRESS, address);
while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
cpu_relax();
GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
}
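
/*
 * The two quiescence routines below use the chip's sync-tag handshake:
 * wait for the DMA count to drain, program FILTERMODE so sync tags pass
 * through to the output FIFO, issue a Sync, then pop the output FIFO until
 * GAMMA_SYNC_TAG emerges -- at that point everything queued ahead of the
 * tag has retired.  The dual-rasterizer variant broadcasts the sync and
 * collects one tag per MX, the second via the +0x10000 mirror of the FIFO
 * registers.  (Summary inferred from the code below, not from chip docs.)
 */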
void gamma_dma_quiescent_single(drm_device_t *dev)
{
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
while (GAMMA_READ(GAMMA_DMACOUNT))
cpu_relax();
while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
cpu_relax();
GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
GAMMA_WRITE(GAMMA_SYNC, 0);
do {
while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
cpu_relax();
} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
}
void gamma_dma_quiescent_dual(drm_device_t *dev)
{
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
while (GAMMA_READ(GAMMA_DMACOUNT))
cpu_relax();
while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
cpu_relax();
GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);
GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
GAMMA_WRITE(GAMMA_SYNC, 0);
/* Read from first MX */
do {
while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
cpu_relax();
} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
/* Read from second MX */
do {
while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000))
cpu_relax();
} while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
}
void gamma_dma_ready(drm_device_t *dev)
{
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
while (GAMMA_READ(GAMMA_DMACOUNT))
cpu_relax();
}
static inline int gamma_dma_is_ready(drm_device_t *dev)
{
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
return (!GAMMA_READ(GAMMA_DMACOUNT));
}
irqreturn_t gamma_driver_irq_handler( DRM_IRQ_ARGS )
{
drm_device_t *dev = (drm_device_t *)arg;
drm_device_dma_t *dma = dev->dma;
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
/* FIXME: should check whether we're actually interested in the interrupt? */
atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */
while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
cpu_relax();
GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0x05S */
GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);
if (gamma_dma_is_ready(dev)) {
/* Free previous buffer */
if (test_and_set_bit(0, &dev->dma_flag))
return IRQ_HANDLED;
if (dma->this_buffer) {
gamma_free_buffer(dev, dma->this_buffer);
dma->this_buffer = NULL;
}
clear_bit(0, &dev->dma_flag);
/* Dispatch new buffer */
schedule_work(&dev->work);
}
return IRQ_HANDLED;
}
/* Only called by gamma_dma_schedule. */
static int gamma_do_dma(drm_device_t *dev, int locked)
{
unsigned long address;
unsigned long length;
drm_buf_t *buf;
int retcode = 0;
drm_device_dma_t *dma = dev->dma;
if (test_and_set_bit(0, &dev->dma_flag)) return -EBUSY;
if (!dma->next_buffer) {
DRM_ERROR("No next_buffer\n");
clear_bit(0, &dev->dma_flag);
return -EINVAL;
}
buf = dma->next_buffer;
/* WE NOW ARE ON LOGICAL PAGES!! - using page table setup in dma_init */
/* So we pass the buffer index value into the physical page offset */
address = buf->idx << 12;
length = buf->used;
DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
buf->context, buf->idx, length);
if (buf->list == DRM_LIST_RECLAIM) {
gamma_clear_next_buffer(dev);
gamma_free_buffer(dev, buf);
clear_bit(0, &dev->dma_flag);
return -EINVAL;
}
if (!length) {
DRM_ERROR("0 length buffer\n");
gamma_clear_next_buffer(dev);
gamma_free_buffer(dev, buf);
clear_bit(0, &dev->dma_flag);
return 0;
}
if (!gamma_dma_is_ready(dev)) {
clear_bit(0, &dev->dma_flag);
return -EBUSY;
}
if (buf->while_locked) {
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
DRM_ERROR("Dispatching buffer %d from pid %d"
" \"while locked\", but no lock held\n",
buf->idx, current->pid);
}
} else {
if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
clear_bit(0, &dev->dma_flag);
return -EBUSY;
}
}
	if (dev->last_context != buf->context
	    && !(dev->queuelist[buf->context]->flags
		 & _DRM_CONTEXT_PRESERVED)) {
		/* PRE: dev->last_context != buf->context */
		if (DRM(context_switch)(dev, dev->last_context,
					buf->context)) {
			DRM(clear_next_buffer)(dev);
			DRM(free_buffer)(dev, buf);
		}
		retcode = -EBUSY;
		goto cleanup;
		/* POST: we will wait for the context
		   switch and will dispatch on a later call
		   when dev->last_context == buf->context.
		   NOTE WE HOLD THE LOCK THROUGHOUT THIS
		   TIME! */
	}
gamma_clear_next_buffer(dev);
buf->pending = 1;
buf->waiting = 0;
buf->list = DRM_LIST_PEND;
/* WE NOW ARE ON LOGICAL PAGES!!! - overriding address */
address = buf->idx << 12;
gamma_dma_dispatch(dev, address, length);
gamma_free_buffer(dev, dma->this_buffer);
dma->this_buffer = buf;
atomic_inc(&dev->counts[7]); /* _DRM_STAT_DMA */
atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */
if (!buf->while_locked && !dev->context_flag && !locked) {
if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
}
cleanup:
clear_bit(0, &dev->dma_flag);
return retcode;
}
static void gamma_dma_timer_bh(unsigned long dev)
{
gamma_dma_schedule((drm_device_t *)dev, 0);
}
void gamma_irq_immediate_bh(void *dev)
{
gamma_dma_schedule(dev, 0);
}
int gamma_dma_schedule(drm_device_t *dev, int locked)
{
int next;
drm_queue_t *q;
drm_buf_t *buf;
int retcode = 0;
int processed = 0;
int missed;
int expire = 20;
drm_device_dma_t *dma = dev->dma;
if (test_and_set_bit(0, &dev->interrupt_flag)) {
/* Not reentrant */
atomic_inc(&dev->counts[10]); /* _DRM_STAT_MISSED */
return -EBUSY;
}
missed = atomic_read(&dev->counts[10]);
again:
if (dev->context_flag) {
clear_bit(0, &dev->interrupt_flag);
return -EBUSY;
}
if (dma->next_buffer) {
/* Unsent buffer that was previously
selected, but that couldn't be sent
because the lock could not be obtained
or the DMA engine wasn't ready. Try
again. */
if (!(retcode = gamma_do_dma(dev, locked))) ++processed;
} else {
do {
next = gamma_select_queue(dev, gamma_dma_timer_bh);
if (next >= 0) {
q = dev->queuelist[next];
buf = gamma_waitlist_get(&q->waitlist);
dma->next_buffer = buf;
dma->next_queue = q;
if (buf && buf->list == DRM_LIST_RECLAIM) {
gamma_clear_next_buffer(dev);
gamma_free_buffer(dev, buf);
}
}
} while (next >= 0 && !dma->next_buffer);
if (dma->next_buffer) {
if (!(retcode = gamma_do_dma(dev, locked))) {
++processed;
}
}
}
if (--expire) {
if (missed != atomic_read(&dev->counts[10])) {
if (gamma_dma_is_ready(dev)) goto again;
}
if (processed && gamma_dma_is_ready(dev)) {
processed = 0;
goto again;
}
}
clear_bit(0, &dev->interrupt_flag);
return retcode;
}
static int gamma_dma_priority(struct file *filp,
drm_device_t *dev, drm_dma_t *d)
{
unsigned long address;
unsigned long length;
int must_free = 0;
int retcode = 0;
int i;
int idx;
drm_buf_t *buf;
drm_buf_t *last_buf = NULL;
drm_device_dma_t *dma = dev->dma;
int *send_indices = NULL;
int *send_sizes = NULL;
DECLARE_WAITQUEUE(entry, current);
/* Turn off interrupt handling */
while (test_and_set_bit(0, &dev->interrupt_flag)) {
schedule();
if (signal_pending(current)) return -EINTR;
}
if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
while (!gamma_lock_take(&dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
schedule();
if (signal_pending(current)) {
clear_bit(0, &dev->interrupt_flag);
return -EINTR;
}
}
++must_free;
}
send_indices = DRM(alloc)(d->send_count * sizeof(*send_indices),
DRM_MEM_DRIVER);
	if (send_indices == NULL) {
		retcode = -ENOMEM;
		goto cleanup;
	}
if (copy_from_user(send_indices, d->send_indices,
d->send_count * sizeof(*send_indices))) {
retcode = -EFAULT;
goto cleanup;
}
send_sizes = DRM(alloc)(d->send_count * sizeof(*send_sizes),
DRM_MEM_DRIVER);
	if (send_sizes == NULL) {
		retcode = -ENOMEM;
		goto cleanup;
	}
if (copy_from_user(send_sizes, d->send_sizes,
d->send_count * sizeof(*send_sizes))) {
retcode = -EFAULT;
goto cleanup;
}
for (i = 0; i < d->send_count; i++) {
idx = send_indices[i];
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("Index %d (of %d max)\n",
send_indices[i], dma->buf_count - 1);
continue;
}
buf = dma->buflist[ idx ];
if (buf->filp != filp) {
DRM_ERROR("Process %d using buffer not owned\n",
current->pid);
retcode = -EINVAL;
goto cleanup;
}
if (buf->list != DRM_LIST_NONE) {
DRM_ERROR("Process %d using buffer on list %d\n",
current->pid, buf->list);
retcode = -EINVAL;
goto cleanup;
}
/* This isn't a race condition on
buf->list, since our concern is the
buffer reclaim during the time the
process closes the /dev/drm? handle, so
it can't also be doing DMA. */
buf->list = DRM_LIST_PRIO;
buf->used = send_sizes[i];
buf->context = d->context;
buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
address = (unsigned long)buf->address;
length = buf->used;
if (!length) {
DRM_ERROR("0 length buffer\n");
}
if (buf->pending) {
DRM_ERROR("Sending pending buffer:"
" buffer %d, offset %d\n",
send_indices[i], i);
retcode = -EINVAL;
goto cleanup;
}
if (buf->waiting) {
DRM_ERROR("Sending waiting buffer:"
" buffer %d, offset %d\n",
send_indices[i], i);
retcode = -EINVAL;
goto cleanup;
}
buf->pending = 1;
if (dev->last_context != buf->context
&& !(dev->queuelist[buf->context]->flags
& _DRM_CONTEXT_PRESERVED)) {
add_wait_queue(&dev->context_wait, &entry);
current->state = TASK_INTERRUPTIBLE;
/* PRE: dev->last_context != buf->context */
DRM(context_switch)(dev, dev->last_context,
buf->context);
/* POST: we will wait for the context
switch and will dispatch on a later call
when dev->last_context == buf->context.
NOTE WE HOLD THE LOCK THROUGHOUT THIS
TIME! */
schedule();
current->state = TASK_RUNNING;
remove_wait_queue(&dev->context_wait, &entry);
if (signal_pending(current)) {
retcode = -EINTR;
goto cleanup;
}
if (dev->last_context != buf->context) {
DRM_ERROR("Context mismatch: %d %d\n",
dev->last_context,
buf->context);
}
}
gamma_dma_dispatch(dev, address, length);
atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */
atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */
if (last_buf) {
gamma_free_buffer(dev, last_buf);
}
last_buf = buf;
}
cleanup:
if (last_buf) {
gamma_dma_ready(dev);
gamma_free_buffer(dev, last_buf);
}
if (send_indices)
DRM(free)(send_indices, d->send_count * sizeof(*send_indices),
DRM_MEM_DRIVER);
if (send_sizes)
DRM(free)(send_sizes, d->send_count * sizeof(*send_sizes),
DRM_MEM_DRIVER);
if (must_free && !dev->context_flag) {
if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
DRM_ERROR("\n");
}
}
clear_bit(0, &dev->interrupt_flag);
return retcode;
}
static int gamma_dma_send_buffers(struct file *filp,
drm_device_t *dev, drm_dma_t *d)
{
DECLARE_WAITQUEUE(entry, current);
drm_buf_t *last_buf = NULL;
int retcode = 0;
drm_device_dma_t *dma = dev->dma;
int send_index;
if (get_user(send_index, &d->send_indices[d->send_count-1]))
return -EFAULT;
if (d->flags & _DRM_DMA_BLOCK) {
last_buf = dma->buflist[send_index];
add_wait_queue(&last_buf->dma_wait, &entry);
}
if ((retcode = gamma_dma_enqueue(filp, d))) {
if (d->flags & _DRM_DMA_BLOCK)
remove_wait_queue(&last_buf->dma_wait, &entry);
return retcode;
}
gamma_dma_schedule(dev, 0);
if (d->flags & _DRM_DMA_BLOCK) {
DRM_DEBUG("%d waiting\n", current->pid);
for (;;) {
current->state = TASK_INTERRUPTIBLE;
if (!last_buf->waiting && !last_buf->pending)
break; /* finished */
schedule();
if (signal_pending(current)) {
retcode = -EINTR; /* Can't restart */
break;
}
}
current->state = TASK_RUNNING;
DRM_DEBUG("%d running\n", current->pid);
remove_wait_queue(&last_buf->dma_wait, &entry);
if (!retcode
|| (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) {
if (!waitqueue_active(&last_buf->dma_wait)) {
gamma_free_buffer(dev, last_buf);
}
}
if (retcode) {
DRM_ERROR("ctx%d w%d p%d c%ld i%d l%d pid:%d\n",
d->context,
last_buf->waiting,
last_buf->pending,
(long)DRM_WAITCOUNT(dev, d->context),
last_buf->idx,
last_buf->list,
current->pid);
}
}
return retcode;
}
int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_device_dma_t *dma = dev->dma;
int retcode = 0;
drm_dma_t __user *argp = (void __user *)arg;
drm_dma_t d;
if (copy_from_user(&d, argp, sizeof(d)))
return -EFAULT;
if (d.send_count < 0 || d.send_count > dma->buf_count) {
DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
current->pid, d.send_count, dma->buf_count);
return -EINVAL;
}
if (d.request_count < 0 || d.request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
current->pid, d.request_count, dma->buf_count);
return -EINVAL;
}
if (d.send_count) {
if (d.flags & _DRM_DMA_PRIORITY)
retcode = gamma_dma_priority(filp, dev, &d);
else
retcode = gamma_dma_send_buffers(filp, dev, &d);
}
d.granted_count = 0;
if (!retcode && d.request_count) {
retcode = gamma_dma_get_buffers(filp, &d);
}
DRM_DEBUG("%d returning, granted = %d\n",
current->pid, d.granted_count);
if (copy_to_user(argp, &d, sizeof(d)))
return -EFAULT;
return retcode;
}
/* =============================================================
* DMA initialization, cleanup
*/
static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
{
drm_gamma_private_t *dev_priv;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf;
int i;
struct list_head *list;
unsigned long *pgt;
DRM_DEBUG( "%s\n", __FUNCTION__ );
dev_priv = DRM(alloc)( sizeof(drm_gamma_private_t),
DRM_MEM_DRIVER );
if ( !dev_priv )
return -ENOMEM;
dev->dev_private = (void *)dev_priv;
memset( dev_priv, 0, sizeof(drm_gamma_private_t) );
dev_priv->num_rast = init->num_rast;
list_for_each(list, &dev->maplist->head) {
drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
if( r_list->map &&
r_list->map->type == _DRM_SHM &&
r_list->map->flags & _DRM_CONTAINS_LOCK ) {
dev_priv->sarea = r_list->map;
break;
}
}
dev_priv->mmio0 = drm_core_findmap(dev, init->mmio0);
dev_priv->mmio1 = drm_core_findmap(dev, init->mmio1);
dev_priv->mmio2 = drm_core_findmap(dev, init->mmio2);
dev_priv->mmio3 = drm_core_findmap(dev, init->mmio3);
dev_priv->sarea_priv = (drm_gamma_sarea_t *)
((u8 *)dev_priv->sarea->handle +
init->sarea_priv_offset);
if (init->pcimode) {
buf = dma->buflist[GLINT_DRI_BUF_COUNT];
pgt = buf->address;
for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
buf = dma->buflist[i];
*pgt = virt_to_phys((void*)buf->address) | 0x07;
pgt++;
}
buf = dma->buflist[GLINT_DRI_BUF_COUNT];
} else {
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
drm_core_ioremap( dev->agp_buffer_map, dev);
buf = dma->buflist[GLINT_DRI_BUF_COUNT];
pgt = buf->address;
for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
buf = dma->buflist[i];
*pgt = (unsigned long)buf->address + 0x07;
pgt++;
}
buf = dma->buflist[GLINT_DRI_BUF_COUNT];
	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 1)
		cpu_relax();
GAMMA_WRITE( GAMMA_GDMACONTROL, 0xe);
}
	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
		cpu_relax();
GAMMA_WRITE( GAMMA_PAGETABLEADDR, virt_to_phys((void*)buf->address) );
GAMMA_WRITE( GAMMA_PAGETABLELENGTH, 2 );
return 0;
}
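
/*
 * Worked example of the mapping set up above (a reading of the code; the
 * hardware meaning of the low bits is an assumption, since no chip docs
 * accompany this file): buffer i's address is written into the page table
 * held in buflist[GLINT_DRI_BUF_COUNT], so the engine sees buffer i at
 * logical address (i << 12), one 4 KB page per buffer -- which is why
 * gamma_do_dma() dispatches with "address = buf->idx << 12" rather than a
 * physical address.  The 0x07 OR'd into each PCI-mode entry looks like the
 * chip's valid/present flag bits.
 */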
int gamma_do_cleanup_dma( drm_device_t *dev )
{
DRM_DEBUG( "%s\n", __FUNCTION__ );
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
* is freed, it's too late.
*/
if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
if ( dev->irq_enabled )
DRM(irq_uninstall)(dev);
if ( dev->dev_private ) {
if ( dev->agp_buffer_map != NULL )
drm_core_ioremapfree( dev->agp_buffer_map, dev );
DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t),
DRM_MEM_DRIVER );
dev->dev_private = NULL;
}
return 0;
}
int gamma_dma_init( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_gamma_init_t init;
LOCK_TEST_WITH_RETURN( dev, filp );
if ( copy_from_user( &init, (drm_gamma_init_t __user *)arg, sizeof(init) ) )
return -EFAULT;
switch ( init.func ) {
case GAMMA_INIT_DMA:
return gamma_do_init_dma( dev, &init );
case GAMMA_CLEANUP_DMA:
return gamma_do_cleanup_dma( dev );
}
return -EINVAL;
}
static int gamma_do_copy_dma( drm_device_t *dev, drm_gamma_copy_t *copy )
{
drm_device_dma_t *dma = dev->dma;
unsigned int *screenbuf;
DRM_DEBUG( "%s\n", __FUNCTION__ );
/* We've DRM_RESTRICTED this DMA buffer */
screenbuf = dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ]->address;
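	/* NOTE: the block below is compiled out.  It sketches the command
	 * stream for a screen-to-screen copy, but as written it increments
	 * a 'buffer' pointer that is never declared in this function, so it
	 * would not build if the #if 0 were simply removed. */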
#if 0
*buffer++ = 0x180; /* Tag (FilterMode) */
*buffer++ = 0x200; /* Allow FBColor through */
*buffer++ = 0x53B; /* Tag */
*buffer++ = copy->Pitch;
*buffer++ = 0x53A; /* Tag */
*buffer++ = copy->SrcAddress;
*buffer++ = 0x539; /* Tag */
*buffer++ = copy->WidthHeight; /* Initiates transfer */
*buffer++ = 0x53C; /* Tag - DMAOutputAddress */
*buffer++ = virt_to_phys((void*)screenbuf);
*buffer++ = 0x53D; /* Tag - DMAOutputCount */
*buffer++ = copy->Count; /* Reads HostOutFifo BLOCKS until ..*/
/* Data now sitting in dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ] */
/* Now put it back to the screen */
*buffer++ = 0x180; /* Tag (FilterMode) */
*buffer++ = 0x400; /* Allow Sync through */
*buffer++ = 0x538; /* Tag - DMARectangleReadTarget */
*buffer++ = 0x155; /* FBSourceData | count */
*buffer++ = 0x537; /* Tag */
*buffer++ = copy->Pitch;
*buffer++ = 0x536; /* Tag */
*buffer++ = copy->DstAddress;
*buffer++ = 0x535; /* Tag */
*buffer++ = copy->WidthHeight; /* Initiates transfer */
*buffer++ = 0x530; /* Tag - DMAAddr */
*buffer++ = virt_to_phys((void*)screenbuf);
*buffer++ = 0x531;
*buffer++ = copy->Count; /* initiates DMA transfer of color data */
#endif
/* need to dispatch it now */
return 0;
}
int gamma_dma_copy( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg )
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_gamma_copy_t copy;
if ( copy_from_user( &copy, (drm_gamma_copy_t __user *)arg, sizeof(copy) ) )
return -EFAULT;
return gamma_do_copy_dma( dev, &copy );
}
/* =============================================================
* Per Context SAREA Support
*/
int gamma_getsareactx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_priv_map_t __user *argp = (void __user *)arg;
drm_ctx_priv_map_t request;
drm_map_t *map;
if (copy_from_user(&request, argp, sizeof(request)))
return -EFAULT;
down(&dev->struct_sem);
if ((int)request.ctx_id >= dev->max_context) {
up(&dev->struct_sem);
return -EINVAL;
}
map = dev->context_sareas[request.ctx_id];
up(&dev->struct_sem);
request.handle = map->handle;
if (copy_to_user(argp, &request, sizeof(request)))
return -EFAULT;
return 0;
}
int gamma_setsareactx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
drm_ctx_priv_map_t request;
drm_map_t *map = NULL;
drm_map_list_t *r_list;
struct list_head *list;
if (copy_from_user(&request,
(drm_ctx_priv_map_t __user *)arg,
sizeof(request)))
return -EFAULT;
down(&dev->struct_sem);
r_list = NULL;
list_for_each(list, &dev->maplist->head) {
r_list = list_entry(list, drm_map_list_t, head);
if(r_list->map &&
r_list->map->handle == request.handle) break;
}
if (list == &(dev->maplist->head)) {
up(&dev->struct_sem);
return -EINVAL;
}
map = r_list->map;
up(&dev->struct_sem);
if (!map) return -EINVAL;
down(&dev->struct_sem);
if ((int)request.ctx_id >= dev->max_context) {
up(&dev->struct_sem);
return -EINVAL;
}
dev->context_sareas[request.ctx_id] = map;
up(&dev->struct_sem);
return 0;
}
void gamma_driver_irq_preinstall( drm_device_t *dev ) {
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
cpu_relax();
GAMMA_WRITE( GAMMA_GCOMMANDMODE, 0x00000004 );
GAMMA_WRITE( GAMMA_GDMACONTROL, 0x00000000 );
}
void gamma_driver_irq_postinstall( drm_device_t *dev ) {
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
cpu_relax();
GAMMA_WRITE( GAMMA_GINTENABLE, 0x00002001 );
GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000008 );
GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00039090 );
}
void gamma_driver_irq_uninstall( drm_device_t *dev ) {
drm_gamma_private_t *dev_priv =
(drm_gamma_private_t *)dev->dev_private;
if (!dev_priv)
return;
while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
cpu_relax();
GAMMA_WRITE( GAMMA_GDELAYTIMER, 0x00000000 );
GAMMA_WRITE( GAMMA_COMMANDINTENABLE, 0x00000000 );
GAMMA_WRITE( GAMMA_GINTENABLE, 0x00000000 );
}
extern drm_ioctl_desc_t DRM(ioctls)[];
static int gamma_driver_preinit(drm_device_t *dev)
{
/* reset the finish ioctl */
DRM(ioctls)[DRM_IOCTL_NR(DRM_IOCTL_FINISH)].func = DRM(finish);
return 0;
}
static void gamma_driver_pretakedown(drm_device_t *dev)
{
gamma_do_cleanup_dma(dev);
}
static void gamma_driver_dma_ready(drm_device_t *dev)
{
gamma_dma_ready(dev);
}
static int gamma_driver_dma_quiescent(drm_device_t *dev)
{
drm_gamma_private_t *dev_priv = (
drm_gamma_private_t *)dev->dev_private;
if (dev_priv->num_rast == 2)
gamma_dma_quiescent_dual(dev);
else gamma_dma_quiescent_single(dev);
return 0;
}
void gamma_driver_register_fns(drm_device_t *dev)
{
dev->driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ;
DRM(fops).read = gamma_fops_read;
DRM(fops).poll = gamma_fops_poll;
dev->driver.preinit = gamma_driver_preinit;
dev->driver.pretakedown = gamma_driver_pretakedown;
dev->driver.dma_ready = gamma_driver_dma_ready;
dev->driver.dma_quiescent = gamma_driver_dma_quiescent;
dev->driver.dma_flush_block_and_flush = gamma_flush_block_and_flush;
dev->driver.dma_flush_unblock = gamma_flush_unblock;
}


@ -1,90 +0,0 @@
#ifndef _GAMMA_DRM_H_
#define _GAMMA_DRM_H_
typedef struct _drm_gamma_tex_region {
unsigned char next, prev; /* indices to form a circular LRU */
unsigned char in_use; /* owned by a client, or free? */
int age; /* tracked by clients to update local LRU's */
} drm_gamma_tex_region_t;
typedef struct {
unsigned int GDeltaMode;
unsigned int GDepthMode;
unsigned int GGeometryMode;
unsigned int GTransformMode;
} drm_gamma_context_regs_t;
typedef struct _drm_gamma_sarea {
drm_gamma_context_regs_t context_state;
unsigned int dirty;
/* Maintain an LRU of contiguous regions of texture space. If
* you think you own a region of texture memory, and it has an
* age different to the one you set, then you are mistaken and
* it has been stolen by another client. If global texAge
* hasn't changed, there is no need to walk the list.
*
* These regions can be used as a proxy for the fine-grained
* texture information of other clients - by maintaining them
* in the same lru which is used to age their own textures,
* clients have an approximate lru for the whole of global
* texture space, and can make informed decisions as to which
* areas to kick out. There is no need to choose whether to
* kick out your own texture or someone else's - simply eject
* them all in LRU order.
*/
#define GAMMA_NR_TEX_REGIONS 64
drm_gamma_tex_region_t texList[GAMMA_NR_TEX_REGIONS+1];
/* Last elt is sentinel */
int texAge; /* last time texture was uploaded */
int last_enqueue; /* last time a buffer was enqueued */
int last_dispatch; /* age of the most recently dispatched buffer */
int last_quiescent; /* */
int ctxOwner; /* last context to upload state */
int vertex_prim;
} drm_gamma_sarea_t;
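
/*
 * A minimal sketch of how a client is expected to use the LRU above
 * (drawn from the comment in the struct; the helper and its 'my_age'
 * bookkeeping are illustrative, not part of this header):
 */
#if 0
static inline int gamma_tex_region_stolen(const drm_gamma_sarea_t *sarea,
					  int region, int my_age)
{
	/* An age different from the one we stamped on the region means
	 * another client has kicked it out and claimed the space. */
	return sarea->texList[region].age != my_age;
}
#endif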
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (xf86drmGamma.h)
*/
/* Gamma specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_IOCTL_GAMMA_INIT DRM_IOW( 0x40, drm_gamma_init_t)
#define DRM_IOCTL_GAMMA_COPY DRM_IOW( 0x41, drm_gamma_copy_t)
typedef struct drm_gamma_copy {
unsigned int DMAOutputAddress;
unsigned int DMAOutputCount;
unsigned int DMAReadGLINTSource;
unsigned int DMARectangleWriteAddress;
unsigned int DMARectangleWriteLinePitch;
unsigned int DMARectangleWrite;
unsigned int DMARectangleReadAddress;
unsigned int DMARectangleReadLinePitch;
unsigned int DMARectangleRead;
unsigned int DMARectangleReadTarget;
} drm_gamma_copy_t;
typedef struct drm_gamma_init {
enum {
GAMMA_INIT_DMA = 0x01,
GAMMA_CLEANUP_DMA = 0x02
} func;
int sarea_priv_offset;
int pcimode;
unsigned int mmio0;
unsigned int mmio1;
unsigned int mmio2;
unsigned int mmio3;
unsigned int buffers_offset;
int num_rast;
} drm_gamma_init_t;
#endif /* _GAMMA_DRM_H_ */


@ -1,59 +0,0 @@
/* gamma.c -- 3dlabs GMX 2000 driver -*- linux-c -*-
* Created: Mon Jan 4 08:58:31 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*/
#include <linux/config.h>
#include "gamma.h"
#include "drmP.h"
#include "drm.h"
#include "gamma_drm.h"
#include "gamma_drv.h"
#include "drm_auth.h"
#include "drm_agpsupport.h"
#include "drm_bufs.h"
#include "gamma_context.h" /* NOTE! */
#include "drm_dma.h"
#include "gamma_old_dma.h" /* NOTE */
#include "drm_drawable.h"
#include "drm_drv.h"
#include "drm_fops.h"
#include "drm_init.h"
#include "drm_ioctl.h"
#include "drm_irq.h"
#include "gamma_lists.h" /* NOTE */
#include "drm_lock.h"
#include "gamma_lock.h" /* NOTE */
#include "drm_memory.h"
#include "drm_proc.h"
#include "drm_vm.h"
#include "drm_stub.h"
#include "drm_scatter.h"


@ -1,147 +0,0 @@
/* gamma_drv.h -- Private header for 3dlabs GMX 2000 driver -*- linux-c -*-
* Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
*
*/
#ifndef _GAMMA_DRV_H_
#define _GAMMA_DRV_H_
typedef struct drm_gamma_private {
drm_gamma_sarea_t *sarea_priv;
drm_map_t *sarea;
drm_map_t *mmio0;
drm_map_t *mmio1;
drm_map_t *mmio2;
drm_map_t *mmio3;
int num_rast;
} drm_gamma_private_t;
/* gamma_dma.c */
extern int gamma_dma_init( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int gamma_dma_copy( struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg );
extern int gamma_do_cleanup_dma( drm_device_t *dev );
extern void gamma_dma_ready(drm_device_t *dev);
extern void gamma_dma_quiescent_single(drm_device_t *dev);
extern void gamma_dma_quiescent_dual(drm_device_t *dev);
/* gamma_dma.c */
extern int gamma_dma_schedule(drm_device_t *dev, int locked);
extern int gamma_dma(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int gamma_find_devices(void);
extern int gamma_found(void);
/* Gamma-specific code pulled from drm_fops.h:
*/
extern int DRM(finish)(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int DRM(flush_unblock)(drm_device_t *dev, int context,
drm_lock_flags_t flags);
extern int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
drm_lock_flags_t flags);
/* Gamma-specific code pulled from drm_dma.h:
*/
extern void DRM(clear_next_buffer)(drm_device_t *dev);
extern int DRM(select_queue)(drm_device_t *dev,
void (*wrapper)(unsigned long));
extern int DRM(dma_enqueue)(struct file *filp, drm_dma_t *dma);
extern int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma);
/* Gamma-specific code pulled from drm_lists.h (now renamed gamma_lists.h):
*/
extern int DRM(waitlist_create)(drm_waitlist_t *bl, int count);
extern int DRM(waitlist_destroy)(drm_waitlist_t *bl);
extern int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf);
extern drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl);
extern int DRM(freelist_create)(drm_freelist_t *bl, int count);
extern int DRM(freelist_destroy)(drm_freelist_t *bl);
extern int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl,
drm_buf_t *buf);
extern drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block);
/* externs for gamma changes to the ops */
extern struct file_operations DRM(fops);
extern unsigned int gamma_fops_poll(struct file *filp, struct poll_table_struct *wait);
extern ssize_t gamma_fops_read(struct file *filp, char __user *buf, size_t count, loff_t *off);
#define GLINT_DRI_BUF_COUNT 256
#define GAMMA_OFF(reg) \
((reg < 0x1000) \
? reg \
: ((reg < 0x10000) \
? (reg - 0x1000) \
: ((reg < 0x11000) \
? (reg - 0x10000) \
: (reg - 0x11000))))
#define GAMMA_BASE(reg) ((unsigned long) \
((reg < 0x1000) ? dev_priv->mmio0->handle : \
((reg < 0x10000) ? dev_priv->mmio1->handle : \
((reg < 0x11000) ? dev_priv->mmio2->handle : \
dev_priv->mmio3->handle))))
#define GAMMA_ADDR(reg) (GAMMA_BASE(reg) + GAMMA_OFF(reg))
#define GAMMA_DEREF(reg) *(__volatile__ int *)GAMMA_ADDR(reg)
#define GAMMA_READ(reg) GAMMA_DEREF(reg)
#define GAMMA_WRITE(reg,val) do { GAMMA_DEREF(reg) = val; } while (0)
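
/*
 * Worked example of the register-window split above: GAMMA_SYNC (0x8c40)
 * falls in the 0x1000..0xffff window, so GAMMA_OFF yields 0x8c40 - 0x1000
 * = 0x7b40 and GAMMA_BASE selects mmio1, while GAMMA_DMACOUNT (0x0030)
 * stays in window 0 and is read straight off mmio0.  GAMMA_READ/WRITE are
 * then volatile dereferences of the resulting address.
 */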
#define GAMMA_BROADCASTMASK 0x9378
#define GAMMA_COMMANDINTENABLE 0x0c48
#define GAMMA_DMAADDRESS 0x0028
#define GAMMA_DMACOUNT 0x0030
#define GAMMA_FILTERMODE 0x8c00
#define GAMMA_GCOMMANDINTFLAGS 0x0c50
#define GAMMA_GCOMMANDMODE 0x0c40
#define GAMMA_QUEUED_DMA_MODE (1<<1)
#define GAMMA_GCOMMANDSTATUS 0x0c60
#define GAMMA_GDELAYTIMER 0x0c38
#define GAMMA_GDMACONTROL 0x0060
#define GAMMA_USE_AGP (1<<1)
#define GAMMA_GINTENABLE 0x0808
#define GAMMA_GINTFLAGS 0x0810
#define GAMMA_INFIFOSPACE 0x0018
#define GAMMA_OUTFIFOWORDS 0x0020
#define GAMMA_OUTPUTFIFO 0x2000
#define GAMMA_SYNC 0x8c40
#define GAMMA_SYNC_TAG 0x0188
#define GAMMA_PAGETABLEADDR 0x0C00
#define GAMMA_PAGETABLELENGTH 0x0C08
#define GAMMA_PASSTHROUGH 0x1FE
#define GAMMA_DMAADDRTAG 0x530
#define GAMMA_DMACOUNTTAG 0x531
#define GAMMA_COMMANDINTTAG 0x532
#endif


@ -1,215 +0,0 @@
/* drm_lists.h -- Buffer list handling routines -*- linux-c -*-
* Created: Mon Apr 19 20:54:22 1999 by faith@valinux.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*/
#include "drmP.h"
int DRM(waitlist_create)(drm_waitlist_t *bl, int count)
{
	if (bl->count) return -EINVAL;

	bl->count = count;	/* set before sizing the allocation */
	bl->bufs = DRM(alloc)((bl->count + 2) * sizeof(*bl->bufs),
			      DRM_MEM_BUFLISTS);
	if (!bl->bufs) {
		bl->count = 0;
		return -ENOMEM;
	}
	memset(bl->bufs, 0, (bl->count + 2) * sizeof(*bl->bufs));
bl->rp = bl->bufs;
bl->wp = bl->bufs;
bl->end = &bl->bufs[bl->count+1];
spin_lock_init(&bl->write_lock);
spin_lock_init(&bl->read_lock);
return 0;
}
int DRM(waitlist_destroy)(drm_waitlist_t *bl)
{
if (bl->rp != bl->wp) return -EINVAL;
if (bl->bufs) DRM(free)(bl->bufs,
(bl->count + 2) * sizeof(*bl->bufs),
DRM_MEM_BUFLISTS);
bl->count = 0;
bl->bufs = NULL;
bl->rp = NULL;
bl->wp = NULL;
bl->end = NULL;
return 0;
}
int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf)
{
int left;
unsigned long flags;
left = DRM_LEFTCOUNT(bl);
if (!left) {
DRM_ERROR("Overflow while adding buffer %d from filp %p\n",
buf->idx, buf->filp);
return -EINVAL;
}
buf->list = DRM_LIST_WAIT;
spin_lock_irqsave(&bl->write_lock, flags);
*bl->wp = buf;
if (++bl->wp >= bl->end) bl->wp = bl->bufs;
spin_unlock_irqrestore(&bl->write_lock, flags);
return 0;
}
drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl)
{
drm_buf_t *buf;
unsigned long flags;
spin_lock_irqsave(&bl->read_lock, flags);
buf = *bl->rp;
if (bl->rp == bl->wp) {
spin_unlock_irqrestore(&bl->read_lock, flags);
return NULL;
}
if (++bl->rp >= bl->end) bl->rp = bl->bufs;
spin_unlock_irqrestore(&bl->read_lock, flags);
return buf;
}
int DRM(freelist_create)(drm_freelist_t *bl, int count)
{
atomic_set(&bl->count, 0);
bl->next = NULL;
init_waitqueue_head(&bl->waiting);
bl->low_mark = 0;
bl->high_mark = 0;
atomic_set(&bl->wfh, 0);
spin_lock_init(&bl->lock);
++bl->initialized;
return 0;
}
int DRM(freelist_destroy)(drm_freelist_t *bl)
{
atomic_set(&bl->count, 0);
bl->next = NULL;
return 0;
}
int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl, drm_buf_t *buf)
{
drm_device_dma_t *dma = dev->dma;
if (!dma) {
DRM_ERROR("No DMA support\n");
return 1;
}
if (buf->waiting || buf->pending || buf->list == DRM_LIST_FREE) {
DRM_ERROR("Freed buffer %d: w%d, p%d, l%d\n",
buf->idx, buf->waiting, buf->pending, buf->list);
}
if (!bl) return 1;
buf->list = DRM_LIST_FREE;
spin_lock(&bl->lock);
buf->next = bl->next;
bl->next = buf;
spin_unlock(&bl->lock);
atomic_inc(&bl->count);
if (atomic_read(&bl->count) > dma->buf_count) {
DRM_ERROR("%d of %d buffers free after addition of %d\n",
atomic_read(&bl->count), dma->buf_count, buf->idx);
return 1;
}
/* Check for high water mark */
if (atomic_read(&bl->wfh) && atomic_read(&bl->count)>=bl->high_mark) {
atomic_set(&bl->wfh, 0);
wake_up_interruptible(&bl->waiting);
}
return 0;
}
static drm_buf_t *DRM(freelist_try)(drm_freelist_t *bl)
{
drm_buf_t *buf;
if (!bl) return NULL;
/* Get buffer */
spin_lock(&bl->lock);
if (!bl->next) {
spin_unlock(&bl->lock);
return NULL;
}
buf = bl->next;
bl->next = bl->next->next;
spin_unlock(&bl->lock);
atomic_dec(&bl->count);
buf->next = NULL;
buf->list = DRM_LIST_NONE;
if (buf->waiting || buf->pending) {
DRM_ERROR("Free buffer %d: w%d, p%d, l%d\n",
buf->idx, buf->waiting, buf->pending, buf->list);
}
return buf;
}
drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block)
{
drm_buf_t *buf = NULL;
DECLARE_WAITQUEUE(entry, current);
if (!bl || !bl->initialized) return NULL;
/* Check for low water mark */
if (atomic_read(&bl->count) <= bl->low_mark) /* Became low */
atomic_set(&bl->wfh, 1);
if (atomic_read(&bl->wfh)) {
if (block) {
add_wait_queue(&bl->waiting, &entry);
for (;;) {
current->state = TASK_INTERRUPTIBLE;
if (!atomic_read(&bl->wfh)
&& (buf = DRM(freelist_try)(bl))) break;
schedule();
if (signal_pending(current)) break;
}
current->state = TASK_RUNNING;
remove_wait_queue(&bl->waiting, &entry);
}
return buf;
}
return DRM(freelist_try)(bl);
}
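
/*
 * The low/high water marks above give the freelist hysteresis: when the
 * free count falls to low_mark, 'wfh' (wait for high) is set and blocking
 * callers sleep on bl->waiting; DRM(freelist_put)() clears wfh and wakes
 * them only once the count climbs back to high_mark, so callers stop
 * thrashing right at the boundary.
 */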


@ -1,140 +0,0 @@
/* lock.c -- IOCTLs for locking -*- linux-c -*-
* Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*/
/* Gamma-specific code extracted from drm_lock.h:
*/
static int DRM(flush_queue)(drm_device_t *dev, int context)
{
DECLARE_WAITQUEUE(entry, current);
int ret = 0;
drm_queue_t *q = dev->queuelist[context];
DRM_DEBUG("\n");
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) > 1) {
atomic_inc(&q->block_write);
add_wait_queue(&q->flush_queue, &entry);
atomic_inc(&q->block_count);
for (;;) {
current->state = TASK_INTERRUPTIBLE;
if (!DRM_BUFCOUNT(&q->waitlist)) break;
schedule();
if (signal_pending(current)) {
ret = -EINTR; /* Can't restart */
break;
}
}
atomic_dec(&q->block_count);
current->state = TASK_RUNNING;
remove_wait_queue(&q->flush_queue, &entry);
}
atomic_dec(&q->use_count);
/* NOTE: block_write is still incremented!
Use drm_flush_unlock_queue to decrement. */
return ret;
}
static int DRM(flush_unblock_queue)(drm_device_t *dev, int context)
{
drm_queue_t *q = dev->queuelist[context];
DRM_DEBUG("\n");
atomic_inc(&q->use_count);
if (atomic_read(&q->use_count) > 1) {
if (atomic_read(&q->block_write)) {
atomic_dec(&q->block_write);
wake_up_interruptible(&q->write_queue);
}
}
atomic_dec(&q->use_count);
return 0;
}
int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
drm_lock_flags_t flags)
{
int ret = 0;
int i;
DRM_DEBUG("\n");
if (flags & _DRM_LOCK_FLUSH) {
ret = DRM(flush_queue)(dev, DRM_KERNEL_CONTEXT);
if (!ret) ret = DRM(flush_queue)(dev, context);
}
if (flags & _DRM_LOCK_FLUSH_ALL) {
for (i = 0; !ret && i < dev->queue_count; i++) {
ret = DRM(flush_queue)(dev, i);
}
}
return ret;
}
int DRM(flush_unblock)(drm_device_t *dev, int context, drm_lock_flags_t flags)
{
int ret = 0;
int i;
DRM_DEBUG("\n");
if (flags & _DRM_LOCK_FLUSH) {
ret = DRM(flush_unblock_queue)(dev, DRM_KERNEL_CONTEXT);
if (!ret) ret = DRM(flush_unblock_queue)(dev, context);
}
if (flags & _DRM_LOCK_FLUSH_ALL) {
for (i = 0; !ret && i < dev->queue_count; i++) {
ret = DRM(flush_unblock_queue)(dev, i);
}
}
return ret;
}
int DRM(finish)(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int ret = 0;
drm_lock_t lock;
DRM_DEBUG("\n");
if (copy_from_user(&lock, (drm_lock_t __user *)arg, sizeof(lock)))
return -EFAULT;
ret = DRM(flush_block_and_flush)(dev, lock.context, lock.flags);
DRM(flush_unblock)(dev, lock.context, lock.flags);
return ret;
}


@ -1,313 +0,0 @@
/* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*-
* Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*/
/* Gamma-specific code pulled from drm_dma.h:
*/
void DRM(clear_next_buffer)(drm_device_t *dev)
{
drm_device_dma_t *dma = dev->dma;
dma->next_buffer = NULL;
if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
wake_up_interruptible(&dma->next_queue->flush_queue);
}
dma->next_queue = NULL;
}
int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long))
{
int i;
int candidate = -1;
int j = jiffies;
if (!dev) {
DRM_ERROR("No device\n");
return -1;
}
if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
/* This only happens between the time the
interrupt is initialized and the time
the queues are initialized. */
return -1;
}
/* Doing "while locked" DMA? */
if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
return DRM_KERNEL_CONTEXT;
}
/* If there are buffers on the last_context
queue, and we have not been executing
this context very long, continue to
execute this context. */
if (dev->last_switch <= j
&& dev->last_switch + DRM_TIME_SLICE > j
&& DRM_WAITCOUNT(dev, dev->last_context)) {
return dev->last_context;
}
/* Otherwise, find a candidate */
for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
if (DRM_WAITCOUNT(dev, i)) {
candidate = dev->last_checked = i;
break;
}
}
if (candidate < 0) {
for (i = 0; i < dev->queue_count; i++) {
if (DRM_WAITCOUNT(dev, i)) {
candidate = dev->last_checked = i;
break;
}
}
}
if (wrapper
&& candidate >= 0
&& candidate != dev->last_context
&& dev->last_switch <= j
&& dev->last_switch + DRM_TIME_SLICE > j) {
if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) {
del_timer(&dev->timer);
dev->timer.function = wrapper;
dev->timer.data = (unsigned long)dev;
dev->timer.expires = dev->last_switch+DRM_TIME_SLICE;
add_timer(&dev->timer);
}
return -1;
}
return candidate;
}
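
/*
 * Queue selection above, in short: "while locked" DMA on the kernel
 * context always wins; otherwise the last context keeps the engine for up
 * to DRM_TIME_SLICE jiffies while it still has queued buffers; after that
 * a round-robin scan starting past last_checked picks the next busy
 * queue.  If a candidate exists but the slice has not expired, a one-shot
 * timer re-runs the scheduler at the slice boundary and -1 is returned so
 * nothing is dispatched now.
 */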
int DRM(dma_enqueue)(struct file *filp, drm_dma_t *d)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int i;
drm_queue_t *q;
drm_buf_t *buf;
int idx;
int while_locked = 0;
drm_device_dma_t *dma = dev->dma;
int *ind;
int err;
DECLARE_WAITQUEUE(entry, current);
DRM_DEBUG("%d\n", d->send_count);
if (d->flags & _DRM_DMA_WHILE_LOCKED) {
int context = dev->lock.hw_lock->lock;
if (!_DRM_LOCK_IS_HELD(context)) {
DRM_ERROR("No lock held during \"while locked\""
" request\n");
return -EINVAL;
}
if (d->context != _DRM_LOCKING_CONTEXT(context)
&& _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
DRM_ERROR("Lock held by %d while %d makes"
" \"while locked\" request\n",
_DRM_LOCKING_CONTEXT(context),
d->context);
return -EINVAL;
}
q = dev->queuelist[DRM_KERNEL_CONTEXT];
while_locked = 1;
} else {
q = dev->queuelist[d->context];
}
atomic_inc(&q->use_count);
if (atomic_read(&q->block_write)) {
add_wait_queue(&q->write_queue, &entry);
atomic_inc(&q->block_count);
for (;;) {
current->state = TASK_INTERRUPTIBLE;
if (!atomic_read(&q->block_write)) break;
schedule();
if (signal_pending(current)) {
atomic_dec(&q->use_count);
remove_wait_queue(&q->write_queue, &entry);
return -EINTR;
}
}
atomic_dec(&q->block_count);
current->state = TASK_RUNNING;
remove_wait_queue(&q->write_queue, &entry);
}
ind = DRM(alloc)(d->send_count * sizeof(int), DRM_MEM_DRIVER);
if (!ind)
return -ENOMEM;
if (copy_from_user(ind, d->send_indices, d->send_count * sizeof(int))) {
err = -EFAULT;
goto out;
}
err = -EINVAL;
for (i = 0; i < d->send_count; i++) {
idx = ind[i];
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("Index %d (of %d max)\n",
ind[i], dma->buf_count - 1);
goto out;
}
buf = dma->buflist[ idx ];
if (buf->filp != filp) {
DRM_ERROR("Process %d using buffer not owned\n",
current->pid);
goto out;
}
if (buf->list != DRM_LIST_NONE) {
DRM_ERROR("Process %d using buffer %d on list %d\n",
current->pid, buf->idx, buf->list);
goto out;
}
buf->used = ind[i];
buf->while_locked = while_locked;
buf->context = d->context;
if (!buf->used) {
DRM_ERROR("Queueing 0 length buffer\n");
}
if (buf->pending) {
DRM_ERROR("Queueing pending buffer:"
" buffer %d, offset %d\n",
ind[i], i);
goto out;
}
if (buf->waiting) {
DRM_ERROR("Queueing waiting buffer:"
" buffer %d, offset %d\n",
ind[i], i);
goto out;
}
buf->waiting = 1;
if (atomic_read(&q->use_count) == 1
|| atomic_read(&q->finalization)) {
DRM(free_buffer)(dev, buf);
} else {
DRM(waitlist_put)(&q->waitlist, buf);
atomic_inc(&q->total_queued);
}
}
atomic_dec(&q->use_count);
return 0;
out:
DRM(free)(ind, d->send_count * sizeof(int), DRM_MEM_DRIVER);
atomic_dec(&q->use_count);
return err;
}
static int DRM(dma_get_buffers_of_order)(struct file *filp, drm_dma_t *d,
int order)
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int i;
drm_buf_t *buf;
drm_device_dma_t *dma = dev->dma;
for (i = d->granted_count; i < d->request_count; i++) {
buf = DRM(freelist_get)(&dma->bufs[order].freelist,
d->flags & _DRM_DMA_WAIT);
if (!buf) break;
if (buf->pending || buf->waiting) {
DRM_ERROR("Free buffer %d in use: filp %p (w%d, p%d)\n",
buf->idx,
buf->filp,
buf->waiting,
buf->pending);
}
buf->filp = filp;
if (copy_to_user(&d->request_indices[i],
&buf->idx,
sizeof(buf->idx)))
return -EFAULT;
if (copy_to_user(&d->request_sizes[i],
&buf->total,
sizeof(buf->total)))
return -EFAULT;
++d->granted_count;
}
return 0;
}
int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma)
{
int order;
int retcode = 0;
int tmp_order;
order = DRM(order)(dma->request_size);
dma->granted_count = 0;
retcode = DRM(dma_get_buffers_of_order)(filp, dma, order);
if (dma->granted_count < dma->request_count
&& (dma->flags & _DRM_DMA_SMALLER_OK)) {
for (tmp_order = order - 1;
!retcode
&& dma->granted_count < dma->request_count
&& tmp_order >= DRM_MIN_ORDER;
--tmp_order) {
retcode = DRM(dma_get_buffers_of_order)(filp, dma,
tmp_order);
}
}
if (dma->granted_count < dma->request_count
&& (dma->flags & _DRM_DMA_LARGER_OK)) {
for (tmp_order = order + 1;
!retcode
&& dma->granted_count < dma->request_count
&& tmp_order <= DRM_MAX_ORDER;
++tmp_order) {
retcode = DRM(dma_get_buffers_of_order)(filp, dma,
tmp_order);
}
}
return 0;
}
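
/*
 * Buffer-size fallback in the function above, by example (illustrative;
 * this assumes DRM(order), defined elsewhere, returns the log2 bucket
 * that covers request_size): a 24 KB request maps to the 32 KB bucket
 * first; if that bucket is dry and _DRM_DMA_SMALLER_OK is set, smaller
 * buckets are tried down to DRM_MIN_ORDER, then _DRM_DMA_LARGER_OK allows
 * walking up to DRM_MAX_ORDER.  granted_count reports how many buffers
 * were actually handed out, so callers must tolerate partial grants.
 */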


@ -45,11 +45,6 @@
 #define I810_BUF_UNMAPPED 0
 #define I810_BUF_MAPPED 1
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
-#define down_write down
-#define up_write up
-#endif
 static drm_buf_t *i810_freelist_get(drm_device_t *dev)
 {
 	drm_device_dma_t *dma = dev->dma;
@ -351,6 +346,7 @@ static int i810_dma_initialize(drm_device_t *dev,
DRM_ERROR("can not find mmio map!\n"); DRM_ERROR("can not find mmio map!\n");
return -EINVAL; return -EINVAL;
} }
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
if (!dev->agp_buffer_map) { if (!dev->agp_buffer_map) {
dev->dev_private = (void *)dev_priv; dev->dev_private = (void *)dev_priv;
@ -1383,3 +1379,19 @@ drm_ioctl_desc_t i810_ioctls[] = {
 };
 int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
+
+/**
+ * Determine if the device really is AGP or not.
+ *
+ * All Intel graphics chipsets are treated as AGP, even if they are really
+ * PCI-e.
+ *
+ * \param dev The device to be tested.
+ *
+ * \returns
+ * A value of 1 is always returned to indicate every i810 is AGP.
+ */
+int i810_driver_device_is_agp(drm_device_t * dev)
+{
+	return 1;
+}


@ -84,6 +84,7 @@ static struct drm_driver driver = {
 	.dev_priv_size = sizeof(drm_i810_buf_priv_t),
 	.pretakedown = i810_driver_pretakedown,
 	.prerelease = i810_driver_prerelease,
+	.device_is_agp = i810_driver_device_is_agp,
 	.release = i810_driver_release,
 	.dma_quiescent = i810_driver_dma_quiescent,
 	.reclaim_buffers = i810_reclaim_buffers,


@ -120,6 +120,7 @@ extern int i810_driver_dma_quiescent(drm_device_t *dev);
 extern void i810_driver_release(drm_device_t *dev, struct file *filp);
 extern void i810_driver_pretakedown(drm_device_t *dev);
 extern void i810_driver_prerelease(drm_device_t *dev, DRMFILE filp);
+extern int i810_driver_device_is_agp(drm_device_t * dev);
 
 #define I810_BASE(reg) ((unsigned long) \
 			dev_priv->mmio_map->handle)


@ -47,11 +47,6 @@
 #define I830_BUF_UNMAPPED 0
 #define I830_BUF_MAPPED 1
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
-#define down_write down
-#define up_write up
-#endif
 static drm_buf_t *i830_freelist_get(drm_device_t *dev)
 {
 	drm_device_dma_t *dma = dev->dma;
@ -358,6 +353,7 @@ static int i830_dma_initialize(drm_device_t *dev,
DRM_ERROR("can not find mmio map!\n"); DRM_ERROR("can not find mmio map!\n");
return -EINVAL; return -EINVAL;
} }
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
if(!dev->agp_buffer_map) { if(!dev->agp_buffer_map) {
dev->dev_private = (void *)dev_priv; dev->dev_private = (void *)dev_priv;
@ -1586,3 +1582,19 @@ drm_ioctl_desc_t i830_ioctls[] = {
}; };
int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls); int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
/**
* Determine if the device really is AGP or not.
*
* All Intel graphics chipsets are treated as AGP, even if they are really
* PCI-e.
*
* \param dev The device to be tested.
*
* \returns
* A value of 1 is always returned to indicate that every i8xx is AGP.
*/
int i830_driver_device_is_agp(drm_device_t * dev)
{
return 1;
}
@@ -88,6 +88,7 @@ static struct drm_driver driver = {
	.dev_priv_size = sizeof(drm_i830_buf_priv_t),
	.pretakedown = i830_driver_pretakedown,
	.prerelease = i830_driver_prerelease,
	.device_is_agp = i830_driver_device_is_agp,
	.release = i830_driver_release,
	.dma_quiescent = i830_driver_dma_quiescent,
	.reclaim_buffers = i830_reclaim_buffers,
@@ -137,6 +137,7 @@ extern void i830_driver_pretakedown(drm_device_t *dev);
extern void i830_driver_release(drm_device_t *dev, struct file *filp);
extern int i830_driver_dma_quiescent(drm_device_t *dev);
extern void i830_driver_prerelease(drm_device_t *dev, DRMFILE filp);
extern int i830_driver_device_is_agp(drm_device_t * dev);
#define I830_BASE(reg)		((unsigned long) \
				dev_priv->mmio_map->handle)
@@ -95,9 +95,8 @@ static int i915_dma_cleanup(drm_device_t * dev)
		drm_core_ioremapfree(&dev_priv->ring.map, dev);
	}

-	if (dev_priv->hw_status_page) {
-		drm_pci_free(dev, PAGE_SIZE, dev_priv->hw_status_page,
-			     dev_priv->dma_status_page);
	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		/* Need to rewrite hardware status page */
		I915_WRITE(0x02080, 0x1ffff000);
	}
@@ -174,16 +173,18 @@ static int i915_initialize(drm_device_t * dev,
	dev_priv->allow_batchbuffer = 1;

	/* Program Hardware Status Page */
-	dev_priv->hw_status_page = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
-						 0xffffffff,
-						 &dev_priv->dma_status_page);
	dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
						   0xffffffff);
-	if (!dev_priv->hw_status_page) {
	if (!dev_priv->status_page_dmah) {
		dev->dev_private = (void *)dev_priv;
		i915_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return DRM_ERR(ENOMEM);
	}
	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
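[Editor's note: drm_pci_alloc() now returns a consolidated handle instead of filling in separate pointer/bus-address out-parameters. A minimal sketch of the handle type the new calls assume — the vaddr/busaddr member names are taken from the assignments in the hunk above, the rest of the layout is an assumption:]

/* Sketch of the consistent-memory handle used by drm_pci_alloc() and
 * drm_pci_free() above.  The chip is programmed with busaddr; the CPU
 * touches the page through vaddr. */
typedef struct drm_dma_handle {
	dma_addr_t busaddr;	/* bus address of the allocation */
	void *vaddr;		/* kernel virtual address of the allocation */
	size_t size;		/* size of the allocation, in bytes */
} drm_dma_handle_t;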
@@ -731,3 +732,19 @@ drm_ioctl_desc_t i915_ioctls[] = {
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
/**
* Determine if the device really is AGP or not.
*
* All Intel graphics chipsets are treated as AGP, even if they are really
* PCI-e.
*
* \param dev The device to be tested.
*
* \returns
* A value of 1 is always returned to indicate that every i9x5 is AGP.
*/
int i915_driver_device_is_agp(drm_device_t * dev)
{
return 1;
}
@@ -79,6 +79,7 @@ static struct drm_driver driver = {
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
	.pretakedown = i915_driver_pretakedown,
	.prerelease = i915_driver_prerelease,
	.device_is_agp = i915_driver_device_is_agp,
	.irq_preinstall = i915_driver_irq_preinstall,
	.irq_postinstall = i915_driver_irq_postinstall,
	.irq_uninstall = i915_driver_irq_uninstall,
@@ -79,9 +79,10 @@ typedef struct drm_i915_private {
	drm_i915_sarea_t *sarea_priv;
	drm_i915_ring_buffer_t ring;

	drm_dma_handle_t *status_page_dmah;
	void *hw_status_page;
-	unsigned long counter;
	dma_addr_t dma_status_page;
	unsigned long counter;

	int back_offset;
	int front_offset;
@@ -102,6 +103,7 @@ typedef struct drm_i915_private {
extern void i915_kernel_lost_context(drm_device_t * dev);
extern void i915_driver_pretakedown(drm_device_t *dev);
extern void i915_driver_prerelease(drm_device_t *dev, DRMFILE filp);
extern int i915_driver_device_is_agp(drm_device_t *dev);

/* i915_irq.c */
extern int i915_irq_emit(DRM_IOCTL_ARGS);
@@ -23,18 +23,21 @@
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Rickard E. (Rik) Faith <faith@valinux.com>
- *    Jeff Hartmann <jhartmann@valinux.com>
- *    Keith Whitwell <keith@tungstengraphics.com>
- *
- * Rewritten by:
- *    Gareth Hughes <gareth@valinux.com>
 */

/**
 * \file mga_dma.c
 * DMA support for MGA G200 / G400.
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Jeff Hartmann <jhartmann@valinux.com>
 * \author Keith Whitwell <keith@tungstengraphics.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "mga_drm.h"
#include "mga_drv.h"
@@ -148,7 +151,7 @@ void mga_do_dma_flush( drm_mga_private_t *dev_priv )
	DRM_DEBUG( "   space = 0x%06x\n", primary->space );

	mga_flush_write_combine();
-	MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER );
	MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);

	DRM_DEBUG( "done.\n" );
}
@@ -190,7 +193,7 @@ void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv )
	DRM_DEBUG( "   space = 0x%06x\n", primary->space );

	mga_flush_write_combine();
-	MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER );
	MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);

	set_bit( 0, &primary->wrapped );
	DRM_DEBUG( "done.\n" );
}
@@ -396,23 +399,383 @@ int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf )
 * DMA initialization, cleanup
 */
int mga_driver_preinit(drm_device_t *dev, unsigned long flags)
{
drm_mga_private_t * dev_priv;
dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
if (!dev_priv)
return DRM_ERR(ENOMEM);
dev->dev_private = (void *)dev_priv;
memset(dev_priv, 0, sizeof(drm_mga_private_t));
dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
dev_priv->chipset = flags;
return 0;
}
/**
* Bootstrap the driver for AGP DMA.
*
* \todo
* Investigate whether there is any benefit to storing the WARP microcode in
* AGP memory. If not, the microcode may as well always be put in PCI
* memory.
*
* \todo
* This routine needs to set dma_bs->agp_mode to the mode actually configured
* in the hardware. Looking just at the Linux AGP driver code, I don't see
* an easy way to determine this.
*
* \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
*/
static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
drm_mga_dma_bootstrap_t * dma_bs)
{
drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
const unsigned int warp_size = mga_warp_microcode_size(dev_priv);
int err;
unsigned offset;
const unsigned secondary_size = dma_bs->secondary_bin_count
* dma_bs->secondary_bin_size;
const unsigned agp_size = (dma_bs->agp_size << 20);
drm_buf_desc_t req;
drm_agp_mode_t mode;
drm_agp_info_t info;
/* Acquire AGP. */
err = drm_agp_acquire(dev);
if (err) {
DRM_ERROR("Unable to acquire AGP\n");
return err;
}
err = drm_agp_info(dev, &info);
if (err) {
DRM_ERROR("Unable to get AGP info\n");
return err;
}
mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode;
err = drm_agp_enable(dev, mode);
if (err) {
DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
return err;
}
/* In addition to the usual AGP mode configuration, the G200 AGP cards
* need to have the AGP mode "manually" set.
*/
if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
if (mode.mode & 0x02) {
MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
}
else {
MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
}
}
/* Allocate and bind AGP memory. */
dev_priv->agp_pages = agp_size / PAGE_SIZE;
dev_priv->agp_mem = drm_alloc_agp( dev, dev_priv->agp_pages, 0 );
if (dev_priv->agp_mem == NULL) {
dev_priv->agp_pages = 0;
DRM_ERROR("Unable to allocate %uMB AGP memory\n",
dma_bs->agp_size);
return DRM_ERR(ENOMEM);
}
err = drm_bind_agp( dev_priv->agp_mem, 0 );
if (err) {
DRM_ERROR("Unable to bind AGP memory\n");
return err;
}
offset = 0;
err = drm_addmap( dev, offset, warp_size,
_DRM_AGP, _DRM_READ_ONLY, & dev_priv->warp );
if (err) {
DRM_ERROR("Unable to map WARP microcode\n");
return err;
}
offset += warp_size;
err = drm_addmap( dev, offset, dma_bs->primary_size,
_DRM_AGP, _DRM_READ_ONLY, & dev_priv->primary );
if (err) {
DRM_ERROR("Unable to map primary DMA region\n");
return err;
}
offset += dma_bs->primary_size;
err = drm_addmap( dev, offset, secondary_size,
_DRM_AGP, 0, & dev->agp_buffer_map );
if (err) {
DRM_ERROR("Unable to map secondary DMA region\n");
return err;
}
(void) memset( &req, 0, sizeof(req) );
req.count = dma_bs->secondary_bin_count;
req.size = dma_bs->secondary_bin_size;
req.flags = _DRM_AGP_BUFFER;
req.agp_start = offset;
err = drm_addbufs_agp( dev, & req );
if (err) {
DRM_ERROR("Unable to add secondary DMA buffers\n");
return err;
}
offset += secondary_size;
err = drm_addmap( dev, offset, agp_size - offset,
_DRM_AGP, 0, & dev_priv->agp_textures );
if (err) {
DRM_ERROR("Unable to map AGP texture region\n");
return err;
}
drm_core_ioremap(dev_priv->warp, dev);
drm_core_ioremap(dev_priv->primary, dev);
drm_core_ioremap(dev->agp_buffer_map, dev);
if (!dev_priv->warp->handle ||
!dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
dev_priv->warp->handle, dev_priv->primary->handle,
dev->agp_buffer_map->handle);
return DRM_ERR(ENOMEM);
}
dev_priv->dma_access = MGA_PAGPXFER;
dev_priv->wagp_enable = MGA_WAGP_ENABLE;
DRM_INFO("Initialized card for AGP DMA.\n");
return 0;
}
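[Editor's note: for orientation, here is the carve-up of the AGP aperture implied by the drm_addmap() calls above; the offsets simply accumulate in the order the regions are mapped, and the texture region takes whatever is left of the aperture:]

/*
 * AGP aperture layout after mga_do_agp_dma_bootstrap():
 *
 *   offset 0             WARP microcode        (warp_size, read-only)
 *   + warp_size          primary DMA region    (primary_size)
 *   + primary_size       secondary DMA buffers (secondary_size)
 *   + secondary_size     "AGP" texture region  (to end of agp_size)
 */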
/**
* Bootstrap the driver for PCI DMA.
*
* \todo
* The algorithm for decreasing the size of the primary DMA buffer could be
* better. The size should be rounded up to the nearest page size, then
* decrease the request size by a single page each pass through the loop.
*
* \todo
* Determine whether the maximum address passed to drm_pci_alloc is correct.
* The same goes for drm_addbufs_pci.
*
* \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
*/
static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
drm_mga_dma_bootstrap_t * dma_bs)
{
drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
const unsigned int warp_size = mga_warp_microcode_size(dev_priv);
unsigned int primary_size;
unsigned int bin_count;
int err;
drm_buf_desc_t req;
if (dev->dma == NULL) {
DRM_ERROR("dev->dma is NULL\n");
return DRM_ERR(EFAULT);
}
/* The proper alignment is 0x100 for this mapping */
err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
_DRM_READ_ONLY, &dev_priv->warp);
if (err != 0) {
DRM_ERROR("Unable to create mapping for WARP microcode\n");
return err;
}
/* Other than the bottom two bits being used to encode other
* information, there don't appear to be any restrictions on the
* alignment of the primary or secondary DMA buffers.
*/
for ( primary_size = dma_bs->primary_size
; primary_size != 0
; primary_size >>= 1 ) {
/* The proper alignment for this mapping is 0x04 */
err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
_DRM_READ_ONLY, &dev_priv->primary);
if (!err)
break;
}
if (err != 0) {
DRM_ERROR("Unable to allocate primary DMA region\n");
return DRM_ERR(ENOMEM);
}
if (dev_priv->primary->size != dma_bs->primary_size) {
DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
dma_bs->primary_size,
(unsigned) dev_priv->primary->size);
dma_bs->primary_size = dev_priv->primary->size;
}
for ( bin_count = dma_bs->secondary_bin_count
; bin_count > 0
; bin_count-- ) {
(void) memset( &req, 0, sizeof(req) );
req.count = bin_count;
req.size = dma_bs->secondary_bin_size;
err = drm_addbufs_pci( dev, & req );
if (!err) {
break;
}
}
if (bin_count == 0) {
DRM_ERROR("Unable to add secondary DMA buffers\n");
return err;
}
if (bin_count != dma_bs->secondary_bin_count) {
DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u "
"to %u.\n", dma_bs->secondary_bin_count, bin_count);
dma_bs->secondary_bin_count = bin_count;
}
dev_priv->dma_access = 0;
dev_priv->wagp_enable = 0;
dma_bs->agp_mode = 0;
DRM_INFO("Initialized card for PCI DMA.\n");
return 0;
}
static int mga_do_dma_bootstrap(drm_device_t * dev,
drm_mga_dma_bootstrap_t * dma_bs)
{
const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
int err;
drm_mga_private_t * const dev_priv =
(drm_mga_private_t *) dev->dev_private;
dev_priv->used_new_dma_init = 1;
/* The first steps are the same for both PCI and AGP based DMA. Map
* the card's MMIO registers and map a status page.
*/
err = drm_addmap( dev, dev_priv->mmio_base, dev_priv->mmio_size,
_DRM_REGISTERS, _DRM_READ_ONLY, & dev_priv->mmio );
if (err) {
DRM_ERROR("Unable to map MMIO region\n");
return err;
}
err = drm_addmap( dev, 0, SAREA_MAX, _DRM_SHM,
_DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
& dev_priv->status );
if (err) {
DRM_ERROR("Unable to map status region\n");
return err;
}
/* The DMA initialization procedure is slightly different for PCI and
* AGP cards. AGP cards just allocate a large block of AGP memory and
* carve off portions of it for internal uses. The remaining memory
* is returned to user-mode to be used for AGP textures.
*/
if (is_agp) {
err = mga_do_agp_dma_bootstrap(dev, dma_bs);
}
/* If we attempted to initialize the card for AGP DMA but failed,
* clean up any mess that may have been created.
*/
if (err) {
mga_do_cleanup_dma(dev);
}
/* Not only do we want to try to initialize PCI cards for PCI DMA,
* but we also try to initialize AGP cards that could not be
* initialized for AGP DMA. This covers the case where we have an AGP
* card in a system with an unsupported AGP chipset. In that case the
* card will be detected as AGP, but we won't be able to allocate any
* AGP memory, etc.
*/
if (!is_agp || err) {
err = mga_do_pci_dma_bootstrap(dev, dma_bs);
}
return err;
}
int mga_dma_bootstrap(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_mga_dma_bootstrap_t bootstrap;
int err;
DRM_COPY_FROM_USER_IOCTL(bootstrap,
(drm_mga_dma_bootstrap_t __user *) data,
sizeof(bootstrap));
err = mga_do_dma_bootstrap(dev, & bootstrap);
if (! err) {
static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
const drm_mga_private_t * const dev_priv =
(drm_mga_private_t *) dev->dev_private;
if (dev_priv->agp_textures != NULL) {
bootstrap.texture_handle = dev_priv->agp_textures->offset;
bootstrap.texture_size = dev_priv->agp_textures->size;
}
else {
bootstrap.texture_handle = 0;
bootstrap.texture_size = 0;
}
bootstrap.agp_mode = modes[ bootstrap.agp_mode & 0x07 ];
if (DRM_COPY_TO_USER( (void __user *) data, & bootstrap,
sizeof(bootstrap))) {
err = DRM_ERR(EFAULT);
}
}
else {
mga_do_cleanup_dma(dev);
}
return err;
}
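[Editor's note: the modes[] table above converts the AGPSTAT-style bit mask negotiated during bootstrap back into a plain rate number for userspace. A worked example:]

/* modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 }, indexed by (agp_mode & 0x07):
 *   mask 0x01 (1X only)   -> modes[1] == 1
 *   mask 0x03 (1X|2X)     -> modes[3] == 2   (highest supported rate)
 *   mask 0x04 (4X only)   -> modes[4] == 4
 * so the returned bootstrap.agp_mode reports the numeric AGP rate in use,
 * and stays zero when the card fell back to PCI DMA.
 */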
static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
{
	drm_mga_private_t *dev_priv;
	int ret;
	DRM_DEBUG( "\n" );

-	dev_priv = drm_alloc( sizeof(drm_mga_private_t), DRM_MEM_DRIVER );
-	if ( !dev_priv )
-		return DRM_ERR(ENOMEM);
-
-	memset( dev_priv, 0, sizeof(drm_mga_private_t) );
-
-	dev_priv->chipset = init->chipset;
-	dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
	dev_priv = dev->dev_private;

	if (init->sgram) {
		dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
	} else {
		dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
@@ -436,88 +799,66 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
	DRM_GETSAREA();
	if (!dev_priv->sarea) {
		DRM_ERROR("failed to find sarea!\n");
		return DRM_ERR(EINVAL);
	}

	if (! dev_priv->used_new_dma_init) {
		dev_priv->status = drm_core_findmap(dev, init->status_offset);
		if (!dev_priv->status) {
			DRM_ERROR("failed to find status page!\n");
			return DRM_ERR(EINVAL);
		}
		dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
		if (!dev_priv->mmio) {
			DRM_ERROR("failed to find mmio region!\n");
			return DRM_ERR(EINVAL);
		}
		dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
		if (!dev_priv->warp) {
			DRM_ERROR("failed to find warp microcode region!\n");
			return DRM_ERR(EINVAL);
		}
		dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
		if (!dev_priv->primary) {
			DRM_ERROR("failed to find primary dma region!\n");
			return DRM_ERR(EINVAL);
		}
		dev->agp_buffer_token = init->buffers_offset;
		dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
		if (!dev->agp_buffer_map) {
			DRM_ERROR("failed to find dma buffer region!\n");
			return DRM_ERR(EINVAL);
		}

		drm_core_ioremap(dev_priv->warp, dev);
		drm_core_ioremap(dev_priv->primary, dev);
		drm_core_ioremap(dev->agp_buffer_map, dev);
	}

	dev_priv->sarea_priv =
		(drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle +
				    init->sarea_priv_offset);

	if (!dev_priv->warp->handle ||
	    !dev_priv->primary->handle ||
	    ((dev_priv->dma_access != 0) &&
	     ((dev->agp_buffer_map == NULL) ||
	      (dev->agp_buffer_map->handle == NULL)))) {
		DRM_ERROR("failed to ioremap agp regions!\n");
		return DRM_ERR(ENOMEM);
	}

	ret = mga_warp_install_microcode(dev_priv);
	if (ret < 0) {
		DRM_ERROR("failed to install WARP ucode!\n");
		return ret;
	}

	ret = mga_warp_init(dev_priv);
	if (ret < 0) {
		DRM_ERROR("failed to init WARP engine!\n");
		return ret;
	}
@@ -557,22 +898,18 @@ static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
	dev_priv->sarea_priv->last_frame.head = 0;
	dev_priv->sarea_priv->last_frame.wrap = 0;

	if (mga_freelist_init(dev, dev_priv) < 0) {
		DRM_ERROR("could not initialize freelist\n");
-		/* Assign dev_private so we can do cleanup. */
-		dev->dev_private = (void *)dev_priv;
-		mga_do_cleanup_dma( dev );
		return DRM_ERR(ENOMEM);
	}

-	/* Make dev_private visable to others. */
-	dev->dev_private = (void *)dev_priv;
	return 0;
}
static int mga_do_cleanup_dma( drm_device_t *dev )
{
	int err = 0;
	DRM_DEBUG("\n");

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
@@ -583,20 +920,49 @@ static int mga_do_cleanup_dma( drm_device_t *dev )
	if ( dev->dev_private ) {
		drm_mga_private_t *dev_priv = dev->dev_private;

		if ((dev_priv->warp != NULL)
		    && (dev_priv->mmio->type != _DRM_CONSISTENT))
			drm_core_ioremapfree(dev_priv->warp, dev);

		if ((dev_priv->primary != NULL)
		    && (dev_priv->primary->type != _DRM_CONSISTENT))
			drm_core_ioremapfree(dev_priv->primary, dev);

		if (dev->agp_buffer_map != NULL)
			drm_core_ioremapfree(dev->agp_buffer_map, dev);

		if (dev_priv->used_new_dma_init) {
			if (dev_priv->agp_mem != NULL) {
				dev_priv->agp_textures = NULL;
				drm_unbind_agp(dev_priv->agp_mem);

				drm_free_agp(dev_priv->agp_mem, dev_priv->agp_pages);
				dev_priv->agp_pages = 0;
				dev_priv->agp_mem = NULL;
			}

			if ((dev->agp != NULL) && dev->agp->acquired) {
				err = drm_agp_release(dev);
			}

			dev_priv->used_new_dma_init = 0;
		}

		dev_priv->warp = NULL;
		dev_priv->primary = NULL;
		dev_priv->mmio = NULL;
		dev_priv->status = NULL;
		dev_priv->sarea = NULL;
		dev_priv->sarea_priv = NULL;
		dev->agp_buffer_map = NULL;

		memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
		dev_priv->warp_pipe = 0;
		memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));

-		drm_free( dev->dev_private, sizeof(drm_mga_private_t),
-			  DRM_MEM_DRIVER );
-		dev->dev_private = NULL;

		if (dev_priv->head != NULL) {
			mga_freelist_cleanup(dev);
		}
	}

	return 0;
@@ -606,14 +972,20 @@ int mga_dma_init( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_mga_init_t init;
	int err;

	LOCK_TEST_WITH_RETURN( dev, filp );

	DRM_COPY_FROM_USER_IOCTL(init, (drm_mga_init_t __user *) data,
				 sizeof(init));

	switch ( init.func ) {
	case MGA_INIT_DMA:
		err = mga_do_init_dma(dev, &init);
		if (err) {
			(void) mga_do_cleanup_dma(dev);
		}
		return err;
	case MGA_CLEANUP_DMA:
		return mga_do_cleanup_dma( dev );
	}
@@ -742,7 +1114,21 @@ int mga_dma_buffers( DRM_IOCTL_ARGS )
	return ret;
}

/**
 * Called just before the module is unloaded.
 */
int mga_driver_postcleanup(drm_device_t * dev)
{
	drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
	dev->dev_private = NULL;

	return 0;
}

/**
 * Called when the last opener of the device is closed.
 */
void mga_driver_pretakedown(drm_device_t * dev)
{
	mga_do_cleanup_dma( dev );
}
@@ -73,7 +73,8 @@
#define MGA_CARD_TYPE_G200	1
#define MGA_CARD_TYPE_G400	2
#define MGA_CARD_TYPE_G450	3	/* not currently used */
#define MGA_CARD_TYPE_G550	4

#define MGA_FRONT		0x1
#define MGA_BACK		0x2
@@ -225,10 +226,6 @@ typedef struct _drm_mga_sarea {
} drm_mga_sarea_t;

-/* WARNING: If you change any of these defines, make sure to change the
- * defines in the Xserver file (xf86drmMga.h)
- */

/* MGA specific ioctls
 * The device specific ioctl range is 0x40 to 0x79.
 */
@@ -243,6 +240,14 @@ typedef struct _drm_mga_sarea {
#define DRM_MGA_BLIT     0x08
#define DRM_MGA_GETPARAM 0x09
/* 3.2:
* ioctls for operating on fences.
*/
#define DRM_MGA_SET_FENCE 0x0a
#define DRM_MGA_WAIT_FENCE 0x0b
#define DRM_MGA_DMA_BOOTSTRAP 0x0c
#define DRM_IOCTL_MGA_INIT     DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
#define DRM_IOCTL_MGA_FLUSH    DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t)
#define DRM_IOCTL_MGA_RESET    DRM_IO(  DRM_COMMAND_BASE + DRM_MGA_RESET)
@@ -253,6 +258,9 @@ typedef struct _drm_mga_sarea {
#define DRM_IOCTL_MGA_ILOAD    DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t)
#define DRM_IOCTL_MGA_BLIT     DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t)
#define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t)
#define DRM_IOCTL_MGA_SET_FENCE DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_SET_FENCE, uint32_t)
#define DRM_IOCTL_MGA_WAIT_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_WAIT_FENCE, uint32_t)
#define DRM_IOCTL_MGA_DMA_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_DMA_BOOTSTRAP, drm_mga_dma_bootstrap_t)
typedef struct _drm_mga_warp_index {
	int installed;
@@ -291,12 +299,72 @@ typedef struct drm_mga_init {
	unsigned long buffers_offset;
} drm_mga_init_t;

-typedef struct drm_mga_fullscreen {
-	enum {
-		MGA_INIT_FULLSCREEN    = 0x01,
-		MGA_CLEANUP_FULLSCREEN = 0x02
-	} func;
-} drm_mga_fullscreen_t;

typedef struct drm_mga_dma_bootstrap {
	/**
	 * \name AGP texture region
	 *
	 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will
	 * be filled in with the actual AGP texture settings.
	 *
	 * \warning
	 * If these fields are non-zero, but drm_mga_dma_bootstrap::agp_mode
	 * is zero, it means that PCI memory (most likely through the use of
	 * an IOMMU) is being used for "AGP" textures.
	 */
	/*@{*/
	unsigned long texture_handle;	/**< Handle used to map AGP textures. */
	uint32_t texture_size;		/**< Size of the AGP texture region. */
	/*@}*/

	/**
	 * Requested size of the primary DMA region.
	 *
	 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
	 * filled in with the actual size of the primary DMA region that was
	 * allocated.
	 */
	uint32_t primary_size;

	/**
	 * Requested number of secondary DMA buffers.
	 *
	 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
	 * filled in with the actual number of secondary DMA buffers
	 * allocated.  Particularly when PCI DMA is used, this may be
	 * (substantially) less than the number requested.
	 */
	uint32_t secondary_bin_count;

	/**
	 * Requested size of each secondary DMA buffer.
	 *
	 * While the kernel \b is free to reduce
	 * drm_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed
	 * to reduce drm_mga_dma_bootstrap::secondary_bin_size.
	 */
	uint32_t secondary_bin_size;

	/**
	 * Bit-wise mask of AGPSTAT2_* values.  Currently only \c AGPSTAT2_1X,
	 * \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported.  If this value is
	 * zero, it means that PCI DMA should be used, even if AGP is
	 * possible.
	 *
	 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
	 * filled in with the actual AGP mode.  If AGP was not available
	 * (i.e., PCI DMA was used), this value will be zero.
	 */
	uint32_t agp_mode;

	/**
	 * Desired AGP GART size, measured in megabytes.
	 */
	uint8_t agp_size;
} drm_mga_dma_bootstrap_t;
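[Editor's note: a hedged sketch of how a user-space driver might invoke the new bootstrap ioctl. The raw ioctl() call and the fd variable are placeholders; AGPSTAT2_4X is referenced from the comment above:]

drm_mga_dma_bootstrap_t bs;

memset(&bs, 0, sizeof(bs));
bs.primary_size = 1024 * 1024;		/* request a 1MB primary ring */
bs.secondary_bin_count = 128;		/* request 128 secondary buffers */
bs.secondary_bin_size = 64 * 1024;	/* of 64KB each */
bs.agp_mode = AGPSTAT2_4X;		/* ask for AGP 4X; 0 forces PCI DMA */
bs.agp_size = 64;			/* request a 64MB GART */

if (ioctl(fd, DRM_IOCTL_MGA_DMA_BOOTSTRAP, &bs) == 0) {
	/* On return the fields hold what was actually allocated;
	 * bs.agp_mode == 0 means the kernel fell back to PCI DMA. */
}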
typedef struct drm_mga_clear {
	unsigned int flags;
@@ -341,6 +409,14 @@ typedef struct _drm_mga_blit {
 */
#define MGA_PARAM_IRQ_NR 1
/* 3.2: Query the actual card type. The DDX only distinguishes between
* G200 chips and non-G200 chips, which it calls G400. It turns out that
* there are some very subtle differences between the G4x0 chips and the G550
* chips. Using this parameter query, a client-side driver can detect the
* difference between a G4x0 and a G550.
*/
#define MGA_PARAM_CARD_TYPE 2
typedef struct drm_mga_getparam {
	int param;
	void __user *value;
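[Editor's note: a short usage sketch for the new parameter. The raw ioctl() call and fd are placeholders for the usual libdrm plumbing:]

int card_type;
drm_mga_getparam_t gp;

gp.param = MGA_PARAM_CARD_TYPE;		/* new in version 3.2 */
gp.value = &card_type;
if (ioctl(fd, DRM_IOCTL_MGA_GETPARAM, &gp) == 0) {
	/* card_type now holds one of the MGA_CARD_TYPE_* values, letting
	 * the client-side driver tell a G4x0 from a G550. */
}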
@@ -38,8 +38,15 @@
#include "drm_pciids.h"

static int mga_driver_device_is_agp(drm_device_t * dev);

static int postinit( struct drm_device *dev, unsigned long flags )
{
	drm_mga_private_t * const dev_priv =
		(drm_mga_private_t *) dev->dev_private;

	dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);

	dev->counters += 3;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
@@ -79,8 +86,11 @@ extern int mga_max_ioctl;
static struct drm_driver driver = {
	.driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
	.preinit = mga_driver_preinit,
	.postcleanup = mga_driver_postcleanup,
	.pretakedown = mga_driver_pretakedown,
	.dma_quiescent = mga_driver_dma_quiescent,
	.device_is_agp = mga_driver_device_is_agp,
	.vblank_wait = mga_driver_vblank_wait,
	.irq_preinstall = mga_driver_irq_preinstall,
	.irq_postinstall = mga_driver_irq_postinstall,
@@ -128,3 +138,38 @@ module_exit(mga_exit);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_LICENSE("GPL and additional rights");
/**
* Determine if the device really is AGP or not.
*
* In addition to the usual tests performed by \c drm_device_is_agp, this
* function detects PCI G450 cards that appear to the system exactly like
* AGP G450 cards.
*
* \param dev The device to be tested.
*
* \returns
* If the device is a PCI G450, zero is returned. Otherwise 2 is returned.
*/
int mga_driver_device_is_agp(drm_device_t * dev)
{
const struct pci_dev * const pdev = dev->pdev;
/* There are PCI versions of the G450. These cards have the
* same PCI ID as the AGP G450, but have an additional PCI-to-PCI
* bridge chip. We detect these cards, which are not currently
* supported by this driver, by looking at the device ID of the
* bus the "card" is on. If vendor is 0x3388 (Hint Corp) and the
* device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the
* device.
*/
if ( (pdev->device == 0x0525)
&& (pdev->bus->self->vendor == 0x3388)
&& (pdev->bus->self->device == 0x0021) ) {
return 0;
}
return 2;
}
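[Editor's note: the core helper named in the comment above treats the driver hook as a tri-state: 0 means "definitely not AGP", 2 means "fall through to the generic PCI capability test" (the mga case), and any other value (the i8xx drivers return 1) is taken as the final answer. A rough sketch of that logic — an assumption about the DRM core's shape, not part of this patch:]

/* Sketch of the core test that consumes the device_is_agp hook. */
int drm_device_is_agp(drm_device_t * dev)
{
	if (dev->driver->device_is_agp != NULL) {
		int err = (*dev->driver->device_is_agp)(dev);

		if (err != 2)
			return err;	/* 0 = not AGP, 1 = AGP */
	}
	/* Generic test: does the PCI device expose an AGP capability? */
	return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
}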
@@ -38,10 +38,10 @@
#define DRIVER_NAME		"mga"
#define DRIVER_DESC		"Matrox G200/G400"
-#define DRIVER_DATE		"20021029"
#define DRIVER_DATE		"20050607"

#define DRIVER_MAJOR		3
-#define DRIVER_MINOR		1
#define DRIVER_MINOR		2
#define DRIVER_PATCHLEVEL	0

typedef struct drm_mga_primary_buffer {
@@ -87,9 +87,43 @@ typedef struct drm_mga_private {
	int chipset;
	int usec_timeout;
/**
* If set, the new DMA initialization sequence was used. This is
* primarily used to select how the driver should uninitialize its
* internal DMA structures.
*/
int used_new_dma_init;
/**
* If AGP memory is used for DMA buffers, this will be the value
* \c MGA_PAGPXFER. Otherwise, it will be zero (for a PCI transfer).
*/
u32 dma_access;
/**
* If AGP memory is used for DMA buffers, this will be the value
* \c MGA_WAGP_ENABLE. Otherwise, it will be zero (for a PCI
* transfer).
*/
u32 wagp_enable;
/**
* \name MMIO region parameters.
*
* \sa drm_mga_private_t::mmio
*/
/*@{*/
u32 mmio_base; /**< Bus address of base of MMIO. */
u32 mmio_size; /**< Size of the MMIO region. */
/*@}*/
	u32 clear_cmd;
	u32 maccess;
wait_queue_head_t fence_queue;
atomic_t last_fence_retired;
u32 next_fence_to_post;
	unsigned int fb_cpp;
	unsigned int front_offset;
	unsigned int front_pitch;
@@ -108,35 +142,43 @@ typedef struct drm_mga_private {
	drm_local_map_t *status;
	drm_local_map_t *warp;
	drm_local_map_t *primary;
-	drm_local_map_t *buffers;
	drm_local_map_t *agp_textures;

	DRM_AGP_MEM *agp_mem;
	unsigned int agp_pages;
} drm_mga_private_t;
/* mga_dma.c */
extern int mga_driver_preinit(drm_device_t * dev, unsigned long flags);
extern int mga_dma_bootstrap(DRM_IOCTL_ARGS);
extern int mga_dma_init(DRM_IOCTL_ARGS);
extern int mga_dma_flush(DRM_IOCTL_ARGS);
extern int mga_dma_reset(DRM_IOCTL_ARGS);
extern int mga_dma_buffers(DRM_IOCTL_ARGS);
extern int mga_driver_postcleanup(drm_device_t * dev);
extern void mga_driver_pretakedown(drm_device_t * dev);
extern int mga_driver_dma_quiescent(drm_device_t * dev);

extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);

extern void mga_do_dma_flush(drm_mga_private_t * dev_priv);
extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv);
extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv);

extern int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf );

/* mga_warp.c */
extern unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv);
extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
extern int mga_warp_init(drm_mga_private_t * dev_priv);

/* mga_irq.c */
extern int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence);
extern int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
extern void mga_driver_irq_preinstall(drm_device_t * dev);
extern void mga_driver_irq_postinstall(drm_device_t * dev);
extern void mga_driver_irq_uninstall(drm_device_t * dev);
extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg);
@@ -527,6 +569,12 @@ do { \
 */
#define MGA_EXEC			0x0100
/* AGP PLL encoding (for G200 only).
*/
#define MGA_AGP_PLL 0x1e4c
# define MGA_AGP2XPLL_DISABLE (0 << 0)
# define MGA_AGP2XPLL_ENABLE (1 << 0)
/* Warp registers
 */
#define MGA_WR0				0x2d00
@@ -129,9 +129,76 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd,
			 DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
}
typedef struct drm_mga_drm_bootstrap32 {
u32 texture_handle;
u32 texture_size;
u32 primary_size;
u32 secondary_bin_count;
u32 secondary_bin_size;
u32 agp_mode;
u8 agp_size;
} drm_mga_dma_bootstrap32_t;
static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_mga_dma_bootstrap32_t dma_bootstrap32;
drm_mga_dma_bootstrap_t __user *dma_bootstrap;
int err;
if (copy_from_user(&dma_bootstrap32, (void __user *)arg,
sizeof(dma_bootstrap32)))
return -EFAULT;
dma_bootstrap = compat_alloc_user_space(sizeof(*dma_bootstrap));
if (!access_ok(VERIFY_WRITE, dma_bootstrap, sizeof(*dma_bootstrap))
|| __put_user(dma_bootstrap32.texture_handle,
&dma_bootstrap->texture_handle)
|| __put_user(dma_bootstrap32.texture_size,
&dma_bootstrap->texture_size)
|| __put_user(dma_bootstrap32.primary_size,
&dma_bootstrap->primary_size)
|| __put_user(dma_bootstrap32.secondary_bin_count,
&dma_bootstrap->secondary_bin_count)
|| __put_user(dma_bootstrap32.secondary_bin_size,
&dma_bootstrap->secondary_bin_size)
|| __put_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode)
|| __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
return -EFAULT;
err = drm_ioctl(file->f_dentry->d_inode, file,
DRM_IOCTL_MGA_DMA_BOOTSTRAP,
(unsigned long)dma_bootstrap);
if (err)
return err;
if (__get_user(dma_bootstrap32.texture_handle,
&dma_bootstrap->texture_handle)
|| __get_user(dma_bootstrap32.texture_size,
&dma_bootstrap->texture_size)
|| __get_user(dma_bootstrap32.primary_size,
&dma_bootstrap->primary_size)
|| __get_user(dma_bootstrap32.secondary_bin_count,
&dma_bootstrap->secondary_bin_count)
|| __get_user(dma_bootstrap32.secondary_bin_size,
&dma_bootstrap->secondary_bin_size)
|| __get_user(dma_bootstrap32.agp_mode,
&dma_bootstrap->agp_mode)
|| __get_user(dma_bootstrap32.agp_size,
&dma_bootstrap->agp_size))
return -EFAULT;
if (copy_to_user((void __user *)arg, &dma_bootstrap32,
sizeof(dma_bootstrap32)))
return -EFAULT;
return 0;
}
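[Editor's note: why this wrapper exists — an inference from standard compat-ioctl practice, not spelled out in the patch: texture_handle is an unsigned long in the native struct, so a 64-bit kernel sees an 8-byte field where 32-bit userspace wrote 4 bytes, shifting every later member. An illustration of the mismatch:]

/* On an LP64 (64-bit) kernel:
 *   offsetof(drm_mga_dma_bootstrap_t,   texture_size) == 8
 * while 32-bit userspace laid the struct out with
 *   offsetof(drm_mga_dma_bootstrap32_t, texture_size) == 4
 * hence the field-by-field repacking through compat_alloc_user_space()
 * above, in both directions around the real ioctl.
 */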
drm_ioctl_compat_t *mga_compat_ioctls[] = {
	[DRM_MGA_INIT] = compat_mga_init,
	[DRM_MGA_GETPARAM] = compat_mga_getparam,
	[DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
};

/**
@@ -41,15 +41,40 @@ irqreturn_t mga_driver_irq_handler( DRM_IRQ_ARGS )
	drm_mga_private_t *dev_priv =
		(drm_mga_private_t *)dev->dev_private;
	int status;
	int handled = 0;

	status = MGA_READ(MGA_STATUS);

	/* VBLANK interrupt */
	if ( status & MGA_VLINEPEN ) {
		MGA_WRITE( MGA_ICLEAR, MGA_VLINEICLR );
		atomic_inc(&dev->vbl_received);
		DRM_WAKEUP(&dev->vbl_queue);
		drm_vbl_send_signals(dev);
		handled = 1;
	}

	/* SOFTRAP interrupt */
	if (status & MGA_SOFTRAPEN) {
		const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
		const u32 prim_end = MGA_READ(MGA_PRIMEND);

		MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);

		/* In addition to clearing the interrupt-pending bit, we
		 * have to write to MGA_PRIMEND to re-start the DMA operation.
		 */
		if ( (prim_start & ~0x03) != (prim_end & ~0x03) ) {
			MGA_WRITE(MGA_PRIMEND, prim_end);
		}

		atomic_inc(&dev_priv->last_fence_retired);
		DRM_WAKEUP(&dev_priv->fence_queue);
		handled = 1;
	}

	if ( handled ) {
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
@@ -73,9 +98,28 @@ int mga_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
	return ret;
}

int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence)
{
	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
	unsigned int cur_fence;
	int ret = 0;

	/* Assume that the user has missed the current sequence number
	 * by about a day rather than that she wants to wait for years
	 * using fences.
	 */
	DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
		    (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
		      - *sequence) <= (1 << 23)));

	*sequence = cur_fence;

	return ret;
}
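[Editor's note: the unsigned subtraction in the wake-up condition above makes the comparison safe across 32-bit wraparound — a retired counter "ahead" of the requested sequence by up to 2^23 satisfies the wait, while a genuinely future sequence shows up as a huge difference. A quick illustration with example values:]

/* cur_fence - *sequence is computed modulo 2^32:
 *   cur = 5, seq = 3          -> 2          <= (1 << 23): wait satisfied
 *   cur = 2, seq = 0xfffffffe -> 4          <= (1 << 23): satisfied across wrap
 *   cur = 3, seq = 5          -> 0xfffffffe  > (1 << 23): keep waiting
 */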
void mga_driver_irq_preinstall(drm_device_t * dev)
{
	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
	/* Disable *all* interrupts */
	MGA_WRITE( MGA_IEN, 0 );
@@ -83,12 +127,14 @@ void mga_driver_irq_preinstall( drm_device_t *dev ) {
	MGA_WRITE( MGA_ICLEAR, ~0 );
}

void mga_driver_irq_postinstall(drm_device_t * dev)
{
	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;

	DRM_INIT_WAITQUEUE( &dev_priv->fence_queue );

-	/* Turn on VBL interrupt */
-	MGA_WRITE( MGA_IEN, MGA_VLINEIEN );
	/* Turn on vertical blank interrupt and soft trap interrupt. */
	MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
}
@@ -98,5 +144,7 @@ void mga_driver_irq_uninstall( drm_device_t *dev ) {
		return;

	/* Disable *all* interrupts */
	MGA_WRITE(MGA_IEN, 0);

	dev->irq_enabled = 0;
}
@@ -53,16 +53,16 @@ static void mga_emit_clip_rect( drm_mga_private_t *dev_priv,
	/* Force reset of DWGCTL on G400 (eliminates clip disable bit).
	 */
	if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
		DMA_BLOCK(MGA_DWGCTL, ctx->dwgctl,
			  MGA_LEN + MGA_EXEC, 0x80000000,
			  MGA_DWGCTL, ctx->dwgctl,
			  MGA_LEN + MGA_EXEC, 0x80000000);
	}
-	DMA_BLOCK( MGA_DMAPAD, 0x00000000,
-		   MGA_CXBNDRY, (box->x2 << 16) | box->x1,
-		   MGA_YTOP, box->y1 * pitch,
-		   MGA_YBOT, box->y2 * pitch );
	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1,
		  MGA_YTOP, box->y1 * pitch,
		  MGA_YBOT, (box->y2 - 1) * pitch);

	ADVANCE_DMA();
}
@@ -260,12 +260,11 @@ static __inline__ void mga_g200_emit_pipe( drm_mga_private_t *dev_priv )
	/* Padding required due to hardware bug.
	 */
	DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
		  MGA_DMAPAD, 0xffffffff,
		  MGA_DMAPAD, 0xffffffff,
		  MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] |
			       MGA_WMODE_START | dev_priv->wagp_enable));

	ADVANCE_DMA();
}
@@ -342,12 +341,11 @@ static __inline__ void mga_g400_emit_pipe( drm_mga_private_t *dev_priv )
		  MGA_WR60, MGA_G400_WR_MAGIC );	/* tex1 height */

	/* Padding required due to hardware bug */
	DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
		  MGA_DMAPAD, 0xffffffff,
		  MGA_DMAPAD, 0xffffffff,
		  MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] |
				MGA_WMODE_START | dev_priv->wagp_enable));

	ADVANCE_DMA();
}
@@ -459,9 +457,9 @@ static int mga_verify_state( drm_mga_private_t *dev_priv )
	if ( dirty & MGA_UPLOAD_TEX0 )
		ret |= mga_verify_tex( dev_priv, 0 );

-	if ( dev_priv->chipset == MGA_CARD_TYPE_G400 ) {
	if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
		if (dirty & MGA_UPLOAD_TEX1)
			ret |= mga_verify_tex(dev_priv, 1);

		if ( dirty & MGA_UPLOAD_PIPE )
			ret |= ( sarea_priv->warp_pipe > MGA_MAX_G400_PIPES );
@@ -686,12 +684,12 @@ static void mga_dma_dispatch_vertex( drm_device_t *dev, drm_buf_t *buf )
			BEGIN_DMA( 1 );

			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
				  MGA_DMAPAD, 0x00000000,
				  MGA_SECADDRESS, (address |
						   MGA_DMA_VERTEX),
				  MGA_SECEND, ((address + length) |
					       dev_priv->dma_access));

			ADVANCE_DMA();
		} while ( ++i < sarea_priv->nbox );
@@ -733,11 +731,11 @@ static void mga_dma_dispatch_indices( drm_device_t *dev, drm_buf_t *buf,
			BEGIN_DMA( 1 );

			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
				  MGA_DMAPAD, 0x00000000,
				  MGA_SETUPADDRESS, address + start,
				  MGA_SETUPEND, ((address + end) |
						 dev_priv->dma_access));

			ADVANCE_DMA();
		} while ( ++i < sarea_priv->nbox );
@@ -764,7 +762,7 @@ static void mga_dma_dispatch_iload( drm_device_t *dev, drm_buf_t *buf,
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
	drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state;
-	u32 srcorg = buf->bus_address | MGA_SRCACC_AGP | MGA_SRCMAP_SYSMEM;
	u32 srcorg = buf->bus_address | dev_priv->dma_access | MGA_SRCMAP_SYSMEM;
	u32 y2;
	DMA_LOCALS;
	DRM_DEBUG( "buf=%d used=%d\n", buf->idx, buf->used );
@@ -1095,6 +1093,9 @@ static int mga_getparam( DRM_IOCTL_ARGS )
	case MGA_PARAM_IRQ_NR:
		value = dev->irq;
		break;
	case MGA_PARAM_CARD_TYPE:
		value = dev_priv->chipset;
		break;
	default:
		return DRM_ERR(EINVAL);
	}
@@ -1107,17 +1108,82 @@ static int mga_getparam( DRM_IOCTL_ARGS )
	return 0;
}
static int mga_set_fence(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_mga_private_t *dev_priv = dev->dev_private;
u32 temp;
DMA_LOCALS;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
}
DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
	/* I would normally do this assignment in the declaration of temp,
* but dev_priv may be NULL.
*/
temp = dev_priv->next_fence_to_post;
dev_priv->next_fence_to_post++;
BEGIN_DMA(1);
DMA_BLOCK(MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000,
MGA_SOFTRAP, 0x00000000);
ADVANCE_DMA();
if (DRM_COPY_TO_USER( (u32 __user *) data, & temp, sizeof(u32))) {
DRM_ERROR("copy_to_user\n");
return DRM_ERR(EFAULT);
}
return 0;
}
static int mga_wait_fence(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_mga_private_t *dev_priv = dev->dev_private;
u32 fence;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
return DRM_ERR(EINVAL);
}
DRM_COPY_FROM_USER_IOCTL(fence, (u32 __user *) data, sizeof(u32));
DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
mga_driver_fence_wait(dev, & fence);
if (DRM_COPY_TO_USER( (u32 __user *) data, & fence, sizeof(u32))) {
DRM_ERROR("copy_to_user\n");
return DRM_ERR(EFAULT);
}
return 0;
}
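[Editor's note: taken together, the two ioctls give userspace a simple completion primitive. A hedged usage sketch — the raw ioctl() calls and fd are placeholders:]

uint32_t fence;

/* Queue a SOFTRAP behind everything submitted so far and receive the
 * sequence number it will retire. */
ioctl(fd, DRM_IOCTL_MGA_SET_FENCE, &fence);

/* ... submit more work, do other things ... */

/* Block (up to the kernel's 3 * DRM_HZ timeout) until the SOFTRAP
 * interrupt retires that sequence number. */
ioctl(fd, DRM_IOCTL_MGA_WAIT_FENCE, &fence);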
drm_ioctl_desc_t mga_ioctls[] = {
	[DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, 1, 1},
	[DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, 1, 0},
	[DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, 1, 0},
	[DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, 1, 0},
	[DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, 1, 0},
	[DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, 1, 0},
	[DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, 1, 0},
	[DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, 1, 0},
	[DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, 1, 0},
	[DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, 1, 0},
	[DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, 1, 0},
	[DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, 1, 0},
	[DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, 1, 1},
};

int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
@@ -48,65 +48,52 @@ do { \
	vcbase += WARP_UCODE_SIZE( which );					\
} while (0)

static const unsigned int mga_warp_g400_microcode_size =
	(WARP_UCODE_SIZE(warp_g400_tgz) +
	 WARP_UCODE_SIZE(warp_g400_tgza) +
	 WARP_UCODE_SIZE(warp_g400_tgzaf) +
	 WARP_UCODE_SIZE(warp_g400_tgzf) +
	 WARP_UCODE_SIZE(warp_g400_tgzs) +
	 WARP_UCODE_SIZE(warp_g400_tgzsa) +
	 WARP_UCODE_SIZE(warp_g400_tgzsaf) +
	 WARP_UCODE_SIZE(warp_g400_tgzsf) +
	 WARP_UCODE_SIZE(warp_g400_t2gz) +
	 WARP_UCODE_SIZE(warp_g400_t2gza) +
	 WARP_UCODE_SIZE(warp_g400_t2gzaf) +
	 WARP_UCODE_SIZE(warp_g400_t2gzf) +
	 WARP_UCODE_SIZE(warp_g400_t2gzs) +
	 WARP_UCODE_SIZE(warp_g400_t2gzsa) +
	 WARP_UCODE_SIZE(warp_g400_t2gzsaf) +
	 WARP_UCODE_SIZE(warp_g400_t2gzsf));

static const unsigned int mga_warp_g200_microcode_size =
	(WARP_UCODE_SIZE(warp_g200_tgz) +
	 WARP_UCODE_SIZE(warp_g200_tgza) +
	 WARP_UCODE_SIZE(warp_g200_tgzaf) +
	 WARP_UCODE_SIZE(warp_g200_tgzf) +
	 WARP_UCODE_SIZE(warp_g200_tgzs) +
	 WARP_UCODE_SIZE(warp_g200_tgzsa) +
	 WARP_UCODE_SIZE(warp_g200_tgzsaf) +
	 WARP_UCODE_SIZE(warp_g200_tgzsf));

unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv)
{
	switch (dev_priv->chipset) {
	case MGA_CARD_TYPE_G400:
	case MGA_CARD_TYPE_G550:
		return PAGE_ALIGN(mga_warp_g400_microcode_size);
	case MGA_CARD_TYPE_G200:
		return PAGE_ALIGN(mga_warp_g200_microcode_size);
	default:
		return 0;
	}
}
static int mga_warp_install_g400_microcode( drm_mga_private_t *dev_priv )
{
	unsigned char *vcbase = dev_priv->warp->handle;
	unsigned long pcbase = dev_priv->warp->offset;

-	unsigned int size;
-
-	size = mga_warp_g400_microcode_size( dev_priv );
-	if ( size > dev_priv->warp->size ) {
-		DRM_ERROR( "microcode too large! (%u > %lu)\n",
-			   size, dev_priv->warp->size );
-		return DRM_ERR(ENOMEM);
-	}

	memset( dev_priv->warp_pipe_phys, 0,
		sizeof(dev_priv->warp_pipe_phys) );
@ -136,35 +123,36 @@ static int mga_warp_install_g200_microcode( drm_mga_private_t *dev_priv )
{
	unsigned char *vcbase = dev_priv->warp->handle;
	unsigned long pcbase = dev_priv->warp->offset;
-	unsigned int size;
-
-	size = mga_warp_g200_microcode_size( dev_priv );
-	if ( size > dev_priv->warp->size ) {
-		DRM_ERROR( "microcode too large! (%u > %lu)\n",
-			   size, dev_priv->warp->size );
-		return DRM_ERR(ENOMEM);
-	}
-
-	memset( dev_priv->warp_pipe_phys, 0,
-		sizeof(dev_priv->warp_pipe_phys) );
+	memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));

-	WARP_UCODE_INSTALL( warp_g200_tgz, MGA_WARP_TGZ );
-	WARP_UCODE_INSTALL( warp_g200_tgzf, MGA_WARP_TGZF );
-	WARP_UCODE_INSTALL( warp_g200_tgza, MGA_WARP_TGZA );
-	WARP_UCODE_INSTALL( warp_g200_tgzaf, MGA_WARP_TGZAF );
-	WARP_UCODE_INSTALL( warp_g200_tgzs, MGA_WARP_TGZS );
-	WARP_UCODE_INSTALL( warp_g200_tgzsf, MGA_WARP_TGZSF );
-	WARP_UCODE_INSTALL( warp_g200_tgzsa, MGA_WARP_TGZSA );
-	WARP_UCODE_INSTALL( warp_g200_tgzsaf, MGA_WARP_TGZSAF );
+	WARP_UCODE_INSTALL(warp_g200_tgz, MGA_WARP_TGZ);
+	WARP_UCODE_INSTALL(warp_g200_tgzf, MGA_WARP_TGZF);
+	WARP_UCODE_INSTALL(warp_g200_tgza, MGA_WARP_TGZA);
+	WARP_UCODE_INSTALL(warp_g200_tgzaf, MGA_WARP_TGZAF);
+	WARP_UCODE_INSTALL(warp_g200_tgzs, MGA_WARP_TGZS);
+	WARP_UCODE_INSTALL(warp_g200_tgzsf, MGA_WARP_TGZSF);
+	WARP_UCODE_INSTALL(warp_g200_tgzsa, MGA_WARP_TGZSA);
+	WARP_UCODE_INSTALL(warp_g200_tgzsaf, MGA_WARP_TGZSAF);

	return 0;
}
int mga_warp_install_microcode( drm_mga_private_t *dev_priv )
{
+	const unsigned int size = mga_warp_microcode_size(dev_priv);
+
+	DRM_DEBUG("MGA ucode size = %d bytes\n", size);
+	if (size > dev_priv->warp->size) {
+		DRM_ERROR("microcode too large! (%u > %lu)\n",
+			  size, dev_priv->warp->size);
+		return DRM_ERR(ENOMEM);
+	}
+
-	switch ( dev_priv->chipset ) {
+	switch (dev_priv->chipset) {
	case MGA_CARD_TYPE_G400:
-		return mga_warp_install_g400_microcode( dev_priv );
+	case MGA_CARD_TYPE_G550:
+		return mga_warp_install_g400_microcode(dev_priv);
	case MGA_CARD_TYPE_G200:
		return mga_warp_install_g200_microcode( dev_priv );
	default:
@ -182,10 +170,11 @@ int mga_warp_init( drm_mga_private_t *dev_priv )
	 */
	switch ( dev_priv->chipset ) {
	case MGA_CARD_TYPE_G400:
-		MGA_WRITE( MGA_WIADDR2, MGA_WMODE_SUSPEND );
-		MGA_WRITE( MGA_WGETMSB, 0x00000E00 );
-		MGA_WRITE( MGA_WVRTXSZ, 0x00001807 );
-		MGA_WRITE( MGA_WACCEPTSEQ, 0x18000000 );
+	case MGA_CARD_TYPE_G550:
+		MGA_WRITE(MGA_WIADDR2, MGA_WMODE_SUSPEND);
+		MGA_WRITE(MGA_WGETMSB, 0x00000E00);
+		MGA_WRITE(MGA_WVRTXSZ, 0x00001807);
+		MGA_WRITE(MGA_WACCEPTSEQ, 0x18000000);
		break;
	case MGA_CARD_TYPE_G200:
		MGA_WRITE( MGA_WIADDR, MGA_WMODE_SUSPEND );

View File

@ -326,7 +326,8 @@ static void r128_cce_init_ring_buffer( drm_device_t *dev,
		ring_start = dev_priv->cce_ring->offset - dev->agp->base;
	else
#endif
-		ring_start = dev_priv->cce_ring->offset - dev->sg->handle;
+		ring_start = dev_priv->cce_ring->offset -
+			(unsigned long)dev->sg->virtual;

	R128_WRITE( R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET );
@ -487,6 +488,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
		r128_do_cleanup_cce( dev );
		return DRM_ERR(EINVAL);
	}

+	dev->agp_buffer_token = init->buffers_offset;
	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
	if(!dev->agp_buffer_map) {
		DRM_ERROR("could not find dma buffer region!\n");
@ -537,7 +539,7 @@ static int r128_do_init_cce( drm_device_t *dev, drm_r128_init_t *init )
		dev_priv->cce_buffers_offset = dev->agp->base;
	else
#endif
-		dev_priv->cce_buffers_offset = dev->sg->handle;
+		dev_priv->cce_buffers_offset = (unsigned long)dev->sg->virtual;

	dev_priv->ring.start = (u32 *)dev_priv->cce_ring->handle;
	dev_priv->ring.end = ((u32 *)dev_priv->cce_ring->handle

View File

@ -215,7 +215,7 @@ typedef struct drm_r128_sarea {
#define DRM_IOCTL_R128_INDIRECT		DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t)
#define DRM_IOCTL_R128_FULLSCREEN	DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t)
#define DRM_IOCTL_R128_CLEAR2		DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t)
-#define DRM_IOCTL_R128_GETPARAM		DRM_IOW( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t)
+#define DRM_IOCTL_R128_GETPARAM		DRM_IOWR( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t)
#define DRM_IOCTL_R128_FLIP		DRM_IO(  DRM_COMMAND_BASE + DRM_R128_FLIP)
typedef struct drm_r128_init {

View File

@ -0,0 +1,801 @@
/* r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*-
*
* Copyright (C) The Weather Channel, Inc. 2002.
* Copyright (C) 2004 Nicolai Haehnle.
* All Rights Reserved.
*
* The Weather Channel (TM) funded Tungsten Graphics to develop the
* initial release of the Radeon 8500 driver under the XFree86 license.
* This notice must be preserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Nicolai Haehnle <prefect_@gmx.net>
*/
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
#include "r300_reg.h"
#define R300_SIMULTANEOUS_CLIPRECTS 4
/* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects
*/
static const int r300_cliprect_cntl[4] = {
0xAAAA,
0xEEEE,
0xFEFE,
0xFFFE
};
/**
* Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
* buffer, starting with index n.
*/
static int r300_emit_cliprects(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf,
int n)
{
drm_clip_rect_t box;
int nr;
int i;
RING_LOCALS;
nr = cmdbuf->nbox - n;
if (nr > R300_SIMULTANEOUS_CLIPRECTS)
nr = R300_SIMULTANEOUS_CLIPRECTS;
DRM_DEBUG("%i cliprects\n", nr);
if (nr) {
BEGIN_RING(6 + nr*2);
OUT_RING( CP_PACKET0( R300_RE_CLIPRECT_TL_0, nr*2 - 1 ) );
for(i = 0; i < nr; ++i) {
if (DRM_COPY_FROM_USER_UNCHECKED(&box, &cmdbuf->boxes[n+i], sizeof(box))) {
DRM_ERROR("copy cliprect faulted\n");
return DRM_ERR(EFAULT);
}
box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
(box.y1 << R300_CLIPRECT_Y_SHIFT));
OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
(box.y2 << R300_CLIPRECT_Y_SHIFT));
}
OUT_RING_REG( R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr-1] );
/* TODO/SECURITY: Force scissors to a safe value, otherwise the
* client might be able to trample over memory.
* The impact should be very limited, but I'd rather be safe than
* sorry.
*/
OUT_RING( CP_PACKET0( R300_RE_SCISSORS_TL, 1 ) );
OUT_RING( 0 );
OUT_RING( R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK );
ADVANCE_RING();
} else {
/* Why we allow zero cliprect rendering:
* There are some commands in a command buffer that must be submitted
* even when there are no cliprects, e.g. DMA buffer discard
* or state setting (though state setting could be avoided by
* simulating a loss of context).
*
* Now since the cmdbuf interface is so chaotic right now (and is
* bound to remain that way for a bit until things settle down),
* it is basically impossible to filter out the commands that are
* necessary and those that aren't.
*
* So I choose the safe way and don't do any filtering at all;
* instead, I simply set up the engine so that all rendering
* can't produce any fragments.
*/
BEGIN_RING(2);
OUT_RING_REG( R300_RE_CLIPRECT_CNTL, 0 );
ADVANCE_RING();
}
return 0;
}
u8 r300_reg_flags[0x10000>>2];
void r300_init_reg_flags(void)
{
int i;
memset(r300_reg_flags, 0, 0x10000>>2);
#define ADD_RANGE_MARK(reg, count,mark) \
for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
r300_reg_flags[i]|=(mark);
#define MARK_SAFE 1
#define MARK_CHECK_OFFSET 2
#define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE)
/* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
ADD_RANGE(0x2080, 1);
ADD_RANGE(R300_SE_VTE_CNTL, 2);
ADD_RANGE(0x2134, 2);
ADD_RANGE(0x2140, 1);
ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
ADD_RANGE(0x21DC, 1);
ADD_RANGE(0x221C, 1);
ADD_RANGE(0x2220, 4);
ADD_RANGE(0x2288, 1);
ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
ADD_RANGE(R300_GB_ENABLE, 1);
ADD_RANGE(R300_GB_MSPOS0, 5);
ADD_RANGE(R300_TX_ENABLE, 1);
ADD_RANGE(0x4200, 4);
ADD_RANGE(0x4214, 1);
ADD_RANGE(R300_RE_POINTSIZE, 1);
ADD_RANGE(0x4230, 3);
ADD_RANGE(R300_RE_LINE_CNT, 1);
ADD_RANGE(0x4238, 1);
ADD_RANGE(0x4260, 3);
ADD_RANGE(0x4274, 4);
ADD_RANGE(0x4288, 5);
ADD_RANGE(0x42A0, 1);
ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
ADD_RANGE(0x42B4, 1);
ADD_RANGE(R300_RE_CULL_CNTL, 1);
ADD_RANGE(0x42C0, 2);
ADD_RANGE(R300_RS_CNTL_0, 2);
ADD_RANGE(R300_RS_INTERP_0, 8);
ADD_RANGE(R300_RS_ROUTE_0, 8);
ADD_RANGE(0x43A4, 2);
ADD_RANGE(0x43E8, 1);
ADD_RANGE(R300_PFS_CNTL_0, 3);
ADD_RANGE(R300_PFS_NODE_0, 4);
ADD_RANGE(R300_PFS_TEXI_0, 64);
ADD_RANGE(0x46A4, 5);
ADD_RANGE(R300_PFS_INSTR0_0, 64);
ADD_RANGE(R300_PFS_INSTR1_0, 64);
ADD_RANGE(R300_PFS_INSTR2_0, 64);
ADD_RANGE(R300_PFS_INSTR3_0, 64);
ADD_RANGE(0x4BC0, 1);
ADD_RANGE(0x4BC8, 3);
ADD_RANGE(R300_PP_ALPHA_TEST, 2);
ADD_RANGE(0x4BD8, 1);
ADD_RANGE(R300_PFS_PARAM_0_X, 64);
ADD_RANGE(0x4E00, 1);
ADD_RANGE(R300_RB3D_CBLEND, 2);
ADD_RANGE(R300_RB3D_COLORMASK, 1);
ADD_RANGE(0x4E10, 3);
ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET); /* check offset */
ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
ADD_RANGE(0x4E50, 9);
ADD_RANGE(0x4E88, 1);
ADD_RANGE(0x4EA0, 2);
ADD_RANGE(R300_RB3D_ZSTENCIL_CNTL_0, 3);
ADD_RANGE(0x4F10, 4);
ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
ADD_RANGE(R300_RB3D_DEPTHPITCH, 1);
ADD_RANGE(0x4F28, 1);
ADD_RANGE(0x4F30, 2);
ADD_RANGE(0x4F44, 1);
ADD_RANGE(0x4F54, 1);
ADD_RANGE(R300_TX_FILTER_0, 16);
ADD_RANGE(R300_TX_UNK1_0, 16);
ADD_RANGE(R300_TX_SIZE_0, 16);
ADD_RANGE(R300_TX_FORMAT_0, 16);
/* Texture offset is dangerous and needs more checking */
ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
ADD_RANGE(R300_TX_UNK4_0, 16);
ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
/* Sporadic registers used as primitives are emitted */
ADD_RANGE(0x4f18, 1);
ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
}
static __inline__ int r300_check_range(unsigned reg, int count)
{
int i;
if(reg & ~0xffff)return -1;
for(i=(reg>>2);i<(reg>>2)+count;i++)
if(r300_reg_flags[i]!=MARK_SAFE)return 1;
return 0;
}
/* we expect offsets passed to the framebuffer to be either within video memory or
within AGP space */
static __inline__ int r300_check_offset(drm_radeon_private_t* dev_priv, u32 offset)
{
	/* We really want to check against the end of the video aperture,
	   but that value is not being kept around.
	   This code is correct for now (it does the same thing as the
	   code that sets MC_FB_LOCATION in radeon_cp.c). */
if((offset>=dev_priv->fb_location) &&
(offset<dev_priv->gart_vm_start))return 0;
if((offset>=dev_priv->gart_vm_start) &&
(offset<dev_priv->gart_vm_start+dev_priv->gart_size))return 0;
return 1;
}
static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf,
drm_r300_cmd_header_t header)
{
int reg;
int sz;
int i;
int values[64];
RING_LOCALS;
sz = header.packet0.count;
reg = (header.packet0.reghi << 8) | header.packet0.reglo;
if((sz>64)||(sz<0)){
DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", reg, sz);
return DRM_ERR(EINVAL);
}
for(i=0;i<sz;i++){
values[i]=((int __user*)cmdbuf->buf)[i];
switch(r300_reg_flags[(reg>>2)+i]){
case MARK_SAFE:
break;
case MARK_CHECK_OFFSET:
if(r300_check_offset(dev_priv, (u32)values[i])){
DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n", reg, sz);
return DRM_ERR(EINVAL);
}
break;
default:
DRM_ERROR("Register %04x failed check as flag=%02x\n", reg+i*4, r300_reg_flags[(reg>>2)+i]);
return DRM_ERR(EINVAL);
}
}
BEGIN_RING(1+sz);
OUT_RING( CP_PACKET0( reg, sz-1 ) );
OUT_RING_TABLE( values, sz );
ADVANCE_RING();
cmdbuf->buf += sz*4;
cmdbuf->bufsz -= sz*4;
return 0;
}
/**
* Emits a packet0 setting arbitrary registers.
* Called by r300_do_cp_cmdbuf.
*
* Note that checks are performed on contents and addresses of the registers
*/
static __inline__ int r300_emit_packet0(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf,
drm_r300_cmd_header_t header)
{
int reg;
int sz;
RING_LOCALS;
sz = header.packet0.count;
reg = (header.packet0.reghi << 8) | header.packet0.reglo;
if (!sz)
return 0;
if (sz*4 > cmdbuf->bufsz)
return DRM_ERR(EINVAL);
if (reg+sz*4 >= 0x10000){
DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg, sz);
return DRM_ERR(EINVAL);
}
if(r300_check_range(reg, sz)){
/* go and check everything */
return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf, header);
}
/* the rest of the data is safe to emit, whatever the values the user passed */
BEGIN_RING(1+sz);
OUT_RING( CP_PACKET0( reg, sz-1 ) );
OUT_RING_TABLE( (int __user*)cmdbuf->buf, sz );
ADVANCE_RING();
cmdbuf->buf += sz*4;
cmdbuf->bufsz -= sz*4;
return 0;
}
/**
* Uploads user-supplied vertex program instructions or parameters onto
* the graphics card.
* Called by r300_do_cp_cmdbuf.
*/
static __inline__ int r300_emit_vpu(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf,
drm_r300_cmd_header_t header)
{
int sz;
int addr;
RING_LOCALS;
sz = header.vpu.count;
addr = (header.vpu.adrhi << 8) | header.vpu.adrlo;
if (!sz)
return 0;
if (sz*16 > cmdbuf->bufsz)
return DRM_ERR(EINVAL);
BEGIN_RING(5+sz*4);
	/* Wait for VAP to come to its senses.. */
	/* There is no need to emit this more than once (only once before
	   the VAP is programmed), but that optimization is for later. */
OUT_RING_REG( R300_VAP_PVS_WAITIDLE, 0 );
OUT_RING_REG( R300_VAP_PVS_UPLOAD_ADDRESS, addr );
OUT_RING( CP_PACKET0_TABLE( R300_VAP_PVS_UPLOAD_DATA, sz*4 - 1 ) );
OUT_RING_TABLE( (int __user*)cmdbuf->buf, sz*4 );
ADVANCE_RING();
cmdbuf->buf += sz*16;
cmdbuf->bufsz -= sz*16;
return 0;
}
/**
* Emit a clear packet from userspace.
* Called by r300_emit_packet3.
*/
static __inline__ int r300_emit_clear(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf)
{
RING_LOCALS;
if (8*4 > cmdbuf->bufsz)
return DRM_ERR(EINVAL);
BEGIN_RING(10);
OUT_RING( CP_PACKET3( R200_3D_DRAW_IMMD_2, 8 ) );
OUT_RING( R300_PRIM_TYPE_POINT|R300_PRIM_WALK_RING|
(1<<R300_PRIM_NUM_VERTICES_SHIFT) );
OUT_RING_TABLE( (int __user*)cmdbuf->buf, 8 );
ADVANCE_RING();
cmdbuf->buf += 8*4;
cmdbuf->bufsz -= 8*4;
return 0;
}
static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf,
u32 header)
{
int count, i,k;
#define MAX_ARRAY_PACKET 64
u32 payload[MAX_ARRAY_PACKET];
u32 narrays;
RING_LOCALS;
count=(header>>16) & 0x3fff;
if((count+1)>MAX_ARRAY_PACKET){
DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", count);
return DRM_ERR(EINVAL);
}
memset(payload, 0, MAX_ARRAY_PACKET*4);
memcpy(payload, cmdbuf->buf+4, (count+1)*4);
/* carefully check packet contents */
narrays=payload[0];
k=0;
i=1;
while((k<narrays) && (i<(count+1))){
i++; /* skip attribute field */
if(r300_check_offset(dev_priv, payload[i])){
DRM_ERROR("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i);
return DRM_ERR(EINVAL);
}
k++;
i++;
if(k==narrays)break;
/* have one more to process, they come in pairs */
if(r300_check_offset(dev_priv, payload[i])){
DRM_ERROR("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i);
return DRM_ERR(EINVAL);
}
k++;
i++;
}
/* do the counts match what we expect ? */
if((k!=narrays) || (i!=(count+1))){
DRM_ERROR("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", k, i, narrays, count+1);
return DRM_ERR(EINVAL);
}
/* all clear, output packet */
BEGIN_RING(count+2);
OUT_RING(header);
OUT_RING_TABLE(payload, count+1);
ADVANCE_RING();
cmdbuf->buf += (count+2)*4;
cmdbuf->bufsz -= (count+2)*4;
return 0;
}
static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf)
{
u32 header;
int count;
RING_LOCALS;
if (4 > cmdbuf->bufsz)
return DRM_ERR(EINVAL);
/* Fixme !! This simply emits a packet without much checking.
We need to be smarter. */
/* obtain first word - actual packet3 header */
header = *(u32 __user*)cmdbuf->buf;
/* Is it packet 3 ? */
if( (header>>30)!=0x3 ) {
DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
return DRM_ERR(EINVAL);
}
count=(header>>16) & 0x3fff;
/* Check again now that we know how much data to expect */
if ((count+2)*4 > cmdbuf->bufsz){
DRM_ERROR("Expected packet3 of length %d but have only %d bytes left\n",
(count+2)*4, cmdbuf->bufsz);
return DRM_ERR(EINVAL);
}
/* Is it a packet type we know about ? */
switch(header & 0xff00){
case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);
case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */
case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */
case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */
case RADEON_CP_INDX_BUFFER: /* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */
case RADEON_WAIT_FOR_IDLE:
case RADEON_CP_NOP:
/* these packets are safe */
break;
default:
DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
return DRM_ERR(EINVAL);
}
BEGIN_RING(count+2);
OUT_RING(header);
OUT_RING_TABLE( (int __user*)(cmdbuf->buf+4), count+1);
ADVANCE_RING();
cmdbuf->buf += (count+2)*4;
cmdbuf->bufsz -= (count+2)*4;
return 0;
}
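/* Editorial sketch, not part of the patch: the packet3 header layout
 * that r300_emit_raw_packet3() above relies on, decoded the same way.
 * Field meanings are inferred from the checks in that function. */
static __inline__ void r300_decode_packet3_header(u32 header)
{
	unsigned int type = header >> 30;		/* 0x3 for packet3 */
	unsigned int count = (header >> 16) & 0x3fff;	/* payload dwords - 1 */
	unsigned int opcode = header & 0xff00;	/* e.g. RADEON_3D_LOAD_VBPNTR */

	/* The full packet spans count+2 ring dwords (header plus count+1
	 * data words), matching the BEGIN_RING(count+2) reservation above. */
	DRM_DEBUG("packet3 type=%u opcode=0x%04x length=%u dwords\n",
		  type, opcode, count + 2);
}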
/**
* Emit a rendering packet3 from userspace.
* Called by r300_do_cp_cmdbuf.
*/
static __inline__ int r300_emit_packet3(drm_radeon_private_t* dev_priv,
drm_radeon_cmd_buffer_t* cmdbuf,
drm_r300_cmd_header_t header)
{
int n;
int ret;
char __user* orig_buf = cmdbuf->buf;
int orig_bufsz = cmdbuf->bufsz;
/* This is a do-while-loop so that we run the interior at least once,
* even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
*/
n = 0;
do {
if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) {
ret = r300_emit_cliprects(dev_priv, cmdbuf, n);
if (ret)
return ret;
cmdbuf->buf = orig_buf;
cmdbuf->bufsz = orig_bufsz;
}
switch(header.packet3.packet) {
case R300_CMD_PACKET3_CLEAR:
DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
ret = r300_emit_clear(dev_priv, cmdbuf);
if (ret) {
DRM_ERROR("r300_emit_clear failed\n");
return ret;
}
break;
case R300_CMD_PACKET3_RAW:
DRM_DEBUG("R300_CMD_PACKET3_RAW\n");
ret = r300_emit_raw_packet3(dev_priv, cmdbuf);
if (ret) {
DRM_ERROR("r300_emit_raw_packet3 failed\n");
return ret;
}
break;
default:
DRM_ERROR("bad packet3 type %i at %p\n",
header.packet3.packet,
cmdbuf->buf - sizeof(header));
return DRM_ERR(EINVAL);
}
n += R300_SIMULTANEOUS_CLIPRECTS;
} while(n < cmdbuf->nbox);
return 0;
}
/* Some of the R300 chips seem to be extremely touchy about the two registers
* that are configured in r300_pacify.
* Among the worst offenders seems to be the R300 ND (0x4E44): When userspace
* sends a command buffer that contains only state setting commands and a
* vertex program/parameter upload sequence, this will eventually lead to a
* lockup, unless the sequence is bracketed by calls to r300_pacify.
* So we should take great care to *always* call r300_pacify before
* *anything* 3D related, and again afterwards. This is what the
* call bracket in r300_do_cp_cmdbuf is for.
*/
/**
* Emit the sequence to pacify R300.
*/
static __inline__ void r300_pacify(drm_radeon_private_t* dev_priv)
{
RING_LOCALS;
BEGIN_RING(6);
OUT_RING( CP_PACKET0( R300_RB3D_DSTCACHE_CTLSTAT, 0 ) );
OUT_RING( 0xa );
OUT_RING( CP_PACKET0( 0x4f18, 0 ) );
OUT_RING( 0x3 );
OUT_RING( CP_PACKET3( RADEON_CP_NOP, 0 ) );
OUT_RING( 0x0 );
ADVANCE_RING();
}
/**
* Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
* The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
* be careful about how this function is called.
*/
static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
buf->pending = 1;
buf->used = 0;
}
/**
* Parses and validates a user-supplied command buffer and emits appropriate
* commands on the DMA ring buffer.
* Called by the ioctl handler function radeon_cp_cmdbuf.
*/
int r300_do_cp_cmdbuf(drm_device_t* dev,
DRMFILE filp,
drm_file_t* filp_priv,
drm_radeon_cmd_buffer_t* cmdbuf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_device_dma_t *dma = dev->dma;
drm_buf_t *buf = NULL;
int emit_dispatch_age = 0;
int ret = 0;
DRM_DEBUG("\n");
/* See the comment above r300_emit_begin3d for why this call must be here,
* and what the cleanup gotos are for. */
r300_pacify(dev_priv);
if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
if (ret)
goto cleanup;
}
while(cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
int idx;
drm_r300_cmd_header_t header;
header.u = *(unsigned int *)cmdbuf->buf;
cmdbuf->buf += sizeof(header);
cmdbuf->bufsz -= sizeof(header);
switch(header.header.cmd_type) {
case R300_CMD_PACKET0:
DRM_DEBUG("R300_CMD_PACKET0\n");
ret = r300_emit_packet0(dev_priv, cmdbuf, header);
if (ret) {
DRM_ERROR("r300_emit_packet0 failed\n");
goto cleanup;
}
break;
case R300_CMD_VPU:
DRM_DEBUG("R300_CMD_VPU\n");
ret = r300_emit_vpu(dev_priv, cmdbuf, header);
if (ret) {
DRM_ERROR("r300_emit_vpu failed\n");
goto cleanup;
}
break;
case R300_CMD_PACKET3:
DRM_DEBUG("R300_CMD_PACKET3\n");
ret = r300_emit_packet3(dev_priv, cmdbuf, header);
if (ret) {
DRM_ERROR("r300_emit_packet3 failed\n");
goto cleanup;
}
break;
case R300_CMD_END3D:
DRM_DEBUG("R300_CMD_END3D\n");
			/* TODO:
			   Ideally the userspace driver should not need to issue
			   this call, i.e. the drm driver should issue it
			   automatically and prevent lockups.
			   In practice, we do not understand why this call is
			   needed and what it does (except for some vague guesses
			   that it has to do with cache coherence), so the
			   userspace driver issues it for now.
			   Once we are sure which uses prevent lockups, the code
			   could be moved into the kernel and the userspace driver
			   would no longer need this command.
			   Note that issuing this command does not hurt anything,
			   except possibly performance. */
r300_pacify(dev_priv);
break;
case R300_CMD_CP_DELAY:
/* simple enough, we can do it here */
DRM_DEBUG("R300_CMD_CP_DELAY\n");
{
int i;
RING_LOCALS;
BEGIN_RING(header.delay.count);
for(i=0;i<header.delay.count;i++)
OUT_RING(RADEON_CP_PACKET2);
ADVANCE_RING();
}
break;
case R300_CMD_DMA_DISCARD:
DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
idx = header.dma.buf_idx;
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
idx, dma->buf_count - 1);
ret = DRM_ERR(EINVAL);
goto cleanup;
}
buf = dma->buflist[idx];
if (buf->filp != filp || buf->pending) {
DRM_ERROR("bad buffer %p %p %d\n",
buf->filp, filp, buf->pending);
ret = DRM_ERR(EINVAL);
goto cleanup;
}
emit_dispatch_age = 1;
r300_discard_buffer(dev, buf);
break;
case R300_CMD_WAIT:
/* simple enough, we can do it here */
DRM_DEBUG("R300_CMD_WAIT\n");
if(header.wait.flags==0)break; /* nothing to do */
{
RING_LOCALS;
BEGIN_RING(2);
OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );
OUT_RING( (header.wait.flags & 0xf)<<14 );
ADVANCE_RING();
}
break;
default:
DRM_ERROR("bad cmd_type %i at %p\n",
header.header.cmd_type,
cmdbuf->buf - sizeof(header));
ret = DRM_ERR(EINVAL);
goto cleanup;
}
}
DRM_DEBUG("END\n");
cleanup:
r300_pacify(dev_priv);
/* We emit the vertex buffer age here, outside the pacifier "brackets"
* for two reasons:
* (1) This may coalesce multiple age emissions into a single one and
* (2) more importantly, some chips lock up hard when scratch registers
* are written inside the pacifier bracket.
*/
if (emit_dispatch_age) {
RING_LOCALS;
/* Emit the vertex buffer age */
BEGIN_RING(2);
RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch);
ADVANCE_RING();
}
COMMIT_RING();
return ret;
}
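/* Editorial sketch, not part of the patch: the smallest useful stream
 * the parser above accepts -- a WAIT followed by an END3D, each a bare
 * 32-bit header with no payload.  A caller would point cmdbuf->buf at
 * the array and set cmdbuf->bufsz = sizeof(stream). */
static void r300_example_stream(void)
{
	drm_r300_cmd_header_t stream[2];

	stream[0].u = 0;
	stream[0].wait.cmd_type = R300_CMD_WAIT;
	stream[0].wait.flags = R300_WAIT_3D;	/* idle the 3D engine */
	stream[1].u = 0;
	stream[1].header.cmd_type = R300_CMD_END3D;	/* ends in r300_pacify() */
	/* (The stream would then be handed to the cmdbuf ioctl path.) */
}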

drivers/char/drm/r300_reg.h: new file, 1412 lines

File diff suppressed because it is too large

View File

@ -32,6 +32,7 @@
#include "drm.h" #include "drm.h"
#include "radeon_drm.h" #include "radeon_drm.h"
#include "radeon_drv.h" #include "radeon_drv.h"
#include "r300_reg.h"
#define RADEON_FIFO_DEBUG 0 #define RADEON_FIFO_DEBUG 0
@ -1151,6 +1152,8 @@ static void radeon_cp_init_ring_buffer( drm_device_t *dev,
#if __OS_HAS_AGP
	if ( !dev_priv->is_pci ) {
+		/* set RADEON_AGP_BASE here instead of relying on X from user space */
+		RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
		RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR,
			      dev_priv->ring_rptr->offset
			      - dev->agp->base
@ -1407,6 +1410,7 @@ static int radeon_do_init_cp( drm_device_t *dev, drm_radeon_init_t *init )
		radeon_do_cleanup_cp(dev);
		return DRM_ERR(EINVAL);
	}

+	dev->agp_buffer_token = init->buffers_offset;
	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
	if(!dev->agp_buffer_map) {
		DRM_ERROR("could not find dma buffer region!\n");
@ -1625,6 +1629,9 @@ int radeon_cp_init( DRM_IOCTL_ARGS )
	DRM_COPY_FROM_USER_IOCTL( init, (drm_radeon_init_t __user *)data, sizeof(init) );

+	if(init.func == RADEON_INIT_R300_CP)
+		r300_init_reg_flags();
+
	switch ( init.func ) {
	case RADEON_INIT_CP:
	case RADEON_INIT_R200_CP:
@ -2039,15 +2046,43 @@ int radeon_driver_preinit(struct drm_device *dev, unsigned long flags)
	case CHIP_RV200:
	case CHIP_R200:
	case CHIP_R300:
+	case CHIP_R420:
		dev_priv->flags |= CHIP_HAS_HIERZ;
		break;
	default:
		/* all other chips have no hierarchical z buffer */
		break;
	}

+	if (drm_device_is_agp(dev))
+		dev_priv->flags |= CHIP_IS_AGP;
+
+	DRM_DEBUG("%s card detected\n",
+		  ((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : "PCI"));
	return ret;
}

+int radeon_presetup(struct drm_device *dev)
+{
+	int ret;
+	drm_local_map_t *map;
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
+			 drm_get_resource_len(dev, 2), _DRM_REGISTERS,
+			 _DRM_READ_ONLY, &dev_priv->mmio);
+	if (ret != 0)
+		return ret;
+
+	ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
+			 drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
+			 _DRM_WRITE_COMBINING, &map);
+	if (ret != 0)
+		return ret;
+
+	return 0;
+}
+
int radeon_driver_postcleanup(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

View File

@ -195,6 +195,52 @@ typedef union {
#define RADEON_WAIT_2D		0x1
#define RADEON_WAIT_3D		0x2
/* Allowed parameters for R300_CMD_PACKET3
*/
#define R300_CMD_PACKET3_CLEAR 0
#define R300_CMD_PACKET3_RAW 1
/* Commands understood by cmd_buffer ioctl for R300.
* The interface has not been stabilized, so some of these may be removed
* and eventually reordered before stabilization.
*/
#define R300_CMD_PACKET0 1
#define R300_CMD_VPU 2 /* emit vertex program upload */
#define R300_CMD_PACKET3 3 /* emit a packet3 */
#define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */
#define R300_CMD_CP_DELAY 5
#define R300_CMD_DMA_DISCARD 6
#define R300_CMD_WAIT 7
# define R300_WAIT_2D 0x1
# define R300_WAIT_3D 0x2
# define R300_WAIT_2D_CLEAN 0x3
# define R300_WAIT_3D_CLEAN 0x4
typedef union {
unsigned int u;
struct {
unsigned char cmd_type, pad0, pad1, pad2;
} header;
struct {
unsigned char cmd_type, count, reglo, reghi;
} packet0;
struct {
unsigned char cmd_type, count, adrlo, adrhi;
} vpu;
struct {
unsigned char cmd_type, packet, pad0, pad1;
} packet3;
struct {
unsigned char cmd_type, packet;
unsigned short count; /* amount of packet2 to emit */
} delay;
struct {
unsigned char cmd_type, buf_idx, pad0, pad1;
} dma;
struct {
unsigned char cmd_type, flags, pad0, pad1;
} wait;
} drm_r300_cmd_header_t;
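/* Editorial sketch, not part of the patch: how a (hypothetical)
 * userspace helper might fill this header for a packet0 command.
 * The reglo/reghi split mirrors the reg = (reghi << 8) | reglo
 * reassembly in r300_emit_packet0() in r300_cmdbuf.c. */
static drm_r300_cmd_header_t r300_make_packet0_header(unsigned int reg,
						      unsigned char count)
{
	drm_r300_cmd_header_t header;

	header.packet0.cmd_type = R300_CMD_PACKET0;
	header.packet0.count = count;			/* values that follow */
	header.packet0.reglo = reg & 0xff;		/* offset, low byte */
	header.packet0.reghi = (reg >> 8) & 0xff;	/* offset, high byte */
	return header;
}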
#define RADEON_FRONT		0x1
#define RADEON_BACK		0x2

View File

@ -76,6 +76,7 @@ static struct drm_driver driver = {
	.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
	.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
	.preinit = radeon_driver_preinit,
+	.presetup = radeon_presetup,
	.postcleanup = radeon_driver_postcleanup,
	.prerelease = radeon_driver_prerelease,
	.pretakedown = radeon_driver_pretakedown,

View File

@ -82,9 +82,10 @@
 * - Add support for r100 cube maps
 * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear
 *       texture filtering on r200
+ * 1.17- Add initial support for R300 (3D).
 */

#define DRIVER_MAJOR		1
-#define DRIVER_MINOR		16
+#define DRIVER_MINOR		17
#define DRIVER_PATCHLEVEL	0
#define GET_RING_HEAD(dev_priv)	DRM_READ32( (dev_priv)->ring_rptr, 0 )
@ -106,7 +107,9 @@ enum radeon_family {
	CHIP_RV280,
	CHIP_R300,
	CHIP_RS300,
+	CHIP_R350,
	CHIP_RV350,
+	CHIP_R420,
	CHIP_LAST,
};
@ -290,6 +293,7 @@ extern int radeon_wait_ring( drm_radeon_private_t *dev_priv, int n );
extern int radeon_do_cp_idle( drm_radeon_private_t *dev_priv );
extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags);
+extern int radeon_presetup(struct drm_device *dev);
extern int radeon_driver_postcleanup(struct drm_device *dev);

extern int radeon_mem_alloc( DRM_IOCTL_ARGS );
@ -320,6 +324,14 @@ extern int radeon_postcleanup( struct drm_device *dev );
extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg);

+/* r300_cmdbuf.c */
+extern void r300_init_reg_flags(void);
+extern int r300_do_cp_cmdbuf(drm_device_t* dev, DRMFILE filp,
+			     drm_file_t* filp_priv,
+			     drm_radeon_cmd_buffer_t* cmdbuf);

/* Flags for stats.boxes
 */
#define RADEON_BOX_DMA_IDLE	0x1
@ -357,6 +369,11 @@ extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
#define RADEON_CRTC2_OFFSET		0x0324
#define RADEON_CRTC2_OFFSET_CNTL	0x0328

+#define RADEON_MPP_TB_CONFIG		0x01c0
+#define RADEON_MEM_CNTL			0x0140
+#define RADEON_MEM_SDRAM_MODE_REG	0x0158
+#define RADEON_AGP_BASE			0x0170

#define RADEON_RB3D_COLOROFFSET		0x1c40
#define RADEON_RB3D_COLORPITCH		0x1c48
@ -651,16 +668,27 @@ extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
#define RADEON_CP_PACKET1	0x40000000
#define RADEON_CP_PACKET2	0x80000000
#define RADEON_CP_PACKET3	0xC0000000
+#	define RADEON_CP_NOP			0x00001000
+#	define RADEON_CP_NEXT_CHAR		0x00001900
+#	define RADEON_CP_PLY_NEXTSCAN		0x00001D00
+#	define RADEON_CP_SET_SCISSORS		0x00001E00
+	/* GEN_INDX_PRIM is unsupported starting with R300 */
#	define RADEON_3D_RNDR_GEN_INDX_PRIM	0x00002300
#	define RADEON_WAIT_FOR_IDLE		0x00002600
#	define RADEON_3D_DRAW_VBUF		0x00002800
#	define RADEON_3D_DRAW_IMMD		0x00002900
#	define RADEON_3D_DRAW_INDX		0x00002A00
+#	define RADEON_CP_LOAD_PALETTE		0x00002C00
#	define RADEON_3D_LOAD_VBPNTR		0x00002F00
#	define RADEON_MPEG_IDCT_MACROBLOCK	0x00003000
#	define RADEON_MPEG_IDCT_MACROBLOCK_REV	0x00003100
#	define RADEON_3D_CLEAR_ZMASK		0x00003200
+#	define RADEON_CP_INDX_BUFFER		0x00003300
+#	define RADEON_CP_3D_DRAW_VBUF_2		0x00003400
+#	define RADEON_CP_3D_DRAW_IMMD_2		0x00003500
+#	define RADEON_CP_3D_DRAW_INDX_2		0x00003600
#	define RADEON_3D_CLEAR_HIZ		0x00003700
+#	define RADEON_CP_3D_CLEAR_CMASK		0x00003802
#	define RADEON_CNTL_HOSTDATA_BLT		0x00009400
#	define RADEON_CNTL_PAINT_MULTI		0x00009A00
#	define RADEON_CNTL_BITBLT_MULTI		0x00009B00

View File

@ -1493,7 +1493,7 @@ static void radeon_cp_dispatch_indices( drm_device_t *dev,
}

-#define RADEON_MAX_TEXTURE_SIZE (RADEON_BUFFER_SIZE - 8 * sizeof(u32))
+#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE

static int radeon_cp_dispatch_texture( DRMFILE filp,
				       drm_device_t *dev,
@ -1506,10 +1506,11 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
	u32 format;
	u32 *buffer;
	const u8 __user *data;
-	int size, dwords, tex_width, blit_width;
+	int size, dwords, tex_width, blit_width, spitch;
	u32 height;
	int i;
	u32 texpitch, microtile;
+	u32 offset;
	RING_LOCALS;

	DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
@ -1530,17 +1531,6 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
	RADEON_WAIT_UNTIL_IDLE();
	ADVANCE_RING();

-#ifdef __BIG_ENDIAN
-	/* The Mesa texture functions provide the data in little endian as the
-	 * chip wants it, but we need to compensate for the fact that the CP
-	 * ring gets byte-swapped
-	 */
-	BEGIN_RING( 2 );
-	OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
-	ADVANCE_RING();
-#endif
-
	/* The compiler won't optimize away a division by a variable,
	 * even if the only legal values are powers of two.  Thus, we'll
	 * use a shift instead.
@ -1572,6 +1562,10 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
DRM_ERROR( "invalid texture format %d\n", tex->format ); DRM_ERROR( "invalid texture format %d\n", tex->format );
return DRM_ERR(EINVAL); return DRM_ERR(EINVAL);
} }
spitch = blit_width >> 6;
if (spitch == 0 && image->height > 1)
return DRM_ERR(EINVAL);
texpitch = tex->pitch; texpitch = tex->pitch;
if ((texpitch << 22) & RADEON_DST_TILE_MICRO) { if ((texpitch << 22) & RADEON_DST_TILE_MICRO) {
microtile = 1; microtile = 1;
@ -1624,25 +1618,6 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
	 */
	buffer = (u32*)((char*)dev->agp_buffer_map->handle + buf->offset);
	dwords = size / 4;

-	buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
-	buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
-		     RADEON_GMC_BRUSH_NONE |
-		     (format << 8) |
-		     RADEON_GMC_SRC_DATATYPE_COLOR |
-		     RADEON_ROP3_S |
-		     RADEON_DP_SRC_SOURCE_HOST_DATA |
-		     RADEON_GMC_CLR_CMP_CNTL_DIS |
-		     RADEON_GMC_WR_MSK_DIS);
-
-	buffer[2] = (texpitch << 22) | (tex->offset >> 10);
-	buffer[3] = 0xffffffff;
-	buffer[4] = 0xffffffff;
-	buffer[5] = (image->y << 16) | image->x;
-	buffer[6] = (height << 16) | image->width;
-	buffer[7] = dwords;
-
-	buffer += 8;
-
	if (microtile) {
/* texture micro tiling in use, minimum texture width is thus 16 bytes. /* texture micro tiling in use, minimum texture width is thus 16 bytes.
@ -1750,9 +1725,28 @@ static int radeon_cp_dispatch_texture( DRMFILE filp,
	}

	buf->filp = filp;
-	buf->used = (dwords + 8) * sizeof(u32);
-	radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
-	radeon_cp_discard_buffer( dev, buf );
+	buf->used = size;
+	offset = dev_priv->gart_buffers_offset + buf->offset;
+	BEGIN_RING(9);
+	OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
+	OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+		 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+		 RADEON_GMC_BRUSH_NONE |
+		 (format << 8) |
+		 RADEON_GMC_SRC_DATATYPE_COLOR |
+		 RADEON_ROP3_S |
+		 RADEON_DP_SRC_SOURCE_MEMORY |
+		 RADEON_GMC_CLR_CMP_CNTL_DIS |
+		 RADEON_GMC_WR_MSK_DIS );
+	OUT_RING((spitch << 22) | (offset >> 10));
+	OUT_RING((texpitch << 22) | (tex->offset >> 10));
+	OUT_RING(0);
+	OUT_RING((image->x << 16) | image->y);
+	OUT_RING((image->width << 16) | height);
+	RADEON_WAIT_UNTIL_2D_IDLE();
+	ADVANCE_RING();
+
+	radeon_cp_discard_buffer(dev, buf);
	/* Update the input parameters for next time */
	image->y += height;
@ -2797,6 +2791,17 @@ static int radeon_cp_cmdbuf( DRM_IOCTL_ARGS )
	orig_nbox = cmdbuf.nbox;

+	if(dev_priv->microcode_version == UCODE_R300) {
+		int temp;
+		temp=r300_do_cp_cmdbuf(dev, filp, filp_priv, &cmdbuf);
+
+		if (orig_bufsz != 0)
+			drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
+
+		return temp;
+	}
+
+	/* microcode_version != r300 */
	while ( cmdbuf.bufsz >= sizeof(header) ) {

		header.i = *(int *)cmdbuf.buf;

File diff suppressed because it is too large

View File

@ -0,0 +1,209 @@
/* savage_drm.h -- Public header for the savage driver
*
* Copyright 2004 Felix Kuehling
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __SAVAGE_DRM_H__
#define __SAVAGE_DRM_H__
#ifndef __SAVAGE_SAREA_DEFINES__
#define __SAVAGE_SAREA_DEFINES__
/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
* regions, subject to a minimum region size of (1<<16) == 64k.
*
* Clients may subdivide regions internally, but when sharing between
* clients, the region size is the minimum granularity.
*/
#define SAVAGE_CARD_HEAP 0
#define SAVAGE_AGP_HEAP 1
#define SAVAGE_NR_TEX_HEAPS 2
#define SAVAGE_NR_TEX_REGIONS 16
#define SAVAGE_LOG_MIN_TEX_REGION_SIZE 16
#endif /* __SAVAGE_SAREA_DEFINES__ */
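/* Editorial aside, not part of the patch: as a quick check of the
 * constants above, 1 << SAVAGE_LOG_MIN_TEX_REGION_SIZE == 1 << 16 ==
 * 65536 bytes, the 64k minimum region granularity the comment
 * describes. */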
typedef struct _drm_savage_sarea {
/* LRU lists for texture memory in agp space and on the card.
*/
drm_tex_region_t texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS+1];
unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
/* Mechanism to validate card state.
*/
int ctxOwner;
} drm_savage_sarea_t, *drm_savage_sarea_ptr;
/* Savage-specific ioctls
*/
#define DRM_SAVAGE_BCI_INIT 0x00
#define DRM_SAVAGE_BCI_CMDBUF 0x01
#define DRM_SAVAGE_BCI_EVENT_EMIT 0x02
#define DRM_SAVAGE_BCI_EVENT_WAIT 0x03
#define DRM_IOCTL_SAVAGE_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
#define DRM_IOCTL_SAVAGE_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
#define DRM_IOCTL_SAVAGE_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
#define DRM_IOCTL_SAVAGE_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
#define SAVAGE_DMA_PCI 1
#define SAVAGE_DMA_AGP 3
typedef struct drm_savage_init {
enum {
SAVAGE_INIT_BCI = 1,
SAVAGE_CLEANUP_BCI = 2
} func;
unsigned int sarea_priv_offset;
/* some parameters */
unsigned int cob_size;
unsigned int bci_threshold_lo, bci_threshold_hi;
unsigned int dma_type;
/* frame buffer layout */
unsigned int fb_bpp;
unsigned int front_offset, front_pitch;
unsigned int back_offset, back_pitch;
unsigned int depth_bpp;
unsigned int depth_offset, depth_pitch;
/* local textures */
unsigned int texture_offset;
unsigned int texture_size;
/* physical locations of non-permanent maps */
unsigned long status_offset;
unsigned long buffers_offset;
unsigned long agp_textures_offset;
unsigned long cmd_dma_offset;
} drm_savage_init_t;
typedef union drm_savage_cmd_header drm_savage_cmd_header_t;
typedef struct drm_savage_cmdbuf {
/* command buffer in client's address space */
drm_savage_cmd_header_t __user *cmd_addr;
unsigned int size; /* size of the command buffer in 64bit units */
unsigned int dma_idx; /* DMA buffer index to use */
int discard; /* discard DMA buffer when done */
/* vertex buffer in client's address space */
unsigned int __user *vb_addr;
unsigned int vb_size; /* size of client vertex buffer in bytes */
unsigned int vb_stride; /* stride of vertices in 32bit words */
/* boxes in client's address space */
drm_clip_rect_t __user *box_addr;
unsigned int nbox; /* number of clipping boxes */
} drm_savage_cmdbuf_t;
#define SAVAGE_WAIT_2D 0x1 /* wait for 2D idle before updating event tag */
#define SAVAGE_WAIT_3D 0x2 /* wait for 3D idle before updating event tag */
#define SAVAGE_WAIT_IRQ 0x4 /* emit or wait for IRQ, not implemented yet */
typedef struct drm_savage_event {
unsigned int count;
unsigned int flags;
} drm_savage_event_emit_t, drm_savage_event_wait_t;
/* Commands for the cmdbuf ioctl
*/
#define SAVAGE_CMD_STATE 0 /* a range of state registers */
#define SAVAGE_CMD_DMA_PRIM 1 /* vertices from DMA buffer */
#define SAVAGE_CMD_VB_PRIM 2 /* vertices from client vertex buffer */
#define SAVAGE_CMD_DMA_IDX 3 /* indexed vertices from DMA buffer */
#define SAVAGE_CMD_VB_IDX 4 /* indexed vertices client vertex buffer */
#define SAVAGE_CMD_CLEAR 5 /* clear buffers */
#define SAVAGE_CMD_SWAP 6 /* swap buffers */
/* Primitive types
*/
#define SAVAGE_PRIM_TRILIST 0 /* triangle list */
#define SAVAGE_PRIM_TRISTRIP 1 /* triangle strip */
#define SAVAGE_PRIM_TRIFAN 2 /* triangle fan */
#define SAVAGE_PRIM_TRILIST_201 3 /* reorder verts for correct flat
* shading on s3d */
/* Skip flags (vertex format)
*/
#define SAVAGE_SKIP_Z 0x01
#define SAVAGE_SKIP_W 0x02
#define SAVAGE_SKIP_C0 0x04
#define SAVAGE_SKIP_C1 0x08
#define SAVAGE_SKIP_S0 0x10
#define SAVAGE_SKIP_T0 0x20
#define SAVAGE_SKIP_ST0 0x30
#define SAVAGE_SKIP_S1 0x40
#define SAVAGE_SKIP_T1 0x80
#define SAVAGE_SKIP_ST1 0xc0
#define SAVAGE_SKIP_ALL_S3D 0x3f
#define SAVAGE_SKIP_ALL_S4 0xff
/* Buffer names for clear command
*/
#define SAVAGE_FRONT 0x1
#define SAVAGE_BACK 0x2
#define SAVAGE_DEPTH 0x4
/* 64-bit command header
*/
union drm_savage_cmd_header {
struct {
unsigned char cmd; /* command */
unsigned char pad0;
unsigned short pad1;
unsigned short pad2;
unsigned short pad3;
} cmd; /* generic */
struct {
unsigned char cmd;
unsigned char global; /* need idle engine? */
unsigned short count; /* number of consecutive registers */
unsigned short start; /* first register */
unsigned short pad3;
} state; /* SAVAGE_CMD_STATE */
struct {
unsigned char cmd;
unsigned char prim; /* primitive type */
unsigned short skip; /* vertex format (skip flags) */
unsigned short count; /* number of vertices */
unsigned short start; /* first vertex in DMA/vertex buffer */
} prim; /* SAVAGE_CMD_DMA_PRIM, SAVAGE_CMD_VB_PRIM */
struct {
unsigned char cmd;
unsigned char prim;
unsigned short skip;
unsigned short count; /* number of indices that follow */
unsigned short pad3;
} idx; /* SAVAGE_CMD_DMA_IDX, SAVAGE_CMD_VB_IDX */
struct {
unsigned char cmd;
unsigned char pad0;
unsigned short pad1;
unsigned int flags;
} clear0; /* SAVAGE_CMD_CLEAR */
struct {
unsigned int mask;
unsigned int value;
} clear1; /* SAVAGE_CMD_CLEAR data */
};
#endif
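/* Editorial sketch, not part of the patch: filling the 64-bit command
 * header for a vertex-buffer primitive, using the prim layout of the
 * union above.  The values here are illustrative only. */
static void savage_example_header(drm_savage_cmd_header_t *h)
{
	h->prim.cmd = SAVAGE_CMD_VB_PRIM;
	h->prim.prim = SAVAGE_PRIM_TRILIST;
	h->prim.skip = 0;	/* full vertex format, nothing skipped */
	h->prim.count = 3;	/* one triangle = three vertices */
	h->prim.start = 0;	/* first vertex in the client vertex buffer */
}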

View File

@ -0,0 +1,112 @@
/* savage_drv.c -- Savage driver for Linux
*
* Copyright 2004 Felix Kuehling
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/config.h>
#include "drmP.h"
#include "savage_drm.h"
#include "savage_drv.h"
#include "drm_pciids.h"
static int postinit( struct drm_device *dev, unsigned long flags )
{
DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d: %s\n",
DRIVER_NAME,
DRIVER_MAJOR,
DRIVER_MINOR,
DRIVER_PATCHLEVEL,
DRIVER_DATE,
dev->primary.minor,
pci_pretty_name(dev->pdev)
);
return 0;
}
static int version( drm_version_t *version )
{
int len;
version->version_major = DRIVER_MAJOR;
version->version_minor = DRIVER_MINOR;
version->version_patchlevel = DRIVER_PATCHLEVEL;
DRM_COPY( version->name, DRIVER_NAME );
DRM_COPY( version->date, DRIVER_DATE );
DRM_COPY( version->desc, DRIVER_DESC );
return 0;
}
static struct pci_device_id pciidlist[] = {
savage_PCI_IDS
};
extern drm_ioctl_desc_t savage_ioctls[];
extern int savage_max_ioctl;
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR |
DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
.dev_priv_size = sizeof(drm_savage_buf_priv_t),
.preinit = savage_preinit,
.postinit = postinit,
.postcleanup = savage_postcleanup,
.reclaim_buffers = savage_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
.version = version,
.ioctls = savage_ioctls,
.dma_ioctl = savage_bci_buffers,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
},
.pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
}
};
static int __init savage_init(void)
{
driver.num_ioctls = savage_max_ioctl;
return drm_init(&driver);
}
static void __exit savage_exit(void)
{
drm_exit(&driver);
}
module_init(savage_init);
module_exit(savage_exit);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_LICENSE("GPL and additional rights");

View File

@ -0,0 +1,579 @@
/* savage_drv.h -- Private header for the savage driver
*
* Copyright 2004 Felix Kuehling
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __SAVAGE_DRV_H__
#define __SAVAGE_DRV_H__
#define DRIVER_AUTHOR "Felix Kuehling"
#define DRIVER_NAME "savage"
#define DRIVER_DESC "Savage3D/MX/IX, Savage4, SuperSavage, Twister, ProSavage[DDR]"
#define DRIVER_DATE "20050313"
#define DRIVER_MAJOR 2
#define DRIVER_MINOR 4
#define DRIVER_PATCHLEVEL 1
/* Interface history:
*
* 1.x The DRM driver from the VIA/S3 code drop, basically a dummy
* 2.0 The first real DRM
* 2.1 Scissors registers managed by the DRM, 3D operations clipped by
* cliprects of the cmdbuf ioctl
* 2.2 Implemented SAVAGE_CMD_DMA_IDX and SAVAGE_CMD_VB_IDX
* 2.3 Event counters used by BCI_EVENT_EMIT/WAIT ioctls are now 32 bits
* wide and thus very long lived (unlikely to ever wrap). The size
* in the struct was 32 bits before, but only 16 bits were used
* 2.4 Implemented command DMA. Now drm_savage_init_t.cmd_dma_offset is
* actually used
*/
typedef struct drm_savage_age {
uint16_t event;
unsigned int wrap;
} drm_savage_age_t;
typedef struct drm_savage_buf_priv {
struct drm_savage_buf_priv *next;
struct drm_savage_buf_priv *prev;
drm_savage_age_t age;
drm_buf_t *buf;
} drm_savage_buf_priv_t;
typedef struct drm_savage_dma_page {
drm_savage_age_t age;
unsigned int used, flushed;
} drm_savage_dma_page_t;
#define SAVAGE_DMA_PAGE_SIZE 1024 /* in dwords */
/* Fake DMA buffer size in bytes. 4 pages. Allows a maximum command
* size of 16kbytes or 4k entries. Minimum requirement would be
* 10kbytes for 255 40-byte vertices in one drawing command. */
#define SAVAGE_FAKE_DMA_SIZE (SAVAGE_DMA_PAGE_SIZE*4*4)
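/* Editorial aside, not part of the patch: the arithmetic behind
 * SAVAGE_FAKE_DMA_SIZE is 1024 dwords/page * 4 bytes/dword * 4 pages
 * = 16384 bytes (16k), i.e. 4096 dword entries; the 255 * 40 = 10200
 * byte minimum quoted above fits with room to spare. */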
/* interesting bits of hardware state that are saved in dev_priv */
typedef union {
struct drm_savage_common_state {
uint32_t vbaddr;
} common;
struct {
unsigned char pad[sizeof(struct drm_savage_common_state)];
uint32_t texctrl, texaddr;
uint32_t scstart, new_scstart;
uint32_t scend, new_scend;
} s3d;
struct {
unsigned char pad[sizeof(struct drm_savage_common_state)];
uint32_t texdescr, texaddr0, texaddr1;
uint32_t drawctrl0, new_drawctrl0;
uint32_t drawctrl1, new_drawctrl1;
} s4;
} drm_savage_state_t;
/* these chip tags should match the ones in the 2D driver in savage_regs.h. */
enum savage_family {
S3_UNKNOWN = 0,
S3_SAVAGE3D,
S3_SAVAGE_MX,
S3_SAVAGE4,
S3_PROSAVAGE,
S3_TWISTER,
S3_PROSAVAGEDDR,
S3_SUPERSAVAGE,
S3_SAVAGE2000,
S3_LAST
};
#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
#define S3_SAVAGE4_SERIES(chip) ((chip==S3_SAVAGE4) \
|| (chip==S3_PROSAVAGE) \
|| (chip==S3_TWISTER) \
|| (chip==S3_PROSAVAGEDDR))
#define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE))
#define S3_SAVAGE_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000))
#define S3_MOBILE_TWISTER_SERIES(chip) ((chip==S3_TWISTER) \
||(chip==S3_PROSAVAGEDDR))
/* flags */
#define SAVAGE_IS_AGP 1
typedef struct drm_savage_private {
drm_savage_sarea_t *sarea_priv;
drm_savage_buf_priv_t head, tail;
/* who am I? */
enum savage_family chipset;
unsigned int cob_size;
unsigned int bci_threshold_lo, bci_threshold_hi;
unsigned int dma_type;
/* frame buffer layout */
unsigned int fb_bpp;
unsigned int front_offset, front_pitch;
unsigned int back_offset, back_pitch;
unsigned int depth_bpp;
unsigned int depth_offset, depth_pitch;
/* bitmap descriptors for swap and clear */
unsigned int front_bd, back_bd, depth_bd;
/* local textures */
unsigned int texture_offset;
unsigned int texture_size;
/* memory regions in physical memory */
drm_local_map_t *sarea;
drm_local_map_t *mmio;
drm_local_map_t *fb;
drm_local_map_t *aperture;
drm_local_map_t *status;
drm_local_map_t *agp_textures;
drm_local_map_t *cmd_dma;
drm_local_map_t fake_dma;
struct {
int handle;
unsigned long base, size;
} mtrr[3];
/* BCI and status-related stuff */
volatile uint32_t *status_ptr, *bci_ptr;
uint32_t status_used_mask;
uint16_t event_counter;
unsigned int event_wrap;
/* Savage4 command DMA */
drm_savage_dma_page_t *dma_pages;
unsigned int nr_dma_pages, first_dma_page, current_dma_page;
drm_savage_age_t last_dma_age;
/* saved hw state for global/local check on S3D */
uint32_t hw_draw_ctrl, hw_zbuf_ctrl;
/* and for scissors (global, so don't emit if not changed) */
uint32_t hw_scissors_start, hw_scissors_end;
drm_savage_state_t state;
/* after emitting a wait cmd Savage3D needs 63 nops before next DMA */
unsigned int waiting;
/* config/hardware-dependent function pointers */
int (*wait_fifo)(struct drm_savage_private *dev_priv, unsigned int n);
int (*wait_evnt)(struct drm_savage_private *dev_priv, uint16_t e);
/* Err, there is a macro wait_event in include/linux/wait.h.
* Avoid unwanted macro expansion. */
void (*emit_clip_rect)(struct drm_savage_private *dev_priv,
drm_clip_rect_t *pbox);
void (*dma_flush)(struct drm_savage_private *dev_priv);
} drm_savage_private_t;
/* ioctls */
extern int savage_bci_cmdbuf(DRM_IOCTL_ARGS);
extern int savage_bci_buffers(DRM_IOCTL_ARGS);
/* BCI functions */
extern uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
unsigned int flags);
extern void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf);
extern void savage_dma_reset(drm_savage_private_t *dev_priv);
extern void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page);
extern uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv,
unsigned int n);
extern int savage_preinit(drm_device_t *dev, unsigned long chipset);
extern int savage_postcleanup(drm_device_t *dev);
extern int savage_do_cleanup_bci(drm_device_t *dev);
extern void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp);
/* state functions */
extern void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
drm_clip_rect_t *pbox);
extern void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
drm_clip_rect_t *pbox);
#define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */
#define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */
#define SAVAGE_MMIO_SIZE 0x00080000 /* 512kB */
#define SAVAGE_APERTURE_OFFSET 0x02000000 /* 32MB */
#define SAVAGE_APERTURE_SIZE 0x05000000 /* 5 tiled surfaces, 16MB each */
#define SAVAGE_BCI_OFFSET 0x00010000 /* offset of the BCI region
* inside the MMIO region */
#define SAVAGE_BCI_FIFO_SIZE 32 /* number of entries in on-chip
* BCI FIFO */
/*
* MMIO registers
*/
#define SAVAGE_STATUS_WORD0 0x48C00
#define SAVAGE_STATUS_WORD1 0x48C04
#define SAVAGE_ALT_STATUS_WORD0 0x48C60
#define SAVAGE_FIFO_USED_MASK_S3D 0x0001ffff
#define SAVAGE_FIFO_USED_MASK_S4 0x001fffff
/* Copied from savage_bci.h in the 2D driver with some renaming. */
/* Bitmap descriptors */
#define SAVAGE_BD_STRIDE_SHIFT 0
#define SAVAGE_BD_BPP_SHIFT 16
#define SAVAGE_BD_TILE_SHIFT 24
#define SAVAGE_BD_BW_DISABLE (1<<28)
/* common: */
#define SAVAGE_BD_TILE_LINEAR 0
/* savage4, MX, IX, 3D */
#define SAVAGE_BD_TILE_16BPP 2
#define SAVAGE_BD_TILE_32BPP 3
/* twister, prosavage, DDR, supersavage, 2000 */
#define SAVAGE_BD_TILE_DEST 1
#define SAVAGE_BD_TILE_TEXTURE 2
/* GBD - BCI enable */
/* savage4, MX, IX, 3D */
#define SAVAGE_GBD_BCI_ENABLE 8
/* twister, prosavage, DDR, supersavage, 2000 */
#define SAVAGE_GBD_BCI_ENABLE_TWISTER 0
#define SAVAGE_GBD_BIG_ENDIAN 4
#define SAVAGE_GBD_LITTLE_ENDIAN 0
#define SAVAGE_GBD_64 1
/* Global Bitmap Descriptor */
#define SAVAGE_BCI_GLB_BD_LOW 0x8168
#define SAVAGE_BCI_GLB_BD_HIGH 0x816C
/*
* BCI registers
*/
/* Savage4/Twister/ProSavage 3D registers */
#define SAVAGE_DRAWLOCALCTRL_S4 0x1e
#define SAVAGE_TEXPALADDR_S4 0x1f
#define SAVAGE_TEXCTRL0_S4 0x20
#define SAVAGE_TEXCTRL1_S4 0x21
#define SAVAGE_TEXADDR0_S4 0x22
#define SAVAGE_TEXADDR1_S4 0x23
#define SAVAGE_TEXBLEND0_S4 0x24
#define SAVAGE_TEXBLEND1_S4 0x25
#define SAVAGE_TEXXPRCLR_S4 0x26 /* never used */
#define SAVAGE_TEXDESCR_S4 0x27
#define SAVAGE_FOGTABLE_S4 0x28
#define SAVAGE_FOGCTRL_S4 0x30
#define SAVAGE_STENCILCTRL_S4 0x31
#define SAVAGE_ZBUFCTRL_S4 0x32
#define SAVAGE_ZBUFOFF_S4 0x33
#define SAVAGE_DESTCTRL_S4 0x34
#define SAVAGE_DRAWCTRL0_S4 0x35
#define SAVAGE_DRAWCTRL1_S4 0x36
#define SAVAGE_ZWATERMARK_S4 0x37
#define SAVAGE_DESTTEXRWWATERMARK_S4 0x38
#define SAVAGE_TEXBLENDCOLOR_S4 0x39
/* Savage3D/MX/IX 3D registers */
#define SAVAGE_TEXPALADDR_S3D 0x18
#define SAVAGE_TEXXPRCLR_S3D 0x19 /* never used */
#define SAVAGE_TEXADDR_S3D 0x1A
#define SAVAGE_TEXDESCR_S3D 0x1B
#define SAVAGE_TEXCTRL_S3D 0x1C
#define SAVAGE_FOGTABLE_S3D 0x20
#define SAVAGE_FOGCTRL_S3D 0x30
#define SAVAGE_DRAWCTRL_S3D 0x31
#define SAVAGE_ZBUFCTRL_S3D 0x32
#define SAVAGE_ZBUFOFF_S3D 0x33
#define SAVAGE_DESTCTRL_S3D 0x34
#define SAVAGE_SCSTART_S3D 0x35
#define SAVAGE_SCEND_S3D 0x36
#define SAVAGE_ZWATERMARK_S3D 0x37
#define SAVAGE_DESTTEXRWWATERMARK_S3D 0x38
/* common stuff */
#define SAVAGE_VERTBUFADDR 0x3e
#define SAVAGE_BITPLANEWTMASK 0xd7
#define SAVAGE_DMABUFADDR 0x51
/* texture enable bits (needed for tex addr checking) */
#define SAVAGE_TEXCTRL_TEXEN_MASK 0x00010000 /* S3D */
#define SAVAGE_TEXDESCR_TEX0EN_MASK 0x02000000 /* S4 */
#define SAVAGE_TEXDESCR_TEX1EN_MASK 0x04000000 /* S4 */
/* Global fields in Savage4/Twister/ProSavage 3D registers:
*
* All texture registers and DrawLocalCtrl are local. All other
* registers are global. */
/* Global fields in Savage3D/MX/IX 3D registers:
*
* All texture registers are local. DrawCtrl and ZBufCtrl are
* partially local. All other registers are global.
*
* DrawCtrl global fields: cullMode, alphaTestCmpFunc, alphaTestEn, alphaRefVal
* ZBufCtrl global fields: zCmpFunc, zBufEn
*/
#define SAVAGE_DRAWCTRL_S3D_GLOBAL 0x03f3c00c
#define SAVAGE_ZBUFCTRL_S3D_GLOBAL 0x00000027
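/*
 * Illustrative sketch, not part of the original driver: a state
 * checker would compare only the global fields of a new DrawCtrl
 * value against the saved hardware state using the mask above.
 */
static inline uint32_t
savage_example_s3d_drawctrl_changed(const drm_savage_private_t *dev_priv,
                                    uint32_t new_draw_ctrl)
{
        /* Non-zero iff a global DrawCtrl field differs from the HW state. */
        return (new_draw_ctrl ^ dev_priv->hw_draw_ctrl) &
                SAVAGE_DRAWCTRL_S3D_GLOBAL;
}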
/* Masks for scissor bits (drawCtrl[01] on s4, scissorStart/End on s3d)
*/
#define SAVAGE_SCISSOR_MASK_S4 0x00fff7ff
#define SAVAGE_SCISSOR_MASK_S3D 0x07ff07ff
/*
* BCI commands
*/
#define BCI_CMD_NOP 0x40000000
#define BCI_CMD_RECT 0x48000000
#define BCI_CMD_RECT_XP 0x01000000
#define BCI_CMD_RECT_YP 0x02000000
#define BCI_CMD_SCANLINE 0x50000000
#define BCI_CMD_LINE 0x5C000000
#define BCI_CMD_LINE_LAST_PIXEL 0x58000000
#define BCI_CMD_BYTE_TEXT 0x63000000
#define BCI_CMD_NT_BYTE_TEXT 0x67000000
#define BCI_CMD_BIT_TEXT 0x6C000000
#define BCI_CMD_GET_ROP(cmd) (((cmd) >> 16) & 0xFF)
#define BCI_CMD_SET_ROP(cmd, rop) ((cmd) |= (((rop) & 0xFF) << 16))
#define BCI_CMD_SEND_COLOR 0x00008000
#define BCI_CMD_CLIP_NONE 0x00000000
#define BCI_CMD_CLIP_CURRENT 0x00002000
#define BCI_CMD_CLIP_LR 0x00004000
#define BCI_CMD_CLIP_NEW 0x00006000
#define BCI_CMD_DEST_GBD 0x00000000
#define BCI_CMD_DEST_PBD 0x00000800
#define BCI_CMD_DEST_PBD_NEW 0x00000C00
#define BCI_CMD_DEST_SBD 0x00001000
#define BCI_CMD_DEST_SBD_NEW 0x00001400
#define BCI_CMD_SRC_TRANSPARENT 0x00000200
#define BCI_CMD_SRC_SOLID 0x00000000
#define BCI_CMD_SRC_GBD 0x00000020
#define BCI_CMD_SRC_COLOR 0x00000040
#define BCI_CMD_SRC_MONO 0x00000060
#define BCI_CMD_SRC_PBD_COLOR 0x00000080
#define BCI_CMD_SRC_PBD_MONO 0x000000A0
#define BCI_CMD_SRC_PBD_COLOR_NEW 0x000000C0
#define BCI_CMD_SRC_PBD_MONO_NEW 0x000000E0
#define BCI_CMD_SRC_SBD_COLOR 0x00000100
#define BCI_CMD_SRC_SBD_MONO 0x00000120
#define BCI_CMD_SRC_SBD_COLOR_NEW 0x00000140
#define BCI_CMD_SRC_SBD_MONO_NEW 0x00000160
#define BCI_CMD_PAT_TRANSPARENT 0x00000010
#define BCI_CMD_PAT_NONE 0x00000000
#define BCI_CMD_PAT_COLOR 0x00000002
#define BCI_CMD_PAT_MONO 0x00000003
#define BCI_CMD_PAT_PBD_COLOR 0x00000004
#define BCI_CMD_PAT_PBD_MONO 0x00000005
#define BCI_CMD_PAT_PBD_COLOR_NEW 0x00000006
#define BCI_CMD_PAT_PBD_MONO_NEW 0x00000007
#define BCI_CMD_PAT_SBD_COLOR 0x00000008
#define BCI_CMD_PAT_SBD_MONO 0x00000009
#define BCI_CMD_PAT_SBD_COLOR_NEW 0x0000000A
#define BCI_CMD_PAT_SBD_MONO_NEW 0x0000000B
#define BCI_BD_BW_DISABLE 0x10000000
#define BCI_BD_TILE_MASK 0x03000000
#define BCI_BD_TILE_NONE 0x00000000
#define BCI_BD_TILE_16 0x02000000
#define BCI_BD_TILE_32 0x03000000
#define BCI_BD_GET_BPP(bd) (((bd) >> 16) & 0xFF)
#define BCI_BD_SET_BPP(bd, bpp) ((bd) |= (((bpp) & 0xFF) << 16))
#define BCI_BD_GET_STRIDE(bd) ((bd) & 0xFFFF)
#define BCI_BD_SET_STRIDE(bd, st) ((bd) |= ((st) & 0xFFFF))
#define BCI_CMD_SET_REGISTER 0x96000000
#define BCI_CMD_WAIT 0xC0000000
#define BCI_CMD_WAIT_3D 0x00010000
#define BCI_CMD_WAIT_2D 0x00020000
#define BCI_CMD_UPDATE_EVENT_TAG 0x98000000
#define BCI_CMD_DRAW_PRIM 0x80000000
#define BCI_CMD_DRAW_INDEXED_PRIM 0x88000000
#define BCI_CMD_DRAW_CONT 0x01000000
#define BCI_CMD_DRAW_TRILIST 0x00000000
#define BCI_CMD_DRAW_TRISTRIP 0x02000000
#define BCI_CMD_DRAW_TRIFAN 0x04000000
#define BCI_CMD_DRAW_SKIPFLAGS 0x000000ff
#define BCI_CMD_DRAW_NO_Z 0x00000001
#define BCI_CMD_DRAW_NO_W 0x00000002
#define BCI_CMD_DRAW_NO_CD 0x00000004
#define BCI_CMD_DRAW_NO_CS 0x00000008
#define BCI_CMD_DRAW_NO_U0 0x00000010
#define BCI_CMD_DRAW_NO_V0 0x00000020
#define BCI_CMD_DRAW_NO_UV0 0x00000030
#define BCI_CMD_DRAW_NO_U1 0x00000040
#define BCI_CMD_DRAW_NO_V1 0x00000080
#define BCI_CMD_DRAW_NO_UV1 0x000000c0
#define BCI_CMD_DMA 0xa8000000
#define BCI_W_H(w, h) ((((h) << 16) | (w)) & 0x0FFF0FFF)
#define BCI_X_Y(x, y) ((((y) << 16) | (x)) & 0x0FFF0FFF)
#define BCI_X_W(x, w) ((((w) << 16) | (x)) & 0x0FFF0FFF)
#define BCI_CLIP_LR(l, r) ((((r) << 16) | (l)) & 0x0FFF0FFF)
#define BCI_CLIP_TL(t, l) ((((t) << 16) | (l)) & 0x0FFF0FFF)
#define BCI_CLIP_BR(b, r) ((((b) << 16) | (r)) & 0x0FFF0FFF)
#define BCI_LINE_X_Y(x, y) (((y) << 16) | ((x) & 0xFFFF))
#define BCI_LINE_STEPS(diag, axi) (((axi) << 16) | ((diag) & 0xFFFF))
#define BCI_LINE_MISC(maj, ym, xp, yp, err) \
(((maj) & 0x1FFF) | \
((ym) ? 1<<13 : 0) | \
((xp) ? 1<<14 : 0) | \
((yp) ? 1<<15 : 0) | \
((err) << 16))
/*
* common commands
*/
#define BCI_SET_REGISTERS( first, n ) \
BCI_WRITE(BCI_CMD_SET_REGISTER | \
((uint32_t)(n) & 0xff) << 16 | \
((uint32_t)(first) & 0xffff))
#define DMA_SET_REGISTERS( first, n ) \
DMA_WRITE(BCI_CMD_SET_REGISTER | \
((uint32_t)(n) & 0xff) << 16 | \
((uint32_t)(first) & 0xffff))
#define BCI_DRAW_PRIMITIVE(n, type, skip) \
BCI_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
((n) << 16))
#define DMA_DRAW_PRIMITIVE(n, type, skip) \
DMA_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
((n) << 16))
#define BCI_DRAW_INDICES_S3D(n, type, i0) \
BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \
((n) << 16) | (i0))
#define BCI_DRAW_INDICES_S4(n, type, skip) \
BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \
(skip) | ((n) << 16))
#define BCI_DMA(n) \
BCI_WRITE(BCI_CMD_DMA | (((n) >> 1) - 1))
/*
* access to MMIO
*/
#define SAVAGE_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
#define SAVAGE_WRITE(reg) DRM_WRITE32( dev_priv->mmio, (reg) )
/*
* access to the burst command interface (BCI)
*/
#define SAVAGE_BCI_DEBUG 1
#define BCI_LOCALS volatile uint32_t *bci_ptr;
#define BEGIN_BCI( n ) do { \
dev_priv->wait_fifo(dev_priv, (n)); \
bci_ptr = dev_priv->bci_ptr; \
} while(0)
#define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val)
#define BCI_COPY_FROM_USER(src,n) do { \
unsigned int i; \
for (i = 0; i < n; ++i) { \
uint32_t val; \
DRM_GET_USER_UNCHECKED(val, &((uint32_t*)(src))[i]); \
BCI_WRITE(val); \
} \
} while(0)
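/*
 * Illustrative sketch, not part of the original driver: emitting one
 * register write directly through the BCI. BEGIN_BCI waits until the
 * on-chip FIFO has room for n entries and latches the BCI pointer;
 * each BCI_WRITE then pushes a single dword.
 */
static inline void savage_example_bci_set_reg(drm_savage_private_t *dev_priv,
                                              unsigned int reg, uint32_t val)
{
        BCI_LOCALS;

        BEGIN_BCI(2);                   /* command dword + one data dword */
        BCI_SET_REGISTERS(reg, 1);      /* SET_REGISTER command header */
        BCI_WRITE(val);                 /* register payload */
}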
/*
* command DMA support
*/
#define SAVAGE_DMA_DEBUG 1
#define DMA_LOCALS uint32_t *dma_ptr;
#define BEGIN_DMA( n ) do { \
unsigned int cur = dev_priv->current_dma_page; \
unsigned int rest = SAVAGE_DMA_PAGE_SIZE - \
dev_priv->dma_pages[cur].used; \
if ((n) > rest) { \
dma_ptr = savage_dma_alloc(dev_priv, (n)); \
} else { /* fast path for small allocations */ \
dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + \
cur * SAVAGE_DMA_PAGE_SIZE + \
dev_priv->dma_pages[cur].used; \
if (dev_priv->dma_pages[cur].used == 0) \
savage_dma_wait(dev_priv, cur); \
dev_priv->dma_pages[cur].used += (n); \
} \
} while(0)
#define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val)
#define DMA_COPY_FROM_USER(src,n) do { \
DRM_COPY_FROM_USER_UNCHECKED(dma_ptr, (src), (n)*4); \
dma_ptr += n; \
} while(0)
#if SAVAGE_DMA_DEBUG
#define DMA_COMMIT() do { \
unsigned int cur = dev_priv->current_dma_page; \
uint32_t *expected = (uint32_t *)dev_priv->cmd_dma->handle + \
cur * SAVAGE_DMA_PAGE_SIZE + \
dev_priv->dma_pages[cur].used; \
if (dma_ptr != expected) { \
DRM_ERROR("DMA allocation and use don't match: " \
"%p != %p\n", expected, dma_ptr); \
savage_dma_reset(dev_priv); \
} \
} while(0)
#else
#define DMA_COMMIT() do {/* nothing */} while(0)
#endif
#define DMA_FLUSH() dev_priv->dma_flush(dev_priv)
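/*
 * Illustrative sketch, not part of the original driver: the usual
 * life cycle of the command-DMA macros. BEGIN_DMA reserves space in
 * the current DMA page (or allocates a fresh one), DMA_WRITE fills
 * it, DMA_COMMIT sanity-checks the allocation in debug builds, and
 * DMA_FLUSH hands the accumulated commands to the hardware.
 */
static inline void savage_example_dma_set_reg(drm_savage_private_t *dev_priv,
                                              unsigned int reg, uint32_t val)
{
        DMA_LOCALS;

        BEGIN_DMA(2);                   /* command dword + one data dword */
        DMA_SET_REGISTERS(reg, 1);      /* SET_REGISTER command header */
        DMA_WRITE(val);                 /* register payload */
        DMA_COMMIT();
        DMA_FLUSH();
}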
/* Buffer aging via event tag
*/
#define UPDATE_EVENT_COUNTER( ) do { \
if (dev_priv->status_ptr) { \
uint16_t count; \
/* coordinate with Xserver */ \
count = dev_priv->status_ptr[1023]; \
if (count < dev_priv->event_counter) \
dev_priv->event_wrap++; \
dev_priv->event_counter = count; \
} \
} while(0)
#define SET_AGE( age, e, w ) do { \
(age)->event = e; \
(age)->wrap = w; \
} while(0)
#define TEST_AGE( age, e, w ) \
( (age)->wrap < (w) || ( (age)->wrap == (w) && (age)->event <= (e) ) )
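/*
 * Illustrative sketch, not part of the original driver: the typical
 * buffer-aging flow. After submitting commands that reference a
 * buffer, stamp it via SET_AGE() with the event returned by
 * savage_bci_emit_event(); before reusing the buffer, refresh the
 * counter and test whether that event has retired.
 */
static inline int savage_example_buffer_idle(drm_savage_private_t *dev_priv,
                                             const drm_savage_age_t *age)
{
        UPDATE_EVENT_COUNTER();
        return TEST_AGE(age, dev_priv->event_counter, dev_priv->event_wrap);
}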
#endif /* __SAVAGE_DRV_H__ */

File diff suppressed because it is too large

drivers/net/Kconfig

@@ -1923,6 +1923,17 @@ config R8169_VLAN

	  If in doubt, say Y.

config SIS190
	tristate "SiS190 gigabit ethernet support"
	depends on PCI
	select CRC32
	select MII
	---help---
	  Say Y here if you have a SiS 190 PCI Gigabit Ethernet adapter.

	  To compile this driver as a module, choose M here: the module
	  will be called sis190.  This is recommended.

config SKGE
	tristate "New SysKonnect GigaEthernet support (EXPERIMENTAL)"
	depends on PCI && EXPERIMENTAL

@@ -2093,6 +2104,25 @@ endmenu
menu "Ethernet (10000 Mbit)" menu "Ethernet (10000 Mbit)"
depends on !UML depends on !UML
config CHELSIO_T1
tristate "Chelsio 10Gb Ethernet support"
depends on PCI
help
This driver supports Chelsio N110 and N210 models 10Gb Ethernet
cards. More information about adapter features and performance
tuning is in <file:Documentation/networking/cxgb.txt>.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.
For customer support, please visit our customer support page at
<http://www.chelsio.com/support.htm>.
Please send feedback to <linux-bugs@chelsio.com>.
To compile this driver as a module, choose M here: the module
will be called cxgb.
config IXGB config IXGB
tristate "Intel(R) PRO/10GbE support" tristate "Intel(R) PRO/10GbE support"
depends on PCI depends on PCI

drivers/net/Makefile

@@ -9,6 +9,7 @@ endif
obj-$(CONFIG_E1000) += e1000/
obj-$(CONFIG_IBM_EMAC) += ibm_emac/
obj-$(CONFIG_IXGB) += ixgb/
obj-$(CONFIG_CHELSIO_T1) += chelsio/
obj-$(CONFIG_BONDING) += bonding/
obj-$(CONFIG_GIANFAR) += gianfar_driver.o

@@ -42,6 +43,7 @@ obj-$(CONFIG_EEPRO100) += eepro100.o
obj-$(CONFIG_E100) += e100.o
obj-$(CONFIG_TLAN) += tlan.o
obj-$(CONFIG_EPIC100) += epic100.o
obj-$(CONFIG_SIS190) += sis190.o
obj-$(CONFIG_SIS900) += sis900.o
obj-$(CONFIG_YELLOWFIN) += yellowfin.o
obj-$(CONFIG_ACENIC) += acenic.o

drivers/net/chelsio/Makefile

@@ -0,0 +1,11 @@
#
# Chelsio 10Gb NIC driver for Linux.
#
obj-$(CONFIG_CHELSIO_T1) += cxgb.o
EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/chelsio $(DEBUG_FLAGS)
cxgb-objs := cxgb2.o espi.o pm3393.o sge.o subr.o mv88x201x.o

drivers/net/chelsio/common.h

@@ -0,0 +1,314 @@
/*****************************************************************************
* *
* File: common.h *
* $Revision: 1.21 $ *
* $Date: 2005/06/22 00:43:25 $ *
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#ifndef _CXGB_COMMON_H_
#define _CXGB_COMMON_H_
#include <linux/config.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/init.h>
#include <asm/io.h>
#include <linux/pci_ids.h>
#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver"
#define DRV_NAME "cxgb"
#define DRV_VERSION "2.1.1"
#define PFX DRV_NAME ": "
#define CH_ERR(fmt, ...) printk(KERN_ERR PFX fmt, ## __VA_ARGS__)
#define CH_WARN(fmt, ...) printk(KERN_WARNING PFX fmt, ## __VA_ARGS__)
#define CH_ALERT(fmt, ...) printk(KERN_ALERT PFX fmt, ## __VA_ARGS__)
#define CH_DEVICE(devid, ssid, idx) \
{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
#define SUPPORTED_PAUSE (1 << 13)
#define SUPPORTED_LOOPBACK (1 << 15)
#define ADVERTISED_PAUSE (1 << 13)
#define ADVERTISED_ASYM_PAUSE (1 << 14)
typedef struct adapter adapter_t;
void t1_elmer0_ext_intr(adapter_t *adapter);
void t1_link_changed(adapter_t *adapter, int port_id, int link_status,
int speed, int duplex, int fc);
struct t1_rx_mode {
struct net_device *dev;
u32 idx;
struct dev_mc_list *list;
};
#define t1_rx_mode_promisc(rm) ((rm)->dev->flags & IFF_PROMISC)
#define t1_rx_mode_allmulti(rm) ((rm)->dev->flags & IFF_ALLMULTI)
#define t1_rx_mode_mc_cnt(rm) ((rm)->dev->mc_count)
static inline u8 *t1_get_next_mcaddr(struct t1_rx_mode *rm)
{
u8 *addr = NULL;
if (rm->idx++ < rm->dev->mc_count) {
addr = rm->list->dmi_addr;
rm->list = rm->list->next;
}
return addr;
}
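/*
 * Illustrative sketch, not part of the original driver: how a MAC
 * implementation would consume this iterator when (re)programming its
 * multicast filter. program_mc_addr() is a hypothetical callback, not
 * a real driver function.
 */
static inline void t1_example_walk_mcaddrs(struct t1_rx_mode *rm,
                                           void (*program_mc_addr)(const u8 *))
{
        const u8 *addr;

        while ((addr = t1_get_next_mcaddr(rm)) != NULL)
                program_mc_addr(addr); /* one 6-byte Ethernet address */
}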
#define MAX_NPORTS 4
#define SPEED_INVALID 0xffff
#define DUPLEX_INVALID 0xff
enum {
CHBT_BOARD_N110,
CHBT_BOARD_N210
};
enum {
CHBT_TERM_T1,
CHBT_TERM_T2
};
enum {
CHBT_MAC_PM3393,
};
enum {
CHBT_PHY_88X2010,
};
enum {
PAUSE_RX = 1 << 0,
PAUSE_TX = 1 << 1,
PAUSE_AUTONEG = 1 << 2
};
/* Revisions of T1 chip */
enum {
TERM_T1A = 0,
TERM_T1B = 1,
TERM_T2 = 3
};
struct sge_params {
unsigned int cmdQ_size[2];
unsigned int freelQ_size[2];
unsigned int large_buf_capacity;
unsigned int rx_coalesce_usecs;
unsigned int last_rx_coalesce_raw;
unsigned int default_rx_coalesce_usecs;
unsigned int sample_interval_usecs;
unsigned int coalesce_enable;
unsigned int polling;
};
struct chelsio_pci_params {
unsigned short speed;
unsigned char width;
unsigned char is_pcix;
};
struct adapter_params {
struct sge_params sge;
struct chelsio_pci_params pci;
const struct board_info *brd_info;
unsigned int nports; /* # of ethernet ports */
unsigned int stats_update_period;
unsigned short chip_revision;
unsigned char chip_version;
};
struct link_config {
unsigned int supported; /* link capabilities */
unsigned int advertising; /* advertised capabilities */
unsigned short requested_speed; /* speed user has requested */
unsigned short speed; /* actual link speed */
unsigned char requested_duplex; /* duplex user has requested */
unsigned char duplex; /* actual link duplex */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
unsigned char autoneg; /* autonegotiating? */
};
struct cmac;
struct cphy;
struct port_info {
struct net_device *dev;
struct cmac *mac;
struct cphy *phy;
struct link_config link_config;
struct net_device_stats netstats;
};
struct sge;
struct peespi;
struct adapter {
u8 *regs;
struct pci_dev *pdev;
unsigned long registered_device_map;
unsigned long open_device_map;
unsigned long flags;
const char *name;
int msg_enable;
u32 mmio_len;
struct work_struct ext_intr_handler_task;
struct adapter_params params;
struct vlan_group *vlan_grp;
/* Terminator modules. */
struct sge *sge;
struct peespi *espi;
struct port_info port[MAX_NPORTS];
struct work_struct stats_update_task;
struct timer_list stats_update_timer;
struct semaphore mib_mutex;
spinlock_t tpi_lock;
spinlock_t work_lock;
/* guards async operations */
spinlock_t async_lock ____cacheline_aligned;
u32 slow_intr_mask;
};
enum { /* adapter flags */
FULL_INIT_DONE = 1 << 0,
TSO_CAPABLE = 1 << 2,
TCP_CSUM_CAPABLE = 1 << 3,
UDP_CSUM_CAPABLE = 1 << 4,
VLAN_ACCEL_CAPABLE = 1 << 5,
RX_CSUM_ENABLED = 1 << 6,
};
struct mdio_ops;
struct gmac;
struct gphy;
struct board_info {
unsigned char board;
unsigned char port_number;
unsigned long caps;
unsigned char chip_term;
unsigned char chip_mac;
unsigned char chip_phy;
unsigned int clock_core;
unsigned int clock_mc3;
unsigned int clock_mc4;
unsigned int espi_nports;
unsigned int clock_cspi;
unsigned int clock_elmer0;
unsigned char mdio_mdien;
unsigned char mdio_mdiinv;
unsigned char mdio_mdc;
unsigned char mdio_phybaseaddr;
struct gmac *gmac;
struct gphy *gphy;
struct mdio_ops *mdio_ops;
const char *desc;
};
extern struct pci_device_id t1_pci_tbl[];
static inline int adapter_matches_type(const adapter_t *adapter,
int version, int revision)
{
return adapter->params.chip_version == version &&
adapter->params.chip_revision == revision;
}
#define t1_is_T1B(adap) adapter_matches_type(adap, CHBT_TERM_T1, TERM_T1B)
#define is_T2(adap) adapter_matches_type(adap, CHBT_TERM_T2, TERM_T2)
/* Returns true if an adapter supports VLAN acceleration and TSO */
static inline int vlan_tso_capable(const adapter_t *adapter)
{
return !t1_is_T1B(adapter);
}
#define for_each_port(adapter, iter) \
for (iter = 0; iter < (adapter)->params.nports; ++iter)
#define board_info(adapter) ((adapter)->params.brd_info)
#define is_10G(adapter) (board_info(adapter)->caps & SUPPORTED_10000baseT_Full)
static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
{
return board_info(adap)->clock_core / 1000000;
}
extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
extern void t1_interrupts_enable(adapter_t *adapter);
extern void t1_interrupts_disable(adapter_t *adapter);
extern void t1_interrupts_clear(adapter_t *adapter);
extern int elmer0_ext_intr_handler(adapter_t *adapter);
extern int t1_slow_intr_handler(adapter_t *adapter);
extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
extern const struct board_info *t1_get_board_info(unsigned int board_id);
extern const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
unsigned short ssid);
extern int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data);
extern int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
struct adapter_params *p);
extern int t1_init_hw_modules(adapter_t *adapter);
extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
extern void t1_free_sw_modules(adapter_t *adapter);
extern void t1_fatal_err(adapter_t *adapter);
extern void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable);
extern void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable);
extern void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable);
#endif /* _CXGB_COMMON_H_ */

drivers/net/chelsio/cphy.h

@@ -0,0 +1,148 @@
/*****************************************************************************
* *
* File: cphy.h *
* $Revision: 1.7 $ *
* $Date: 2005/06/21 18:29:47 $ *
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#ifndef _CXGB_CPHY_H_
#define _CXGB_CPHY_H_
#include "common.h"
struct mdio_ops {
void (*init)(adapter_t *adapter, const struct board_info *bi);
int (*read)(adapter_t *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int *val);
int (*write)(adapter_t *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int val);
};
/* PHY interrupt types */
enum {
cphy_cause_link_change = 0x1,
cphy_cause_error = 0x2
};
struct cphy;
/* PHY operations */
struct cphy_ops {
void (*destroy)(struct cphy *);
int (*reset)(struct cphy *, int wait);
int (*interrupt_enable)(struct cphy *);
int (*interrupt_disable)(struct cphy *);
int (*interrupt_clear)(struct cphy *);
int (*interrupt_handler)(struct cphy *);
int (*autoneg_enable)(struct cphy *);
int (*autoneg_disable)(struct cphy *);
int (*autoneg_restart)(struct cphy *);
int (*advertise)(struct cphy *phy, unsigned int advertise_map);
int (*set_loopback)(struct cphy *, int on);
int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
int *duplex, int *fc);
};
/* A PHY instance */
struct cphy {
int addr; /* PHY address */
adapter_t *adapter; /* associated adapter */
struct cphy_ops *ops; /* PHY operations */
int (*mdio_read)(adapter_t *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int *val);
int (*mdio_write)(adapter_t *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int val);
struct cphy_instance *instance;
};
/* Convenience MDIO read/write wrappers */
static inline int mdio_read(struct cphy *cphy, int mmd, int reg,
unsigned int *valp)
{
return cphy->mdio_read(cphy->adapter, cphy->addr, mmd, reg, valp);
}
static inline int mdio_write(struct cphy *cphy, int mmd, int reg,
unsigned int val)
{
return cphy->mdio_write(cphy->adapter, cphy->addr, mmd, reg, val);
}
static inline int simple_mdio_read(struct cphy *cphy, int reg,
unsigned int *valp)
{
return mdio_read(cphy, 0, reg, valp);
}
static inline int simple_mdio_write(struct cphy *cphy, int reg,
unsigned int val)
{
return mdio_write(cphy, 0, reg, val);
}
/* Convenience initializer */
static inline void cphy_init(struct cphy *phy, adapter_t *adapter,
int phy_addr, struct cphy_ops *phy_ops,
struct mdio_ops *mdio_ops)
{
phy->adapter = adapter;
phy->addr = phy_addr;
phy->ops = phy_ops;
if (mdio_ops) {
phy->mdio_read = mdio_ops->read;
phy->mdio_write = mdio_ops->write;
}
}
/* Operations of the PHY-instance factory */
struct gphy {
/* Construct a PHY instance with the given PHY address */
struct cphy *(*create)(adapter_t *adapter, int phy_addr,
struct mdio_ops *mdio_ops);
/*
* Reset the PHY chip. This resets the whole PHY chip, not individual
* ports.
*/
int (*reset)(adapter_t *adapter);
};
extern struct gphy t1_mv88x201x_ops;
extern struct gphy t1_dummy_phy_ops;
#endif /* _CXGB_CPHY_H_ */

drivers/net/chelsio/cpl5_cmd.h

@@ -0,0 +1,145 @@
/*****************************************************************************
* *
* File: cpl5_cmd.h *
* $Revision: 1.6 $ *
* $Date: 2005/06/21 18:29:47 $ *
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#ifndef _CXGB_CPL5_CMD_H_
#define _CXGB_CPL5_CMD_H_
#include <asm/byteorder.h>
#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
#error "Adjust your <asm/byteorder.h> defines"
#endif
enum CPL_opcode {
CPL_RX_PKT = 0xAD,
CPL_TX_PKT = 0xB2,
CPL_TX_PKT_LSO = 0xB6,
};
enum { /* TX_PKT_LSO ethernet types */
CPL_ETH_II,
CPL_ETH_II_VLAN,
CPL_ETH_802_3,
CPL_ETH_802_3_VLAN
};
struct cpl_rx_data {
u32 rsvd0;
u32 len;
u32 seq;
u16 urg;
u8 rsvd1;
u8 status;
};
/*
 * We want this header's alignment to be no more stringent than 2-byte
 * alignment. All fields are u8 or u16 except for the length. However,
 * that field is not used directly, so we break it into two 16-bit parts
 * to easily meet our alignment needs.
 */
struct cpl_tx_pkt {
u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 iff:4;
u8 ip_csum_dis:1;
u8 l4_csum_dis:1;
u8 vlan_valid:1;
u8 rsvd:1;
#else
u8 rsvd:1;
u8 vlan_valid:1;
u8 l4_csum_dis:1;
u8 ip_csum_dis:1;
u8 iff:4;
#endif
u16 vlan;
u16 len_hi;
u16 len_lo;
};
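/*
 * Illustrative sketch, not part of the original driver: splitting a
 * 32-bit payload length into the two 16-bit halves described above.
 * This assumes the kernel u16/u32 types are in scope and deliberately
 * elides any byte-order conversion the hardware may require.
 */
static inline void cpl_tx_pkt_example_set_len(struct cpl_tx_pkt *pkt, u32 len)
{
        pkt->len_hi = (u16)(len >> 16); /* upper 16 bits */
        pkt->len_lo = (u16)len;         /* lower 16 bits */
}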
struct cpl_tx_pkt_lso {
u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 iff:4;
u8 ip_csum_dis:1;
u8 l4_csum_dis:1;
u8 vlan_valid:1;
u8 rsvd:1;
#else
u8 rsvd:1;
u8 vlan_valid:1;
u8 l4_csum_dis:1;
u8 ip_csum_dis:1;
u8 iff:4;
#endif
u16 vlan;
u32 len;
u32 rsvd2;
u8 rsvd3;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 tcp_hdr_words:4;
u8 ip_hdr_words:4;
#else
u8 ip_hdr_words:4;
u8 tcp_hdr_words:4;
#endif
u16 eth_type_mss;
};
struct cpl_rx_pkt {
u8 opcode;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 iff:4;
u8 csum_valid:1;
u8 bad_pkt:1;
u8 vlan_valid:1;
u8 rsvd:1;
#else
u8 rsvd:1;
u8 vlan_valid:1;
u8 bad_pkt:1;
u8 csum_valid:1;
u8 iff:4;
#endif
u16 csum;
u16 vlan;
u16 len;
};
#endif /* _CXGB_CPL5_CMD_H_ */

drivers/net/chelsio/cxgb2.c

File diff suppressed because it is too large

drivers/net/chelsio/elmer0.h

@@ -0,0 +1,151 @@
/*****************************************************************************
* *
* File: elmer0.h *
* $Revision: 1.6 $ *
* $Date: 2005/06/21 22:49:43 $ *
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#ifndef _CXGB_ELMER0_H_
#define _CXGB_ELMER0_H_
/* ELMER0 registers */
#define A_ELMER0_VERSION 0x100000
#define A_ELMER0_PHY_CFG 0x100004
#define A_ELMER0_INT_ENABLE 0x100008
#define A_ELMER0_INT_CAUSE 0x10000c
#define A_ELMER0_GPI_CFG 0x100010
#define A_ELMER0_GPI_STAT 0x100014
#define A_ELMER0_GPO 0x100018
#define A_ELMER0_PORT0_MI1_CFG 0x400000
#define S_MI1_MDI_ENABLE 0
#define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE)
#define F_MI1_MDI_ENABLE V_MI1_MDI_ENABLE(1U)
#define S_MI1_MDI_INVERT 1
#define V_MI1_MDI_INVERT(x) ((x) << S_MI1_MDI_INVERT)
#define F_MI1_MDI_INVERT V_MI1_MDI_INVERT(1U)
#define S_MI1_PREAMBLE_ENABLE 2
#define V_MI1_PREAMBLE_ENABLE(x) ((x) << S_MI1_PREAMBLE_ENABLE)
#define F_MI1_PREAMBLE_ENABLE V_MI1_PREAMBLE_ENABLE(1U)
#define S_MI1_SOF 3
#define M_MI1_SOF 0x3
#define V_MI1_SOF(x) ((x) << S_MI1_SOF)
#define G_MI1_SOF(x) (((x) >> S_MI1_SOF) & M_MI1_SOF)
#define S_MI1_CLK_DIV 5
#define M_MI1_CLK_DIV 0xff
#define V_MI1_CLK_DIV(x) ((x) << S_MI1_CLK_DIV)
#define G_MI1_CLK_DIV(x) (((x) >> S_MI1_CLK_DIV) & M_MI1_CLK_DIV)
#define A_ELMER0_PORT0_MI1_ADDR 0x400004
#define S_MI1_REG_ADDR 0
#define M_MI1_REG_ADDR 0x1f
#define V_MI1_REG_ADDR(x) ((x) << S_MI1_REG_ADDR)
#define G_MI1_REG_ADDR(x) (((x) >> S_MI1_REG_ADDR) & M_MI1_REG_ADDR)
#define S_MI1_PHY_ADDR 5
#define M_MI1_PHY_ADDR 0x1f
#define V_MI1_PHY_ADDR(x) ((x) << S_MI1_PHY_ADDR)
#define G_MI1_PHY_ADDR(x) (((x) >> S_MI1_PHY_ADDR) & M_MI1_PHY_ADDR)
#define A_ELMER0_PORT0_MI1_DATA 0x400008
#define S_MI1_DATA 0
#define M_MI1_DATA 0xffff
#define V_MI1_DATA(x) ((x) << S_MI1_DATA)
#define G_MI1_DATA(x) (((x) >> S_MI1_DATA) & M_MI1_DATA)
#define A_ELMER0_PORT0_MI1_OP 0x40000c
#define S_MI1_OP 0
#define M_MI1_OP 0x3
#define V_MI1_OP(x) ((x) << S_MI1_OP)
#define G_MI1_OP(x) (((x) >> S_MI1_OP) & M_MI1_OP)
#define S_MI1_ADDR_AUTOINC 2
#define V_MI1_ADDR_AUTOINC(x) ((x) << S_MI1_ADDR_AUTOINC)
#define F_MI1_ADDR_AUTOINC V_MI1_ADDR_AUTOINC(1U)
#define S_MI1_OP_BUSY 31
#define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY)
#define F_MI1_OP_BUSY V_MI1_OP_BUSY(1U)
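/*
 * Illustrative sketch, not part of the original driver, of the
 * S_/M_/V_/F_/G_ naming convention used throughout this file:
 * S_* is a field's bit offset, M_* its mask, V_* positions a value
 * in the field, F_* is a one-bit flag, and G_* extracts the field
 * from a register word.
 */
static inline unsigned int elmer0_example_mi1_addr(unsigned int phy,
                                                   unsigned int reg)
{
        /* Compose an MI1 address word from PHY and register addresses. */
        unsigned int addr = V_MI1_PHY_ADDR(phy) | V_MI1_REG_ADDR(reg);

        /* Round-trip: G_MI1_PHY_ADDR(addr) recovers phy (masked to 5 bits). */
        return addr;
}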
#define A_ELMER0_PORT1_MI1_CFG 0x500000
#define A_ELMER0_PORT1_MI1_ADDR 0x500004
#define A_ELMER0_PORT1_MI1_DATA 0x500008
#define A_ELMER0_PORT1_MI1_OP 0x50000c
#define A_ELMER0_PORT2_MI1_CFG 0x600000
#define A_ELMER0_PORT2_MI1_ADDR 0x600004
#define A_ELMER0_PORT2_MI1_DATA 0x600008
#define A_ELMER0_PORT2_MI1_OP 0x60000c
#define A_ELMER0_PORT3_MI1_CFG 0x700000
#define A_ELMER0_PORT3_MI1_ADDR 0x700004
#define A_ELMER0_PORT3_MI1_DATA 0x700008
#define A_ELMER0_PORT3_MI1_OP 0x70000c
/* Simple bit definition for GPI and GP0 registers. */
#define ELMER0_GP_BIT0 0x0001
#define ELMER0_GP_BIT1 0x0002
#define ELMER0_GP_BIT2 0x0004
#define ELMER0_GP_BIT3 0x0008
#define ELMER0_GP_BIT4 0x0010
#define ELMER0_GP_BIT5 0x0020
#define ELMER0_GP_BIT6 0x0040
#define ELMER0_GP_BIT7 0x0080
#define ELMER0_GP_BIT8 0x0100
#define ELMER0_GP_BIT9 0x0200
#define ELMER0_GP_BIT10 0x0400
#define ELMER0_GP_BIT11 0x0800
#define ELMER0_GP_BIT12 0x1000
#define ELMER0_GP_BIT13 0x2000
#define ELMER0_GP_BIT14 0x4000
#define ELMER0_GP_BIT15 0x8000
#define ELMER0_GP_BIT16 0x10000
#define ELMER0_GP_BIT17 0x20000
#define ELMER0_GP_BIT18 0x40000
#define ELMER0_GP_BIT19 0x80000
#define MI1_OP_DIRECT_WRITE 1
#define MI1_OP_DIRECT_READ 2
#define MI1_OP_INDIRECT_ADDRESS 0
#define MI1_OP_INDIRECT_WRITE 1
#define MI1_OP_INDIRECT_READ_INC 2
#define MI1_OP_INDIRECT_READ 3
#endif /* _CXGB_ELMER0_H_ */

drivers/net/chelsio/espi.c

@@ -0,0 +1,346 @@
/*****************************************************************************
* *
* File: espi.c *
* $Revision: 1.14 $ *
* $Date: 2005/05/14 00:59:32 $ *
* Description: *
* Ethernet SPI functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#include "common.h"
#include "regs.h"
#include "espi.h"
struct peespi {
adapter_t *adapter;
struct espi_intr_counts intr_cnt;
u32 misc_ctrl;
spinlock_t lock;
};
#define ESPI_INTR_MASK (F_DIP4ERR | F_RXDROP | F_TXDROP | F_RXOVERFLOW | \
F_RAMPARITYERR | F_DIP2PARITYERR)
#define MON_MASK (V_MONITORED_PORT_NUM(3) | F_MONITORED_DIRECTION \
| F_MONITORED_INTERFACE)
#define TRICN_CNFG 14
#define TRICN_CMD_READ 0x11
#define TRICN_CMD_WRITE 0x21
#define TRICN_CMD_ATTEMPTS 10
static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr,
int ch_addr, int reg_offset, u32 wr_data)
{
int busy, attempts = TRICN_CMD_ATTEMPTS;
writel(V_WRITE_DATA(wr_data) |
V_REGISTER_OFFSET(reg_offset) |
V_CHANNEL_ADDR(ch_addr) | V_MODULE_ADDR(module_addr) |
V_BUNDLE_ADDR(bundle_addr) |
V_SPI4_COMMAND(TRICN_CMD_WRITE),
adapter->regs + A_ESPI_CMD_ADDR);
writel(0, adapter->regs + A_ESPI_GOSTAT);
do {
busy = readl(adapter->regs + A_ESPI_GOSTAT) & F_ESPI_CMD_BUSY;
} while (busy && --attempts);
if (busy)
CH_ERR("%s: TRICN write timed out\n", adapter->name);
return busy;
}
/*
 * 1. Deassert rx_reset_core.
 * 2. Program TRICN_CNFG registers.
 * 3. Deassert rx_reset_link.
 */
static int tricn_init(adapter_t *adapter)
{
int i = 0;
int sme = 1;
int stat = 0;
int timeout = 0;
int is_ready = 0;
int dynamic_deskew = 0;
if (dynamic_deskew)
sme = 0;
        /* 1 */
        timeout = 1000;
        do {
                stat = readl(adapter->regs + A_ESPI_RX_RESET);
                is_ready = stat & 0x4;
                timeout--;
                udelay(5);
        } while (!is_ready && timeout > 0);
        writel(0x2, adapter->regs + A_ESPI_RX_RESET);
        if (!is_ready) {
                CH_ERR("ESPI: timeout in tricn_init()\n");
                t1_fatal_err(adapter);
        }
/* 2 */
if (sme) {
tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81);
tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81);
tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81);
}
        for (i = 1; i <= 8; i++)
                tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1);
        for (i = 1; i <= 2; i++)
                tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1);
        for (i = 1; i <= 3; i++)
                tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
        tricn_write(adapter, 0, 2, 4, TRICN_CNFG, 0xf1);
        tricn_write(adapter, 0, 2, 5, TRICN_CNFG, 0xe1);
        tricn_write(adapter, 0, 2, 6, TRICN_CNFG, 0xf1);
        tricn_write(adapter, 0, 2, 7, TRICN_CNFG, 0x80);
        tricn_write(adapter, 0, 2, 8, TRICN_CNFG, 0xf1);
/* 3 */
writel(0x3, adapter->regs + A_ESPI_RX_RESET);
return 0;
}
void t1_espi_intr_enable(struct peespi *espi)
{
u32 enable, pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
/*
* Cannot enable ESPI interrupts on T1B because HW asserts the
* interrupt incorrectly, namely the driver gets ESPI interrupts
* but no data is actually dropped (can verify this reading the ESPI
* drop registers). Also, once the ESPI interrupt is asserted it
* cannot be cleared (HW bug).
*/
enable = t1_is_T1B(espi->adapter) ? 0 : ESPI_INTR_MASK;
writel(enable, espi->adapter->regs + A_ESPI_INTR_ENABLE);
writel(pl_intr | F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
}
void t1_espi_intr_clear(struct peespi *espi)
{
writel(0xffffffff, espi->adapter->regs + A_ESPI_INTR_STATUS);
writel(F_PL_INTR_ESPI, espi->adapter->regs + A_PL_CAUSE);
}
void t1_espi_intr_disable(struct peespi *espi)
{
u32 pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
writel(0, espi->adapter->regs + A_ESPI_INTR_ENABLE);
writel(pl_intr & ~F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
}
int t1_espi_intr_handler(struct peespi *espi)
{
u32 cnt;
u32 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
if (status & F_DIP4ERR)
espi->intr_cnt.DIP4_err++;
if (status & F_RXDROP)
espi->intr_cnt.rx_drops++;
if (status & F_TXDROP)
espi->intr_cnt.tx_drops++;
if (status & F_RXOVERFLOW)
espi->intr_cnt.rx_ovflw++;
if (status & F_RAMPARITYERR)
espi->intr_cnt.parity_err++;
if (status & F_DIP2PARITYERR) {
espi->intr_cnt.DIP2_parity_err++;
/*
* Must read the error count to clear the interrupt
* that it causes.
*/
cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
}
/*
* For T1B we need to write 1 to clear ESPI interrupts. For T2+ we
* write the status as is.
*/
if (status && t1_is_T1B(espi->adapter))
status = 1;
writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
return 0;
}
const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi)
{
return &espi->intr_cnt;
}
static void espi_setup_for_pm3393(adapter_t *adapter)
{
u32 wmark = t1_is_T1B(adapter) ? 0x4000 : 0x3200;
writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN1);
writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN3);
writel(0x100, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
writel(wmark, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
writel(3, adapter->regs + A_ESPI_CALENDAR_LENGTH);
writel(0x08000008, adapter->regs + A_ESPI_TRAIN);
writel(V_RX_NPORTS(1) | V_TX_NPORTS(1), adapter->regs + A_PORT_CONFIG);
}
/*
 * T2 initialization:
 * 1. Set T_ESPI_MISCCTRL_ADDR.
 * 2. Init ESPI registers.
 * 3. Init TriCN hard macro.
 */
int t1_espi_init(struct peespi *espi, int mac_type, int nports)
{
u32 cnt;
u32 status_enable_extra = 0;
adapter_t *adapter = espi->adapter;
u32 status, burstval = 0x800100;
/* Disable ESPI training. MACs that can handle it enable it below. */
writel(0, adapter->regs + A_ESPI_TRAIN);
if (is_T2(adapter)) {
writel(V_OUT_OF_SYNC_COUNT(4) |
V_DIP2_PARITY_ERR_THRES(3) |
V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL);
if (nports == 4) {
/* T204: maxburst1 = 0x40, maxburst2 = 0x20 */
burstval = 0x200040;
}
}
writel(burstval, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
switch (mac_type) {
case CHBT_MAC_PM3393:
espi_setup_for_pm3393(adapter);
break;
default:
return -1;
}
/*
* Make sure any pending interrupts from the SPI are
* cleared before enabling the interrupt.
*/
writel(ESPI_INTR_MASK, espi->adapter->regs + A_ESPI_INTR_ENABLE);
status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
if (status & F_DIP2PARITYERR) {
cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
}
/*
* For T1B we need to write 1 to clear ESPI interrupts. For T2+ we
* write the status as is.
*/
if (status && t1_is_T1B(espi->adapter))
status = 1;
writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
writel(status_enable_extra | F_RXSTATUSENABLE,
adapter->regs + A_ESPI_FIFO_STATUS_ENABLE);
if (is_T2(adapter)) {
tricn_init(adapter);
/*
* Always position the control at the 1st port egress IN
* (sop,eop) counter to reduce PIOs for T/N210 workaround.
*/
espi->misc_ctrl = (readl(adapter->regs + A_ESPI_MISC_CONTROL)
& ~MON_MASK) | (F_MONITORED_DIRECTION
| F_MONITORED_INTERFACE);
writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
spin_lock_init(&espi->lock);
}
return 0;
}
void t1_espi_destroy(struct peespi *espi)
{
kfree(espi);
}
struct peespi *t1_espi_create(adapter_t *adapter)
{
struct peespi *espi = kmalloc(sizeof(*espi), GFP_KERNEL);
        if (espi) {
                memset(espi, 0, sizeof(*espi));
                espi->adapter = adapter;
        }
return espi;
}
void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val)
{
struct peespi *espi = adapter->espi;
if (!is_T2(adapter))
return;
spin_lock(&espi->lock);
espi->misc_ctrl = (val & ~MON_MASK) |
(espi->misc_ctrl & MON_MASK);
writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
spin_unlock(&espi->lock);
}
u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait)
{
u32 sel;
struct peespi *espi = adapter->espi;
if (!is_T2(adapter))
return 0;
sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2);
        if (!wait) {
                if (!spin_trylock(&espi->lock))
                        return 0;
        } else {
                spin_lock(&espi->lock);
        }
        if (sel != (espi->misc_ctrl & MON_MASK)) {
                writel((espi->misc_ctrl & ~MON_MASK) | sel,
                       adapter->regs + A_ESPI_MISC_CONTROL);
                sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
                writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
        } else {
                sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
        }
spin_unlock(&espi->lock);
return sel;
}

drivers/net/chelsio/espi.h

@@ -0,0 +1,68 @@
/*****************************************************************************
* *
* File: espi.h *
* $Revision: 1.7 $ *
* $Date: 2005/06/21 18:29:47 $ *
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#ifndef _CXGB_ESPI_H_
#define _CXGB_ESPI_H_
#include "common.h"
struct espi_intr_counts {
unsigned int DIP4_err;
unsigned int rx_drops;
unsigned int tx_drops;
unsigned int rx_ovflw;
unsigned int parity_err;
unsigned int DIP2_parity_err;
};
struct peespi;
struct peespi *t1_espi_create(adapter_t *adapter);
void t1_espi_destroy(struct peespi *espi);
int t1_espi_init(struct peespi *espi, int mac_type, int nports);
void t1_espi_intr_enable(struct peespi *);
void t1_espi_intr_clear(struct peespi *);
void t1_espi_intr_disable(struct peespi *);
int t1_espi_intr_handler(struct peespi *);
const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi);
void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val);
u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait);
#endif /* _CXGB_ESPI_H_ */

drivers/net/chelsio/gmac.h

@@ -0,0 +1,134 @@
/*****************************************************************************
* *
* File: gmac.h *
* $Revision: 1.6 $ *
* $Date: 2005/06/21 18:29:47 $ *
* Description: *
* Generic MAC functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#ifndef _CXGB_GMAC_H_
#define _CXGB_GMAC_H_
#include "common.h"
enum { MAC_STATS_UPDATE_FAST, MAC_STATS_UPDATE_FULL };
enum { MAC_DIRECTION_RX = 1, MAC_DIRECTION_TX = 2 };
struct cmac_statistics {
/* Transmit */
u64 TxOctetsOK;
u64 TxOctetsBad;
u64 TxUnicastFramesOK;
u64 TxMulticastFramesOK;
u64 TxBroadcastFramesOK;
u64 TxPauseFrames;
u64 TxFramesWithDeferredXmissions;
u64 TxLateCollisions;
u64 TxTotalCollisions;
u64 TxFramesAbortedDueToXSCollisions;
u64 TxUnderrun;
u64 TxLengthErrors;
u64 TxInternalMACXmitError;
u64 TxFramesWithExcessiveDeferral;
u64 TxFCSErrors;
/* Receive */
u64 RxOctetsOK;
u64 RxOctetsBad;
u64 RxUnicastFramesOK;
u64 RxMulticastFramesOK;
u64 RxBroadcastFramesOK;
u64 RxPauseFrames;
u64 RxFCSErrors;
u64 RxAlignErrors;
u64 RxSymbolErrors;
u64 RxDataErrors;
u64 RxSequenceErrors;
u64 RxRuntErrors;
u64 RxJabberErrors;
u64 RxInternalMACRcvError;
u64 RxInRangeLengthErrors;
u64 RxOutOfRangeLengthField;
u64 RxFrameTooLongErrors;
};
struct cmac_ops {
void (*destroy)(struct cmac *);
int (*reset)(struct cmac *);
int (*interrupt_enable)(struct cmac *);
int (*interrupt_disable)(struct cmac *);
int (*interrupt_clear)(struct cmac *);
int (*interrupt_handler)(struct cmac *);
int (*enable)(struct cmac *, int);
int (*disable)(struct cmac *, int);
int (*loopback_enable)(struct cmac *);
int (*loopback_disable)(struct cmac *);
int (*set_mtu)(struct cmac *, int mtu);
int (*set_rx_mode)(struct cmac *, struct t1_rx_mode *rm);
int (*set_speed_duplex_fc)(struct cmac *, int speed, int duplex, int fc);
int (*get_speed_duplex_fc)(struct cmac *, int *speed, int *duplex,
int *fc);
const struct cmac_statistics *(*statistics_update)(struct cmac *, int);
int (*macaddress_get)(struct cmac *, u8 mac_addr[6]);
int (*macaddress_set)(struct cmac *, u8 mac_addr[6]);
};
typedef struct _cmac_instance cmac_instance;
struct cmac {
struct cmac_statistics stats;
adapter_t *adapter;
struct cmac_ops *ops;
cmac_instance *instance;
};
struct gmac {
unsigned int stats_update_period;
struct cmac *(*create)(adapter_t *adapter, int index);
int (*reset)(adapter_t *);
};
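/*
 * Illustrative sketch, not part of the original driver: how board
 * setup code would use the gmac factory to instantiate the MAC for
 * port 0 and enable it in both directions.
 */
static inline struct cmac *gmac_example_bring_up(adapter_t *adapter,
                                                 struct gmac *ops)
{
        struct cmac *mac = ops->create(adapter, 0); /* port index 0 */

        if (mac)
                mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
        return mac;
}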
extern struct gmac t1_pm3393_ops;
extern struct gmac t1_chelsio_mac_ops;
extern struct gmac t1_vsc7321_ops;
extern struct gmac t1_ixf1010_ops;
extern struct gmac t1_dummy_mac_ops;
#endif /* _CXGB_GMAC_H_ */

drivers/net/chelsio/mv88x201x.c

@@ -0,0 +1,252 @@
/*****************************************************************************
* *
* File: mv88x201x.c *
* $Revision: 1.12 $ *
* $Date: 2005/04/15 19:27:14 $ *
* Description: *
* Marvell PHY (mv88x201x) functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#include "cphy.h"
#include "elmer0.h"
/*
* The 88x2010 Rev C requires some link status registers to be read
* twice in order to get the right values. Future revisions will fix
* this problem and then this macro can disappear.
*/
#define MV88x2010_LINK_STATUS_BUGS 1
static int led_init(struct cphy *cphy)
{
/* Setup the LED registers so we can turn on/off.
* Writing these bits maps control to another
* register. mmd(0x1) addr(0x7)
*/
mdio_write(cphy, 0x3, 0x8304, 0xdddd);
return 0;
}
static int led_link(struct cphy *cphy, u32 do_enable)
{
u32 led = 0;
#define LINK_ENABLE_BIT 0x1
mdio_read(cphy, 0x1, 0x7, &led);
if (do_enable & LINK_ENABLE_BIT) {
led |= LINK_ENABLE_BIT;
mdio_write(cphy, 0x1, 0x7, led);
} else {
led &= ~LINK_ENABLE_BIT;
mdio_write(cphy, 0x1, 0x7, led);
}
return 0;
}
/* Port Reset */
static int mv88x201x_reset(struct cphy *cphy, int wait)
{
/* This can be done through registers. It is not required since
* a full chip reset is used.
*/
return 0;
}
static int mv88x201x_interrupt_enable(struct cphy *cphy)
{
u32 elmer;
/* Enable PHY LASI interrupts. */
mdio_write(cphy, 0x1, 0x9002, 0x1);
/* Enable Marvell interrupts through Elmer0. */
t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
elmer |= ELMER0_GP_BIT6;
t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
return 0;
}
static int mv88x201x_interrupt_disable(struct cphy *cphy)
{
u32 elmer;
/* Disable PHY LASI interrupts. */
mdio_write(cphy, 0x1, 0x9002, 0x0);
/* Disable Marvell interrupts through Elmer0. */
t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
elmer &= ~ELMER0_GP_BIT6;
t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
return 0;
}
static int mv88x201x_interrupt_clear(struct cphy *cphy)
{
u32 elmer;
u32 val;
#ifdef MV88x2010_LINK_STATUS_BUGS
/* Required to read twice before the clear takes effect. */
mdio_read(cphy, 0x1, 0x9003, &val);
mdio_read(cphy, 0x1, 0x9004, &val);
mdio_read(cphy, 0x1, 0x9005, &val);
/* Read this register after the others above it else
* the register doesn't clear correctly.
*/
mdio_read(cphy, 0x1, 0x1, &val);
#endif
/* Clear link status. */
mdio_read(cphy, 0x1, 0x1, &val);
/* Clear PHY LASI interrupts. */
mdio_read(cphy, 0x1, 0x9005, &val);
#ifdef MV88x2010_LINK_STATUS_BUGS
/* Do it again. */
mdio_read(cphy, 0x1, 0x9003, &val);
mdio_read(cphy, 0x1, 0x9004, &val);
#endif
/* Clear Marvell interrupts through Elmer0. */
t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
elmer |= ELMER0_GP_BIT6;
t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
return 0;
}
static int mv88x201x_interrupt_handler(struct cphy *cphy)
{
/* Clear interrupts */
mv88x201x_interrupt_clear(cphy);
/* We have only enabled link change interrupts and so
* cphy_cause must be a link change interrupt.
*/
return cphy_cause_link_change;
}
static int mv88x201x_set_loopback(struct cphy *cphy, int on)
{
return 0;
}
static int mv88x201x_get_link_status(struct cphy *cphy, int *link_ok,
int *speed, int *duplex, int *fc)
{
u32 val = 0;
#define LINK_STATUS_BIT 0x4
if (link_ok) {
/* Read link status. */
mdio_read(cphy, 0x1, 0x1, &val);
val &= LINK_STATUS_BIT;
*link_ok = (val == LINK_STATUS_BIT);
/* Turn on/off Link LED */
led_link(cphy, *link_ok);
}
if (speed)
*speed = SPEED_10000;
if (duplex)
*duplex = DUPLEX_FULL;
if (fc)
*fc = PAUSE_RX | PAUSE_TX;
return 0;
}
static void mv88x201x_destroy(struct cphy *cphy)
{
kfree(cphy);
}
static struct cphy_ops mv88x201x_ops = {
.destroy = mv88x201x_destroy,
.reset = mv88x201x_reset,
.interrupt_enable = mv88x201x_interrupt_enable,
.interrupt_disable = mv88x201x_interrupt_disable,
.interrupt_clear = mv88x201x_interrupt_clear,
.interrupt_handler = mv88x201x_interrupt_handler,
.get_link_status = mv88x201x_get_link_status,
.set_loopback = mv88x201x_set_loopback,
};
static struct cphy *mv88x201x_phy_create(adapter_t *adapter, int phy_addr,
struct mdio_ops *mdio_ops)
{
u32 val;
struct cphy *cphy = kmalloc(sizeof(*cphy), GFP_KERNEL);
if (!cphy)
return NULL;
memset(cphy, 0, sizeof(*cphy));
cphy_init(cphy, adapter, phy_addr, &mv88x201x_ops, mdio_ops);
/* Commands the PHY to enable XFP's clock. */
mdio_read(cphy, 0x3, 0x8300, &val);
mdio_write(cphy, 0x3, 0x8300, val | 1);
/* Clear link status. Required because of a bug in the PHY. */
mdio_read(cphy, 0x1, 0x8, &val);
mdio_read(cphy, 0x3, 0x8, &val);
/* Allows for Link,Ack LED turn on/off */
led_init(cphy);
return cphy;
}
/* Chip Reset */
static int mv88x201x_phy_reset(adapter_t *adapter)
{
u32 val;
t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val &= ~4;
t1_tpi_write(adapter, A_ELMER0_GPO, val);
msleep(100);
t1_tpi_write(adapter, A_ELMER0_GPO, val | 4);
msleep(1000);
/* Now let's enable the laser. Delay 100us. */
t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val |= 0x8000;
t1_tpi_write(adapter, A_ELMER0_GPO, val);
udelay(100);
return 0;
}
struct gphy t1_mv88x201x_ops = {
mv88x201x_phy_create,
mv88x201x_phy_reset
};

drivers/net/chelsio/pm3393.c

@@ -0,0 +1,826 @@
/*****************************************************************************
* *
* File: pm3393.c *
* $Revision: 1.16 $ *
* $Date: 2005/05/14 00:59:32 $ *
* Description: *
* PMC/SIERRA (pm3393) MAC-PHY functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#include "common.h"
#include "regs.h"
#include "gmac.h"
#include "elmer0.h"
#include "suni1x10gexp_regs.h"
/* 802.3ae 10Gb/s MDIO Manageable Device(MMD)
*/
enum {
MMD_RESERVED,
MMD_PMAPMD,
MMD_WIS,
MMD_PCS,
MMD_PHY_XGXS, /* XGMII Extender Sublayer */
MMD_DTE_XGXS,
};
enum {
PHY_XGXS_CTRL_1,
PHY_XGXS_STATUS_1
};
#define OFFSET(REG_ADDR) ((REG_ADDR) << 2)
/* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
#define MAX_FRAME_SIZE 9600
#define IPG 12
#define TXXG_CONF1_VAL ((IPG << SUNI1x10GEXP_BITOFF_TXXG_IPGT) | \
SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN | SUNI1x10GEXP_BITMSK_TXXG_CRCEN | \
SUNI1x10GEXP_BITMSK_TXXG_PADEN)
#define RXXG_CONF1_VAL (SUNI1x10GEXP_BITMSK_RXXG_PUREP | 0x14 | \
SUNI1x10GEXP_BITMSK_RXXG_FLCHK | SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP)
/* Update statistics every 15 minutes */
#define STATS_TICK_SECS (15 * 60)
enum { /* RMON registers */
RxOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW,
RxUnicastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW,
RxMulticastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW,
RxBroadcastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW,
RxPAUSEMACCtrlFramesReceived = SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW,
RxFrameCheckSequenceErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW,
RxFramesLostDueToInternalMACErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW,
RxSymbolErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW,
RxInRangeLengthErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW,
RxFramesTooLongErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW,
RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW,
RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW,
RxUndersizedFrames = SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW,
TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW,
TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW,
TxTransmitSystemError = SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW,
TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW,
TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW,
TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW,
TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW
};
struct _cmac_instance {
u8 enabled;
u8 fc;
u8 mac_addr[6];
};
static int pmread(struct cmac *cmac, u32 reg, u32 * data32)
{
t1_tpi_read(cmac->adapter, OFFSET(reg), data32);
return 0;
}
static int pmwrite(struct cmac *cmac, u32 reg, u32 data32)
{
t1_tpi_write(cmac->adapter, OFFSET(reg), data32);
return 0;
}
/* Port reset. */
static int pm3393_reset(struct cmac *cmac)
{
return 0;
}
/*
 * Enable interrupts for the PM3393:
 * 1. Enable PM3393 BLOCK interrupts.
 * 2. Enable PM3393 Master Interrupt bit (INTE).
 * 3. Enable ELMER's PM3393 bit.
 * 4. Enable Terminator external interrupt.
 */
static int pm3393_interrupt_enable(struct cmac *cmac)
{
u32 pl_intr;
/* PM3393 - Enabling all hardware block interrupts.
*/
pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0xffff);
/* Don't interrupt on statistics overflow, we are polling */
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0xffff);
/* PM3393 - Global interrupt enable
*/
/* TBD XXX Disable for now until we figure out why error interrupts keep asserting. */
pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE,
0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ );
/* TERMINATOR - PL_INTERRUPTS_EXT */
pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE);
pl_intr |= F_PL_INTR_EXT;
writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE);
return 0;
}
static int pm3393_interrupt_disable(struct cmac *cmac)
{
u32 elmer;
/* PM3393 - Disable all hardware block interrupts. */
pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0);
pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0);
/* PM3393 - Global interrupt disable. */
pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE, 0);
/* ELMER - External chip interrupts. */
t1_tpi_read(cmac->adapter, A_ELMER0_INT_ENABLE, &elmer);
elmer &= ~ELMER0_GP_BIT1;
t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer);
/* TERMINATOR - PL_INTERRUPTS_EXT */
/* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP
* COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level.
*/
return 0;
}
static int pm3393_interrupt_clear(struct cmac *cmac)
{
u32 elmer;
u32 pl_intr;
u32 val32;
/* PM3393 - Clearing HW interrupt blocks. Note, this assumes
* bit WCIMODE=0 for a clear-on-read.
*/
pmread(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT, &val32);
pmread(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT, &val32);
pmread(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_RXXG_INTERRUPT, &val32);
pmread(cmac, SUNI1x10GEXP_REG_TXXG_INTERRUPT, &val32);
pmread(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT, &val32);
pmread(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION,
&val32);
pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS, &val32);
pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE, &val32);
/* PM3393 - Global interrupt status
*/
pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, &val32);
/* ELMER - External chip interrupts.
*/
t1_tpi_read(cmac->adapter, A_ELMER0_INT_CAUSE, &elmer);
elmer |= ELMER0_GP_BIT1;
t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer);
/* TERMINATOR - PL_INTERRUPTS_EXT */
pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE);
pl_intr |= F_PL_INTR_EXT;
writel(pl_intr, cmac->adapter->regs + A_PL_CAUSE);
return 0;
}
/* Interrupt handler */
static int pm3393_interrupt_handler(struct cmac *cmac)
{
u32 master_intr_status;
/*
 * 1. Read the master interrupt register.
 * 2. Read the BLOCK's interrupt status registers.
 * 3. Handle the BLOCK interrupts.
 */
/* Read the master interrupt status register. */
pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS,
&master_intr_status);
/* TBD XXX Let's just clear everything for now */
pm3393_interrupt_clear(cmac);
return 0;
}
static int pm3393_enable(struct cmac *cmac, int which)
{
if (which & MAC_DIRECTION_RX)
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1,
(RXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_RXXG_RXEN));
if (which & MAC_DIRECTION_TX) {
u32 val = TXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_TXXG_TXEN0;
if (cmac->instance->fc & PAUSE_RX)
val |= SUNI1x10GEXP_BITMSK_TXXG_FCRX;
if (cmac->instance->fc & PAUSE_TX)
val |= SUNI1x10GEXP_BITMSK_TXXG_FCTX;
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, val);
}
cmac->instance->enabled |= which;
return 0;
}
static int pm3393_enable_port(struct cmac *cmac, int which)
{
/* Clear port statistics */
pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
SUNI1x10GEXP_BITMSK_MSTAT_CLEAR);
udelay(2);
memset(&cmac->stats, 0, sizeof(struct cmac_statistics));
pm3393_enable(cmac, which);
/*
* XXX This should be done by the PHY and preferably not at all.
* The PHY doesn't give us link status indication on its own so have
* the link management code query it instead.
*/
{
extern void link_changed(adapter_t *adapter, int port_id);
link_changed(cmac->adapter, 0);
}
return 0;
}
static int pm3393_disable(struct cmac *cmac, int which)
{
if (which & MAC_DIRECTION_RX)
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1, RXXG_CONF1_VAL);
if (which & MAC_DIRECTION_TX)
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, TXXG_CONF1_VAL);
/*
* The disable is graceful. Give the PM3393 time. Can't wait very
* long here, we may be holding locks.
*/
udelay(20);
cmac->instance->enabled &= ~which;
return 0;
}
static int pm3393_loopback_enable(struct cmac *cmac)
{
return 0;
}
static int pm3393_loopback_disable(struct cmac *cmac)
{
return 0;
}
static int pm3393_set_mtu(struct cmac *cmac, int mtu)
{
int enabled = cmac->instance->enabled;
/* MAX_FRAME_SIZE includes the Ethernet header (14 bytes) and FCS (4 bytes); the MTU does not. */
mtu += 14 + 4;
if (mtu > MAX_FRAME_SIZE)
return -EINVAL;
/* Disable Rx/Tx MAC before configuring it. */
if (enabled)
pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH, mtu);
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE, mtu);
if (enabled)
pm3393_enable(cmac, enabled);
return 0;
}
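/*
 * Standard Ethernet CRC-32 (reflected polynomial 0xedb88320), computed one
 * bit at a time. Only six bits of the result are used for the multicast
 * hash below, so the bit-serial loop is plenty fast enough.
 */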
static u32 calc_crc(u8 *b, int len)
{
int i;
u32 crc = (u32)~0;
/* calculate crc one bit at a time */
while (len--) {
crc ^= *b++;
for (i = 0; i < 8; i++) {
if (crc & 0x1)
crc = (crc >> 1) ^ 0xedb88320;
else
crc = (crc >> 1);
}
}
/* reverse bits */
crc = ((crc >> 4) & 0x0f0f0f0f) | ((crc << 4) & 0xf0f0f0f0);
crc = ((crc >> 2) & 0x33333333) | ((crc << 2) & 0xcccccccc);
crc = ((crc >> 1) & 0x55555555) | ((crc << 1) & 0xaaaaaaaa);
/* swap bytes */
crc = (crc >> 16) | (crc << 16);
crc = (crc >> 8 & 0x00ff00ff) | (crc << 8 & 0xff00ff00);
return crc;
}
static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
{
int enabled = cmac->instance->enabled & MAC_DIRECTION_RX;
u32 rx_mode;
/* Disable MAC RX before reconfiguring it */
if (enabled)
pm3393_disable(cmac, MAC_DIRECTION_RX);
pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, &rx_mode);
rx_mode &= ~(SUNI1x10GEXP_BITMSK_RXXG_PMODE |
SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2,
(u16)rx_mode);
if (t1_rx_mode_promisc(rm)) {
/* Promiscuous mode. */
rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_PMODE;
}
if (t1_rx_mode_allmulti(rm)) {
/* Accept all multicast. */
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, 0xffff);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, 0xffff);
rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
} else if (t1_rx_mode_mc_cnt(rm)) {
/* Accept one or more multicast(s). */
u8 *addr;
int bit;
u16 mc_filter[4] = { 0, };
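/* The 64-entry multicast hash lives in four 16-bit registers
 * (LOW/MIDLOW/MIDHIGH/HIGH): bit >> 4 selects the register and
 * bit & 0xf the bit within it. */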
while ((addr = t1_get_next_mcaddr(rm))) {
bit = (calc_crc(addr, ETH_ALEN) >> 23) & 0x3f; /* bit[23:28] */
mc_filter[bit >> 4] |= 1 << (bit & 0xf);
}
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, mc_filter[1]);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, mc_filter[2]);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, mc_filter[3]);
rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
}
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, (u16)rx_mode);
if (enabled)
pm3393_enable(cmac, MAC_DIRECTION_RX);
return 0;
}
static int pm3393_get_speed_duplex_fc(struct cmac *cmac, int *speed,
int *duplex, int *fc)
{
if (speed)
*speed = SPEED_10000;
if (duplex)
*duplex = DUPLEX_FULL;
if (fc)
*fc = cmac->instance->fc;
return 0;
}
static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex,
int fc)
{
if (speed >= 0 && speed != SPEED_10000)
return -1;
if (duplex >= 0 && duplex != DUPLEX_FULL)
return -1;
if (fc & ~(PAUSE_TX | PAUSE_RX))
return -1;
if (fc != cmac->instance->fc) {
cmac->instance->fc = (u8) fc;
if (cmac->instance->enabled & MAC_DIRECTION_TX)
pm3393_enable(cmac, MAC_DIRECTION_TX);
}
return 0;
}
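/*
 * Each MSTAT counter is 40 bits wide, read as three consecutive 16-bit TPI
 * registers (low/mid/high, with only 8 valid bits in the high word).
 * RMON_UPDATE reassembles those 40 hardware bits, preserves the software-
 * maintained extension bits [63:40], and bumps the extension by 2^40 when
 * the counter's bit is set in the rollover bitmap 'ro'.
 */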
#define RMON_UPDATE(mac, name, stat_name) \
{ \
t1_tpi_read((mac)->adapter, OFFSET(name), &val0); \
t1_tpi_read((mac)->adapter, OFFSET(((name)+1)), &val1); \
t1_tpi_read((mac)->adapter, OFFSET(((name)+2)), &val2); \
(mac)->stats.stat_name = ((u64)val0 & 0xffff) | \
(((u64)val1 & 0xffff) << 16) | \
(((u64)val2 & 0xff) << 32) | \
((mac)->stats.stat_name & \
(~(u64)0 << 40)); \
if (ro & ((u64)1 << \
((name - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2))) \
(mac)->stats.stat_name += ((u64)1 << 40); \
}
static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
int flag)
{
u64 ro;
u32 val0, val1, val2, val3;
/* Snap the counters */
pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
SUNI1x10GEXP_BITMSK_MSTAT_SNAP);
/* Counter rollover, clear on read */
pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0, &val0);
pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1, &val1);
pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2, &val2);
pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3, &val3);
ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) |
(((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48);
/* Rx stats */
RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK);
RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK);
RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK);
RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK);
RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames);
RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors);
RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors,
RxInternalMACRcvError);
RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors);
RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors);
RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
RMON_UPDATE(mac, RxFragments, RxRuntErrors);
RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
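/* Note that RxFragments and RxUndersizedFrames both target RxRuntErrors,
 * so the second update replaces the low 40 bits written by the first. */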
/* Tx stats */
RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError,
TxInternalMACXmitError);
RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors);
RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK);
RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
return &mac->stats;
}
static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
{
memcpy(mac_addr, cmac->instance->mac_addr, 6);
return 0;
}
static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
{
u32 val, lo, mid, hi, enabled = cmac->instance->enabled;
/*
* MAC addr: 00:07:43:00:13:09
*
* ma[5] = 0x09
* ma[4] = 0x13
* ma[3] = 0x00
* ma[2] = 0x43
* ma[1] = 0x07
* ma[0] = 0x00
*
* The PM3393 requires byte swapping and reverse order entry
* when programming MAC addresses:
*
* low_bits[15:0] = ma[1]:ma[0]
* mid_bits[31:16] = ma[3]:ma[2]
* high_bits[47:32] = ma[5]:ma[4]
*/
/* Store local copy */
memcpy(cmac->instance->mac_addr, ma, 6);
lo = ((u32) ma[1] << 8) | (u32) ma[0];
mid = ((u32) ma[3] << 8) | (u32) ma[2];
hi = ((u32) ma[5] << 8) | (u32) ma[4];
/* Disable Rx/Tx MAC before configuring it. */
if (enabled)
pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
/* Set RXXG Station Address */
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_15_0, lo);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_31_16, mid);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_47_32, hi);
/* Set TXXG Station Address */
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_15_0, lo);
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_31_16, mid);
pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_47_32, hi);
/* Setup Exact Match Filter 1 with our MAC address
*
* Must disable exact match filter before configuring it.
*/
pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, &val);
val &= 0xff0f;
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW, lo);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID, mid);
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH, hi);
val |= 0x0090;
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
if (enabled)
pm3393_enable(cmac, enabled);
return 0;
}
static void pm3393_destroy(struct cmac *cmac)
{
kfree(cmac);
}
static struct cmac_ops pm3393_ops = {
.destroy = pm3393_destroy,
.reset = pm3393_reset,
.interrupt_enable = pm3393_interrupt_enable,
.interrupt_disable = pm3393_interrupt_disable,
.interrupt_clear = pm3393_interrupt_clear,
.interrupt_handler = pm3393_interrupt_handler,
.enable = pm3393_enable_port,
.disable = pm3393_disable,
.loopback_enable = pm3393_loopback_enable,
.loopback_disable = pm3393_loopback_disable,
.set_mtu = pm3393_set_mtu,
.set_rx_mode = pm3393_set_rx_mode,
.get_speed_duplex_fc = pm3393_get_speed_duplex_fc,
.set_speed_duplex_fc = pm3393_set_speed_duplex_fc,
.statistics_update = pm3393_update_statistics,
.macaddress_get = pm3393_macaddress_get,
.macaddress_set = pm3393_macaddress_set
};
static struct cmac *pm3393_mac_create(adapter_t *adapter, int index)
{
struct cmac *cmac;
cmac = kmalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL);
if (!cmac)
return NULL;
memset(cmac, 0, sizeof(*cmac));
cmac->ops = &pm3393_ops;
cmac->instance = (cmac_instance *) (cmac + 1);
cmac->adapter = adapter;
cmac->instance->fc = PAUSE_TX | PAUSE_RX;
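/*
 * One-time PM3393 block bring-up. The raw TPI writes below provision the
 * PL4IO, EFLX/IFLX FIFOs, PL4MOS, PL4ODP/PL4IDU and XG MAC blocks; the
 * values are fixed board-level tuning (see the inline comments) rather
 * than anything computed at runtime.
 */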
t1_tpi_write(adapter, OFFSET(0x0001), 0x00008000);
t1_tpi_write(adapter, OFFSET(0x0001), 0x00000000);
t1_tpi_write(adapter, OFFSET(0x2308), 0x00009800);
t1_tpi_write(adapter, OFFSET(0x2305), 0x00001001); /* PL4IO Enable */
t1_tpi_write(adapter, OFFSET(0x2320), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2321), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2322), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2323), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2324), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2325), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2326), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2327), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2328), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x2329), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232a), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232b), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232c), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232d), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232e), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x232f), 0x00008800);
t1_tpi_write(adapter, OFFSET(0x230d), 0x00009c00);
t1_tpi_write(adapter, OFFSET(0x2304), 0x00000202); /* PL4IO Calendar Repetitions */
t1_tpi_write(adapter, OFFSET(0x3200), 0x00008080); /* EFLX Enable */
t1_tpi_write(adapter, OFFSET(0x3210), 0x00000000); /* EFLX Channel Deprovision */
t1_tpi_write(adapter, OFFSET(0x3203), 0x00000000); /* EFLX Low Limit */
t1_tpi_write(adapter, OFFSET(0x3204), 0x00000040); /* EFLX High Limit */
t1_tpi_write(adapter, OFFSET(0x3205), 0x000002cc); /* EFLX Almost Full */
t1_tpi_write(adapter, OFFSET(0x3206), 0x00000199); /* EFLX Almost Empty */
t1_tpi_write(adapter, OFFSET(0x3207), 0x00000240); /* EFLX Cut Through Threshold */
t1_tpi_write(adapter, OFFSET(0x3202), 0x00000000); /* EFLX Indirect Register Update */
t1_tpi_write(adapter, OFFSET(0x3210), 0x00000001); /* EFLX Channel Provision */
t1_tpi_write(adapter, OFFSET(0x3208), 0x0000ffff); /* EFLX Undocumented */
t1_tpi_write(adapter, OFFSET(0x320a), 0x0000ffff); /* EFLX Undocumented */
t1_tpi_write(adapter, OFFSET(0x320c), 0x0000ffff); /* EFLX enable overflow interrupt. The other bits are undocumented */
t1_tpi_write(adapter, OFFSET(0x320e), 0x0000ffff); /* EFLX Undocumented */
t1_tpi_write(adapter, OFFSET(0x2200), 0x0000c000); /* IFLX Configuration - enable */
t1_tpi_write(adapter, OFFSET(0x2201), 0x00000000); /* IFLX Channel Deprovision */
t1_tpi_write(adapter, OFFSET(0x220e), 0x00000000); /* IFLX Low Limit */
t1_tpi_write(adapter, OFFSET(0x220f), 0x00000100); /* IFLX High Limit */
t1_tpi_write(adapter, OFFSET(0x2210), 0x00000c00); /* IFLX Almost Full Limit */
t1_tpi_write(adapter, OFFSET(0x2211), 0x00000599); /* IFLX Almost Empty Limit */
t1_tpi_write(adapter, OFFSET(0x220d), 0x00000000); /* IFLX Indirect Register Update */
t1_tpi_write(adapter, OFFSET(0x2201), 0x00000001); /* IFLX Channel Provision */
t1_tpi_write(adapter, OFFSET(0x2203), 0x0000ffff); /* IFLX Undocumented */
t1_tpi_write(adapter, OFFSET(0x2205), 0x0000ffff); /* IFLX Undocumented */
t1_tpi_write(adapter, OFFSET(0x2209), 0x0000ffff); /* IFLX Enable overflow interrupt. The other bits are undocumented */
t1_tpi_write(adapter, OFFSET(0x2241), 0xfffffffe); /* PL4MOS Undocumented */
t1_tpi_write(adapter, OFFSET(0x2242), 0x0000ffff); /* PL4MOS Undocumented */
t1_tpi_write(adapter, OFFSET(0x2243), 0x00000008); /* PL4MOS Starving Burst Size */
t1_tpi_write(adapter, OFFSET(0x2244), 0x00000008); /* PL4MOS Hungry Burst Size */
t1_tpi_write(adapter, OFFSET(0x2245), 0x00000008); /* PL4MOS Transfer Size */
t1_tpi_write(adapter, OFFSET(0x2240), 0x00000005); /* PL4MOS Disable */
t1_tpi_write(adapter, OFFSET(0x2280), 0x00002103); /* PL4ODP Training Repeat and SOP rule */
t1_tpi_write(adapter, OFFSET(0x2284), 0x00000000); /* PL4ODP MAX_T setting */
t1_tpi_write(adapter, OFFSET(0x3280), 0x00000087); /* PL4IDU Enable data forward, port state machine. Set ALLOW_NON_ZERO_OLB */
t1_tpi_write(adapter, OFFSET(0x3282), 0x0000001f); /* PL4IDU Enable Dip4 check error interrupts */
t1_tpi_write(adapter, OFFSET(0x3040), 0x0c32); /* # TXXG Config */
/* For T1 use timer-based MAC flow control. */
t1_tpi_write(adapter, OFFSET(0x304d), 0x8000);
t1_tpi_write(adapter, OFFSET(0x2040), 0x059c); /* # RXXG Config */
t1_tpi_write(adapter, OFFSET(0x2049), 0x0001); /* # RXXG Cut Through */
t1_tpi_write(adapter, OFFSET(0x2070), 0x0000); /* # Disable promiscuous mode */
/* Setup Exact Match Filter 0 to allow broadcast packets.
*/
t1_tpi_write(adapter, OFFSET(0x206e), 0x0000); /* # Disable Match Enable bit */
t1_tpi_write(adapter, OFFSET(0x204a), 0xffff); /* # low addr */
t1_tpi_write(adapter, OFFSET(0x204b), 0xffff); /* # mid addr */
t1_tpi_write(adapter, OFFSET(0x204c), 0xffff); /* # high addr */
t1_tpi_write(adapter, OFFSET(0x206e), 0x0009); /* # Enable Match Enable bit */
t1_tpi_write(adapter, OFFSET(0x0003), 0x0000); /* # NO SOP/ PAD_EN setup */
t1_tpi_write(adapter, OFFSET(0x0100), 0x0ff0); /* # RXEQB disabled */
t1_tpi_write(adapter, OFFSET(0x0101), 0x0f0f); /* # No Preemphasis */
return cmac;
}
static int pm3393_mac_reset(adapter_t * adapter)
{
u32 val;
u32 x;
u32 is_pl4_reset_finished;
u32 is_pl4_outof_lock;
u32 is_xaui_mabc_pll_locked;
u32 successful_reset;
int i;
/* The following steps are required to properly reset
* the PM3393. This information is provided in the
* PM3393 datasheet (Issue 2: November 2002)
* section 13.1 -- Device Reset.
*
* The PM3393 has three types of components that are
* individually reset:
*
* DRESETB - Digital circuitry
* PL4_ARESETB - PL4 analog circuitry
* XAUI_ARESETB - XAUI bus analog circuitry
*
* Steps to reset PM3393 using RSTB pin:
*
* 1. Assert RSTB pin low ( write 0 )
* 2. Wait at least 1ms to initiate a complete initialization of the device.
* 3. Wait until all external clocks and REFSEL are stable.
* 4. Wait a minimum of 1ms (after the external clocks and REFSEL are stable).
* 5. De-assert RSTB ( write 1 )
* 6. Wait until the internal timers expire, after ~14ms.
* - Allows analog clock synthesizer(PL4CSU) to stabilize to
* selected reference frequency before allowing the digital
* portion of the device to operate.
* 7. Wait at least 200us for XAUI interface to stabilize.
* 8. Verify that the PM3393 came out of reset successfully.
* Set the successful-reset flag if everything worked; else try
* again a few more times.
*/
successful_reset = 0;
for (i = 0; i < 3 && !successful_reset; i++) {
/* 1 */
t1_tpi_read(adapter, A_ELMER0_GPO, &val);
val &= ~1;
t1_tpi_write(adapter, A_ELMER0_GPO, val);
/* 2 */
msleep(1);
/* 3 */
msleep(1);
/* 4 */
msleep(2 /*1 extra ms for safety */ );
/* 5 */
val |= 1;
t1_tpi_write(adapter, A_ELMER0_GPO, val);
/* 6 */
msleep(15 /*1 extra ms for safety */ );
/* 7 */
msleep(1);
/* 8 */
/* Has PL4 analog block come out of reset correctly? */
t1_tpi_read(adapter, OFFSET(SUNI1x10GEXP_REG_DEVICE_STATUS), &val);
is_pl4_reset_finished = (val & SUNI1x10GEXP_BITMSK_TOP_EXPIRED);
/* TBD XXX SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL only locks later in the
* init sequence; figure out why. */
/* Have all PL4 block clocks locked? */
x = (SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL
/*| SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL */ |
SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL |
SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL |
SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL);
is_pl4_outof_lock = (val & x);
/* ??? If this fails, might be able to software reset the XAUI part
* and try to recover... thus saving us from doing another HW reset */
/* Has the XAUI MABC PLL circuitry stabilized? */
is_xaui_mabc_pll_locked =
(val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED);
successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock
&& is_xaui_mabc_pll_locked);
}
return successful_reset ? 0 : 1;
}
struct gmac t1_pm3393_ops = {
STATS_TICK_SECS,
pm3393_mac_create,
pm3393_mac_reset
};

drivers/net/chelsio/regs.h Normal file

@@ -0,0 +1,468 @@
/*****************************************************************************
* *
* File: regs.h *
* $Revision: 1.8 $ *
* $Date: 2005/06/21 18:29:48 $ *
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#ifndef _CXGB_REGS_H_
#define _CXGB_REGS_H_
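/*
 * Field accessor naming convention used throughout this file:
 *   S_<FIELD>     bit position of the field within its register
 *   M_<FIELD>     right-justified mask for a multi-bit field
 *   V_<FIELD>(x)  value x shifted into the field's position
 *   F_<FIELD>     single-bit field flag, i.e. V_<FIELD>(1U)
 *   G_<FIELD>(x)  extract the field's value from register contents x
 */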
/* SGE registers */
#define A_SG_CONTROL 0x0
#define S_CMDQ0_ENABLE 0
#define V_CMDQ0_ENABLE(x) ((x) << S_CMDQ0_ENABLE)
#define F_CMDQ0_ENABLE V_CMDQ0_ENABLE(1U)
#define S_CMDQ1_ENABLE 1
#define V_CMDQ1_ENABLE(x) ((x) << S_CMDQ1_ENABLE)
#define F_CMDQ1_ENABLE V_CMDQ1_ENABLE(1U)
#define S_FL0_ENABLE 2
#define V_FL0_ENABLE(x) ((x) << S_FL0_ENABLE)
#define F_FL0_ENABLE V_FL0_ENABLE(1U)
#define S_FL1_ENABLE 3
#define V_FL1_ENABLE(x) ((x) << S_FL1_ENABLE)
#define F_FL1_ENABLE V_FL1_ENABLE(1U)
#define S_CPL_ENABLE 4
#define V_CPL_ENABLE(x) ((x) << S_CPL_ENABLE)
#define F_CPL_ENABLE V_CPL_ENABLE(1U)
#define S_RESPONSE_QUEUE_ENABLE 5
#define V_RESPONSE_QUEUE_ENABLE(x) ((x) << S_RESPONSE_QUEUE_ENABLE)
#define F_RESPONSE_QUEUE_ENABLE V_RESPONSE_QUEUE_ENABLE(1U)
#define S_CMDQ_PRIORITY 6
#define M_CMDQ_PRIORITY 0x3
#define V_CMDQ_PRIORITY(x) ((x) << S_CMDQ_PRIORITY)
#define G_CMDQ_PRIORITY(x) (((x) >> S_CMDQ_PRIORITY) & M_CMDQ_PRIORITY)
#define S_DISABLE_CMDQ1_GTS 9
#define V_DISABLE_CMDQ1_GTS(x) ((x) << S_DISABLE_CMDQ1_GTS)
#define F_DISABLE_CMDQ1_GTS V_DISABLE_CMDQ1_GTS(1U)
#define S_DISABLE_FL0_GTS 10
#define V_DISABLE_FL0_GTS(x) ((x) << S_DISABLE_FL0_GTS)
#define F_DISABLE_FL0_GTS V_DISABLE_FL0_GTS(1U)
#define S_DISABLE_FL1_GTS 11
#define V_DISABLE_FL1_GTS(x) ((x) << S_DISABLE_FL1_GTS)
#define F_DISABLE_FL1_GTS V_DISABLE_FL1_GTS(1U)
#define S_ENABLE_BIG_ENDIAN 12
#define V_ENABLE_BIG_ENDIAN(x) ((x) << S_ENABLE_BIG_ENDIAN)
#define F_ENABLE_BIG_ENDIAN V_ENABLE_BIG_ENDIAN(1U)
#define S_ISCSI_COALESCE 14
#define V_ISCSI_COALESCE(x) ((x) << S_ISCSI_COALESCE)
#define F_ISCSI_COALESCE V_ISCSI_COALESCE(1U)
#define S_RX_PKT_OFFSET 15
#define V_RX_PKT_OFFSET(x) ((x) << S_RX_PKT_OFFSET)
#define S_VLAN_XTRACT 18
#define V_VLAN_XTRACT(x) ((x) << S_VLAN_XTRACT)
#define F_VLAN_XTRACT V_VLAN_XTRACT(1U)
#define A_SG_DOORBELL 0x4
#define A_SG_CMD0BASELWR 0x8
#define A_SG_CMD0BASEUPR 0xc
#define A_SG_CMD1BASELWR 0x10
#define A_SG_CMD1BASEUPR 0x14
#define A_SG_FL0BASELWR 0x18
#define A_SG_FL0BASEUPR 0x1c
#define A_SG_FL1BASELWR 0x20
#define A_SG_FL1BASEUPR 0x24
#define A_SG_CMD0SIZE 0x28
#define A_SG_FL0SIZE 0x2c
#define A_SG_RSPSIZE 0x30
#define A_SG_RSPBASELWR 0x34
#define A_SG_RSPBASEUPR 0x38
#define A_SG_FLTHRESHOLD 0x3c
#define A_SG_RSPQUEUECREDIT 0x40
#define A_SG_SLEEPING 0x48
#define A_SG_INTRTIMER 0x4c
#define A_SG_CMD1SIZE 0xb0
#define A_SG_FL1SIZE 0xb4
#define A_SG_INT_ENABLE 0xb8
#define S_RESPQ_EXHAUSTED 0
#define V_RESPQ_EXHAUSTED(x) ((x) << S_RESPQ_EXHAUSTED)
#define F_RESPQ_EXHAUSTED V_RESPQ_EXHAUSTED(1U)
#define S_RESPQ_OVERFLOW 1
#define V_RESPQ_OVERFLOW(x) ((x) << S_RESPQ_OVERFLOW)
#define F_RESPQ_OVERFLOW V_RESPQ_OVERFLOW(1U)
#define S_FL_EXHAUSTED 2
#define V_FL_EXHAUSTED(x) ((x) << S_FL_EXHAUSTED)
#define F_FL_EXHAUSTED V_FL_EXHAUSTED(1U)
#define S_PACKET_TOO_BIG 3
#define V_PACKET_TOO_BIG(x) ((x) << S_PACKET_TOO_BIG)
#define F_PACKET_TOO_BIG V_PACKET_TOO_BIG(1U)
#define S_PACKET_MISMATCH 4
#define V_PACKET_MISMATCH(x) ((x) << S_PACKET_MISMATCH)
#define F_PACKET_MISMATCH V_PACKET_MISMATCH(1U)
#define A_SG_INT_CAUSE 0xbc
#define A_SG_RESPACCUTIMER 0xc0
/* MC3 registers */
#define S_READY 1
#define V_READY(x) ((x) << S_READY)
#define F_READY V_READY(1U)
/* MC4 registers */
#define A_MC4_CFG 0x180
#define S_MC4_SLOW 25
#define V_MC4_SLOW(x) ((x) << S_MC4_SLOW)
#define F_MC4_SLOW V_MC4_SLOW(1U)
/* TPI registers */
#define A_TPI_ADDR 0x280
#define A_TPI_WR_DATA 0x284
#define A_TPI_RD_DATA 0x288
#define A_TPI_CSR 0x28c
#define S_TPIWR 0
#define V_TPIWR(x) ((x) << S_TPIWR)
#define F_TPIWR V_TPIWR(1U)
#define S_TPIRDY 1
#define V_TPIRDY(x) ((x) << S_TPIRDY)
#define F_TPIRDY V_TPIRDY(1U)
#define A_TPI_PAR 0x29c
#define S_TPIPAR 0
#define M_TPIPAR 0x7f
#define V_TPIPAR(x) ((x) << S_TPIPAR)
#define G_TPIPAR(x) (((x) >> S_TPIPAR) & M_TPIPAR)
/* TP registers */
#define A_TP_IN_CONFIG 0x300
#define S_TP_IN_CSPI_CPL 3
#define V_TP_IN_CSPI_CPL(x) ((x) << S_TP_IN_CSPI_CPL)
#define F_TP_IN_CSPI_CPL V_TP_IN_CSPI_CPL(1U)
#define S_TP_IN_CSPI_CHECK_IP_CSUM 5
#define V_TP_IN_CSPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_IP_CSUM)
#define F_TP_IN_CSPI_CHECK_IP_CSUM V_TP_IN_CSPI_CHECK_IP_CSUM(1U)
#define S_TP_IN_CSPI_CHECK_TCP_CSUM 6
#define V_TP_IN_CSPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_TCP_CSUM)
#define F_TP_IN_CSPI_CHECK_TCP_CSUM V_TP_IN_CSPI_CHECK_TCP_CSUM(1U)
#define S_TP_IN_ESPI_ETHERNET 8
#define V_TP_IN_ESPI_ETHERNET(x) ((x) << S_TP_IN_ESPI_ETHERNET)
#define F_TP_IN_ESPI_ETHERNET V_TP_IN_ESPI_ETHERNET(1U)
#define S_TP_IN_ESPI_CHECK_IP_CSUM 12
#define V_TP_IN_ESPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_IP_CSUM)
#define F_TP_IN_ESPI_CHECK_IP_CSUM V_TP_IN_ESPI_CHECK_IP_CSUM(1U)
#define S_TP_IN_ESPI_CHECK_TCP_CSUM 13
#define V_TP_IN_ESPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_TCP_CSUM)
#define F_TP_IN_ESPI_CHECK_TCP_CSUM V_TP_IN_ESPI_CHECK_TCP_CSUM(1U)
#define S_OFFLOAD_DISABLE 14
#define V_OFFLOAD_DISABLE(x) ((x) << S_OFFLOAD_DISABLE)
#define F_OFFLOAD_DISABLE V_OFFLOAD_DISABLE(1U)
#define A_TP_OUT_CONFIG 0x304
#define S_TP_OUT_CSPI_CPL 2
#define V_TP_OUT_CSPI_CPL(x) ((x) << S_TP_OUT_CSPI_CPL)
#define F_TP_OUT_CSPI_CPL V_TP_OUT_CSPI_CPL(1U)
#define S_TP_OUT_ESPI_ETHERNET 6
#define V_TP_OUT_ESPI_ETHERNET(x) ((x) << S_TP_OUT_ESPI_ETHERNET)
#define F_TP_OUT_ESPI_ETHERNET V_TP_OUT_ESPI_ETHERNET(1U)
#define S_TP_OUT_ESPI_GENERATE_IP_CSUM 10
#define V_TP_OUT_ESPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_IP_CSUM)
#define F_TP_OUT_ESPI_GENERATE_IP_CSUM V_TP_OUT_ESPI_GENERATE_IP_CSUM(1U)
#define S_TP_OUT_ESPI_GENERATE_TCP_CSUM 11
#define V_TP_OUT_ESPI_GENERATE_TCP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_TCP_CSUM)
#define F_TP_OUT_ESPI_GENERATE_TCP_CSUM V_TP_OUT_ESPI_GENERATE_TCP_CSUM(1U)
#define A_TP_GLOBAL_CONFIG 0x308
#define S_IP_TTL 0
#define M_IP_TTL 0xff
#define V_IP_TTL(x) ((x) << S_IP_TTL)
#define S_TCP_CSUM 11
#define V_TCP_CSUM(x) ((x) << S_TCP_CSUM)
#define F_TCP_CSUM V_TCP_CSUM(1U)
#define S_UDP_CSUM 12
#define V_UDP_CSUM(x) ((x) << S_UDP_CSUM)
#define F_UDP_CSUM V_UDP_CSUM(1U)
#define S_IP_CSUM 13
#define V_IP_CSUM(x) ((x) << S_IP_CSUM)
#define F_IP_CSUM V_IP_CSUM(1U)
#define S_PATH_MTU 15
#define V_PATH_MTU(x) ((x) << S_PATH_MTU)
#define F_PATH_MTU V_PATH_MTU(1U)
#define S_5TUPLE_LOOKUP 17
#define V_5TUPLE_LOOKUP(x) ((x) << S_5TUPLE_LOOKUP)
#define S_SYN_COOKIE_PARAMETER 26
#define V_SYN_COOKIE_PARAMETER(x) ((x) << S_SYN_COOKIE_PARAMETER)
#define A_TP_PC_CONFIG 0x348
#define S_DIS_TX_FILL_WIN_PUSH 12
#define V_DIS_TX_FILL_WIN_PUSH(x) ((x) << S_DIS_TX_FILL_WIN_PUSH)
#define F_DIS_TX_FILL_WIN_PUSH V_DIS_TX_FILL_WIN_PUSH(1U)
#define S_TP_PC_REV 30
#define M_TP_PC_REV 0x3
#define G_TP_PC_REV(x) (((x) >> S_TP_PC_REV) & M_TP_PC_REV)
#define A_TP_RESET 0x44c
#define S_TP_RESET 0
#define V_TP_RESET(x) ((x) << S_TP_RESET)
#define F_TP_RESET V_TP_RESET(1U)
#define A_TP_INT_ENABLE 0x470
#define A_TP_INT_CAUSE 0x474
#define A_TP_TX_DROP_CONFIG 0x4b8
#define S_ENABLE_TX_DROP 31
#define V_ENABLE_TX_DROP(x) ((x) << S_ENABLE_TX_DROP)
#define F_ENABLE_TX_DROP V_ENABLE_TX_DROP(1U)
#define S_ENABLE_TX_ERROR 30
#define V_ENABLE_TX_ERROR(x) ((x) << S_ENABLE_TX_ERROR)
#define F_ENABLE_TX_ERROR V_ENABLE_TX_ERROR(1U)
#define S_DROP_TICKS_CNT 4
#define V_DROP_TICKS_CNT(x) ((x) << S_DROP_TICKS_CNT)
#define S_NUM_PKTS_DROPPED 0
#define V_NUM_PKTS_DROPPED(x) ((x) << S_NUM_PKTS_DROPPED)
/* CSPI registers */
#define S_DIP4ERR 0
#define V_DIP4ERR(x) ((x) << S_DIP4ERR)
#define F_DIP4ERR V_DIP4ERR(1U)
#define S_RXDROP 1
#define V_RXDROP(x) ((x) << S_RXDROP)
#define F_RXDROP V_RXDROP(1U)
#define S_TXDROP 2
#define V_TXDROP(x) ((x) << S_TXDROP)
#define F_TXDROP V_TXDROP(1U)
#define S_RXOVERFLOW 3
#define V_RXOVERFLOW(x) ((x) << S_RXOVERFLOW)
#define F_RXOVERFLOW V_RXOVERFLOW(1U)
#define S_RAMPARITYERR 4
#define V_RAMPARITYERR(x) ((x) << S_RAMPARITYERR)
#define F_RAMPARITYERR V_RAMPARITYERR(1U)
/* ESPI registers */
#define A_ESPI_SCH_TOKEN0 0x880
#define A_ESPI_SCH_TOKEN1 0x884
#define A_ESPI_SCH_TOKEN2 0x888
#define A_ESPI_SCH_TOKEN3 0x88c
#define A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK 0x890
#define A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK 0x894
#define A_ESPI_CALENDAR_LENGTH 0x898
#define A_PORT_CONFIG 0x89c
#define S_RX_NPORTS 0
#define V_RX_NPORTS(x) ((x) << S_RX_NPORTS)
#define S_TX_NPORTS 8
#define V_TX_NPORTS(x) ((x) << S_TX_NPORTS)
#define A_ESPI_FIFO_STATUS_ENABLE 0x8a0
#define S_RXSTATUSENABLE 0
#define V_RXSTATUSENABLE(x) ((x) << S_RXSTATUSENABLE)
#define F_RXSTATUSENABLE V_RXSTATUSENABLE(1U)
#define S_INTEL1010MODE 4
#define V_INTEL1010MODE(x) ((x) << S_INTEL1010MODE)
#define F_INTEL1010MODE V_INTEL1010MODE(1U)
#define A_ESPI_MAXBURST1_MAXBURST2 0x8a8
#define A_ESPI_TRAIN 0x8ac
#define A_ESPI_INTR_STATUS 0x8c8
#define S_DIP2PARITYERR 5
#define V_DIP2PARITYERR(x) ((x) << S_DIP2PARITYERR)
#define F_DIP2PARITYERR V_DIP2PARITYERR(1U)
#define A_ESPI_INTR_ENABLE 0x8cc
#define A_RX_DROP_THRESHOLD 0x8d0
#define A_ESPI_RX_RESET 0x8ec
#define A_ESPI_MISC_CONTROL 0x8f0
#define S_OUT_OF_SYNC_COUNT 0
#define V_OUT_OF_SYNC_COUNT(x) ((x) << S_OUT_OF_SYNC_COUNT)
#define S_DIP2_PARITY_ERR_THRES 5
#define V_DIP2_PARITY_ERR_THRES(x) ((x) << S_DIP2_PARITY_ERR_THRES)
#define S_DIP4_THRES 9
#define V_DIP4_THRES(x) ((x) << S_DIP4_THRES)
#define S_MONITORED_PORT_NUM 25
#define V_MONITORED_PORT_NUM(x) ((x) << S_MONITORED_PORT_NUM)
#define S_MONITORED_DIRECTION 27
#define V_MONITORED_DIRECTION(x) ((x) << S_MONITORED_DIRECTION)
#define F_MONITORED_DIRECTION V_MONITORED_DIRECTION(1U)
#define S_MONITORED_INTERFACE 28
#define V_MONITORED_INTERFACE(x) ((x) << S_MONITORED_INTERFACE)
#define F_MONITORED_INTERFACE V_MONITORED_INTERFACE(1U)
#define A_ESPI_DIP2_ERR_COUNT 0x8f4
#define A_ESPI_CMD_ADDR 0x8f8
#define S_WRITE_DATA 0
#define V_WRITE_DATA(x) ((x) << S_WRITE_DATA)
#define S_REGISTER_OFFSET 8
#define V_REGISTER_OFFSET(x) ((x) << S_REGISTER_OFFSET)
#define S_CHANNEL_ADDR 12
#define V_CHANNEL_ADDR(x) ((x) << S_CHANNEL_ADDR)
#define S_MODULE_ADDR 16
#define V_MODULE_ADDR(x) ((x) << S_MODULE_ADDR)
#define S_BUNDLE_ADDR 20
#define V_BUNDLE_ADDR(x) ((x) << S_BUNDLE_ADDR)
#define S_SPI4_COMMAND 24
#define V_SPI4_COMMAND(x) ((x) << S_SPI4_COMMAND)
#define A_ESPI_GOSTAT 0x8fc
#define S_ESPI_CMD_BUSY 8
#define V_ESPI_CMD_BUSY(x) ((x) << S_ESPI_CMD_BUSY)
#define F_ESPI_CMD_BUSY V_ESPI_CMD_BUSY(1U)
/* PL registers */
#define A_PL_ENABLE 0xa00
#define S_PL_INTR_SGE_ERR 0
#define V_PL_INTR_SGE_ERR(x) ((x) << S_PL_INTR_SGE_ERR)
#define F_PL_INTR_SGE_ERR V_PL_INTR_SGE_ERR(1U)
#define S_PL_INTR_SGE_DATA 1
#define V_PL_INTR_SGE_DATA(x) ((x) << S_PL_INTR_SGE_DATA)
#define F_PL_INTR_SGE_DATA V_PL_INTR_SGE_DATA(1U)
#define S_PL_INTR_TP 6
#define V_PL_INTR_TP(x) ((x) << S_PL_INTR_TP)
#define F_PL_INTR_TP V_PL_INTR_TP(1U)
#define S_PL_INTR_ESPI 8
#define V_PL_INTR_ESPI(x) ((x) << S_PL_INTR_ESPI)
#define F_PL_INTR_ESPI V_PL_INTR_ESPI(1U)
#define S_PL_INTR_PCIX 10
#define V_PL_INTR_PCIX(x) ((x) << S_PL_INTR_PCIX)
#define F_PL_INTR_PCIX V_PL_INTR_PCIX(1U)
#define S_PL_INTR_EXT 11
#define V_PL_INTR_EXT(x) ((x) << S_PL_INTR_EXT)
#define F_PL_INTR_EXT V_PL_INTR_EXT(1U)
#define A_PL_CAUSE 0xa04
/* MC5 registers */
#define A_MC5_CONFIG 0xc04
#define S_TCAM_RESET 1
#define V_TCAM_RESET(x) ((x) << S_TCAM_RESET)
#define F_TCAM_RESET V_TCAM_RESET(1U)
#define S_M_BUS_ENABLE 5
#define V_M_BUS_ENABLE(x) ((x) << S_M_BUS_ENABLE)
#define F_M_BUS_ENABLE V_M_BUS_ENABLE(1U)
/* PCICFG registers */
#define A_PCICFG_PM_CSR 0x44
#define A_PCICFG_VPD_ADDR 0x4a
#define S_VPD_OP_FLAG 15
#define V_VPD_OP_FLAG(x) ((x) << S_VPD_OP_FLAG)
#define F_VPD_OP_FLAG V_VPD_OP_FLAG(1U)
#define A_PCICFG_VPD_DATA 0x4c
#define A_PCICFG_INTR_ENABLE 0xf4
#define A_PCICFG_INTR_CAUSE 0xf8
#define A_PCICFG_MODE 0xfc
#define S_PCI_MODE_64BIT 0
#define V_PCI_MODE_64BIT(x) ((x) << S_PCI_MODE_64BIT)
#define F_PCI_MODE_64BIT V_PCI_MODE_64BIT(1U)
#define S_PCI_MODE_PCIX 5
#define V_PCI_MODE_PCIX(x) ((x) << S_PCI_MODE_PCIX)
#define F_PCI_MODE_PCIX V_PCI_MODE_PCIX(1U)
#define S_PCI_MODE_CLK 6
#define M_PCI_MODE_CLK 0x3
#define G_PCI_MODE_CLK(x) (((x) >> S_PCI_MODE_CLK) & M_PCI_MODE_CLK)
#endif /* _CXGB_REGS_H_ */

drivers/net/chelsio/sge.c Normal file (1684 lines; diff not shown because it is too large)

drivers/net/chelsio/sge.h Normal file

@@ -0,0 +1,105 @@
/*****************************************************************************
* *
* File: sge.h *
* $Revision: 1.11 $ *
* $Date: 2005/06/21 22:10:55 $ *
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#ifndef _CXGB_SGE_H_
#define _CXGB_SGE_H_
#include <linux/types.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
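/* Compatibility shim for kernels that predate irqreturn_t: fall back to a
 * void handler type and a no-op IRQ_RETVAL. */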
#ifndef IRQ_RETVAL
#define IRQ_RETVAL(x)
typedef void irqreturn_t;
#endif
typedef irqreturn_t (*intr_handler_t)(int, void *, struct pt_regs *);
struct sge_intr_counts {
unsigned int respQ_empty; /* # times respQ empty */
unsigned int respQ_overflow; /* # respQ overflow (fatal) */
unsigned int freelistQ_empty; /* # times freelist empty */
unsigned int pkt_too_big; /* packet too large (fatal) */
unsigned int pkt_mismatch;
unsigned int cmdQ_full[3]; /* not HW IRQ, host cmdQ[] full */
unsigned int cmdQ_restarted[3];/* # of times cmdQ X was restarted */
unsigned int ethernet_pkts; /* # of Ethernet packets received */
unsigned int offload_pkts; /* # of offload packets received */
unsigned int offload_bundles; /* # of offload pkt bundles delivered */
unsigned int pure_rsps; /* # of non-payload responses */
unsigned int unhandled_irqs; /* # of unhandled interrupts */
unsigned int tx_ipfrags;
unsigned int tx_reg_pkts;
unsigned int tx_lso_pkts;
unsigned int tx_do_cksum;
};
struct sge_port_stats {
unsigned long rx_cso_good; /* # of successful RX csum offloads */
unsigned long tx_cso; /* # of TX checksum offloads */
unsigned long vlan_xtract; /* # of VLAN tag extractions */
unsigned long vlan_insert; /* # of VLAN tag insertions */
unsigned long tso; /* # of TSO requests */
unsigned long rx_drops; /* # of packets dropped due to no mem */
};
struct sk_buff;
struct net_device;
struct adapter;
struct sge_params;
struct sge;
struct sge *t1_sge_create(struct adapter *, struct sge_params *);
int t1_sge_configure(struct sge *, struct sge_params *);
int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
void t1_sge_destroy(struct sge *);
intr_handler_t t1_select_intr_handler(adapter_t *adapter);
unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
unsigned int qid, struct net_device *netdev);
int t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
void t1_set_vlan_accel(struct adapter *adapter, int on_off);
void t1_sge_start(struct sge *);
void t1_sge_stop(struct sge *);
int t1_sge_intr_error_handler(struct sge *);
void t1_sge_intr_enable(struct sge *);
void t1_sge_intr_disable(struct sge *);
void t1_sge_intr_clear(struct sge *);
const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge);
const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port);
#endif /* _CXGB_SGE_H_ */

drivers/net/chelsio/subr.c Normal file

@@ -0,0 +1,812 @@
/*****************************************************************************
* *
* File: subr.c *
* $Revision: 1.27 $ *
* $Date: 2005/06/22 01:08:36 $ *
* Description: *
* Various subroutines (intr,pio,etc.) used by Chelsio 10G Ethernet driver. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#include "common.h"
#include "elmer0.h"
#include "regs.h"
#include "gmac.h"
#include "cphy.h"
#include "sge.h"
#include "espi.h"
/**
* t1_wait_op_done - wait until an operation is completed
* @adapter: the adapter performing the operation
* @reg: the register to check for completion
* @mask: a single-bit field within @reg that indicates completion
* @polarity: the value of the field when the operation is completed
* @attempts: number of check iterations
* @delay: delay in usecs between iterations
*
* Wait until an operation is completed by checking a bit in a register
* up to @attempts times. Returns %0 if the operation completes and %1
* otherwise.
*/
static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity,
int attempts, int delay)
{
while (1) {
u32 val = readl(adapter->regs + reg) & mask;
if (!!val == polarity)
return 0;
if (--attempts == 0)
return 1;
if (delay)
udelay(delay);
}
}
#define TPI_ATTEMPTS 50
/*
* Write a register over the TPI interface (unlocked and locked versions).
*/
static int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
{
int tpi_busy;
writel(addr, adapter->regs + A_TPI_ADDR);
writel(value, adapter->regs + A_TPI_WR_DATA);
writel(F_TPIWR, adapter->regs + A_TPI_CSR);
tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
TPI_ATTEMPTS, 3);
if (tpi_busy)
CH_ALERT("%s: TPI write to 0x%x failed\n",
adapter->name, addr);
return tpi_busy;
}
int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
{
int ret;
spin_lock(&(adapter)->tpi_lock);
ret = __t1_tpi_write(adapter, addr, value);
spin_unlock(&(adapter)->tpi_lock);
return ret;
}
/*
* Read a register over the TPI interface (unlocked and locked versions).
*/
static int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
{
int tpi_busy;
writel(addr, adapter->regs + A_TPI_ADDR);
writel(0, adapter->regs + A_TPI_CSR);
tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
TPI_ATTEMPTS, 3);
if (tpi_busy)
CH_ALERT("%s: TPI read from 0x%x failed\n",
adapter->name, addr);
else
*valp = readl(adapter->regs + A_TPI_RD_DATA);
return tpi_busy;
}
int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
{
int ret;
spin_lock(&(adapter)->tpi_lock);
ret = __t1_tpi_read(adapter, addr, valp);
spin_unlock(&(adapter)->tpi_lock);
return ret;
}
/*
* Called when a port's link settings change to propagate the new values to the
* associated PHY and MAC. After performing the common tasks it invokes an
* OS-specific handler.
*/
/* static */ void link_changed(adapter_t *adapter, int port_id)
{
int link_ok, speed, duplex, fc;
struct cphy *phy = adapter->port[port_id].phy;
struct link_config *lc = &adapter->port[port_id].link_config;
phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
lc->speed = speed < 0 ? SPEED_INVALID : speed;
lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
if (!(lc->requested_fc & PAUSE_AUTONEG))
fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
/* Set MAC speed, duplex, and flow control to match PHY. */
struct cmac *mac = adapter->port[port_id].mac;
mac->ops->set_speed_duplex_fc(mac, speed, duplex, fc);
lc->fc = (unsigned char)fc;
}
t1_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}
static int t1_pci_intr_handler(adapter_t *adapter)
{
u32 pcix_cause;
pci_read_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, &pcix_cause);
if (pcix_cause) {
pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE,
pcix_cause);
t1_fatal_err(adapter); /* PCI errors are fatal */
}
return 0;
}
/*
* Wait until Elmer's MI1 interface is ready for new operations.
*/
static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg)
{
int attempts = 100, busy;
do {
u32 val;
__t1_tpi_read(adapter, mi1_reg, &val);
busy = val & F_MI1_OP_BUSY;
if (busy)
udelay(10);
} while (busy && --attempts);
if (busy)
CH_ALERT("%s: MDIO operation timed out\n",
adapter->name);
return busy;
}
/*
* MI1 MDIO initialization.
*/
static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi)
{
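/*
 * MDC runs at clock_elmer0 / (2 * (clkdiv + 1)), so the divider below
 * solves that for clkdiv. With the N110/N210 board values (clock_elmer0
 * = 44, mdio_mdc = 1, apparently in MHz), clkdiv = 21 for a 1MHz MDC.
 */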
u32 clkdiv = bi->clock_elmer0 / (2 * bi->mdio_mdc) - 1;
u32 val = F_MI1_PREAMBLE_ENABLE | V_MI1_MDI_INVERT(bi->mdio_mdiinv) |
V_MI1_MDI_ENABLE(bi->mdio_mdien) | V_MI1_CLK_DIV(clkdiv);
if (!(bi->caps & SUPPORTED_10000baseT_Full))
val |= V_MI1_SOF(1);
t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val);
}
static int mi1_mdio_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int *valp)
{
u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
spin_lock(&(adapter)->tpi_lock);
/* Write the address we want. */
__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr);
__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP,
MI1_OP_INDIRECT_ADDRESS);
mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
/* Write the operation we want. */
__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ);
mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
/* Read the data. */
__t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp);
spin_unlock(&(adapter)->tpi_lock);
return 0;
}
static int mi1_mdio_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int val)
{
u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
spin_lock(&(adapter)->tpi_lock);
/* Write the address we want. */
__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr);
__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP,
MI1_OP_INDIRECT_ADDRESS);
mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
/* Write the data. */
__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val);
__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE);
mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
spin_unlock(&(adapter)->tpi_lock);
return 0;
}
static struct mdio_ops mi1_mdio_ext_ops = {
mi1_mdio_init,
mi1_mdio_ext_read,
mi1_mdio_ext_write
};
enum {
CH_BRD_N110_1F,
CH_BRD_N210_1F,
};
static struct board_info t1_board[] = {
{ CHBT_BOARD_N110, 1/*ports#*/,
SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T1,
CHBT_MAC_PM3393, CHBT_PHY_88X2010,
125000000/*clk-core*/, 0/*clk-mc3*/, 0/*clk-mc4*/,
1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/,
0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops,
&t1_mv88x201x_ops, &mi1_mdio_ext_ops,
"Chelsio N110 1x10GBaseX NIC" },
{ CHBT_BOARD_N210, 1/*ports#*/,
SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T2,
CHBT_MAC_PM3393, CHBT_PHY_88X2010,
125000000/*clk-core*/, 0/*clk-mc3*/, 0/*clk-mc4*/,
1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/,
0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops,
&t1_mv88x201x_ops, &mi1_mdio_ext_ops,
"Chelsio N210 1x10GBaseX NIC" },
};
struct pci_device_id t1_pci_tbl[] = {
CH_DEVICE(7, 0, CH_BRD_N110_1F),
CH_DEVICE(10, 1, CH_BRD_N210_1F),
{ 0, }
};
MODULE_DEVICE_TABLE(pci, t1_pci_tbl);
/*
* Return the board_info structure with a given index. Out-of-range indices
* return NULL.
*/
const struct board_info *t1_get_board_info(unsigned int board_id)
{
return board_id < ARRAY_SIZE(t1_board) ? &t1_board[board_id] : NULL;
}
struct chelsio_vpd_t {
u32 format_version;
u8 serial_number[16];
u8 mac_base_address[6];
u8 pad[2]; /* make multiple-of-4 size requirement explicit */
};
#define EEPROMSIZE (8 * 1024)
#define EEPROM_MAX_POLL 4
/*
 * Read the SEEPROM. A zero is written to the flag register when the address
 * is written to the control register. The hardware sets the flag to one
 * when 4 bytes have been transferred into the data register.
 */
int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
{
int i = EEPROM_MAX_POLL;
u16 val;
if (addr >= EEPROMSIZE || (addr & 3))
return -EINVAL;
pci_write_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, (u16)addr);
do {
udelay(50);
pci_read_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, &val);
} while (!(val & F_VPD_OP_FLAG) && --i);
if (!(val & F_VPD_OP_FLAG)) {
CH_ERR("%s: reading EEPROM address 0x%x failed\n",
adapter->name, addr);
return -EIO;
}
pci_read_config_dword(adapter->pdev, A_PCICFG_VPD_DATA, data);
*data = le32_to_cpu(*data);
return 0;
}
static int t1_eeprom_vpd_get(adapter_t *adapter, struct chelsio_vpd_t *vpd)
{
int addr, ret = 0;
for (addr = 0; !ret && addr < sizeof(*vpd); addr += sizeof(u32))
ret = t1_seeprom_read(adapter, addr,
(u32 *)((u8 *)vpd + addr));
return ret;
}
/*
* Read a port's MAC address from the VPD ROM.
*/
static int vpd_macaddress_get(adapter_t *adapter, int index, u8 mac_addr[])
{
struct chelsio_vpd_t vpd;
if (t1_eeprom_vpd_get(adapter, &vpd))
return 1;
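/* Ports get consecutive MAC addresses: the first five octets come straight
 * from the VPD base address and the port index is added to the last octet. */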
memcpy(mac_addr, vpd.mac_base_address, 5);
mac_addr[5] = vpd.mac_base_address[5] + index;
return 0;
}
/*
* Set up the MAC/PHY according to the requested link settings.
*
* If the PHY can auto-negotiate first decide what to advertise, then
* enable/disable auto-negotiation as desired and reset.
*
* If the PHY does not auto-negotiate we just reset it.
*
* If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
* otherwise do it later based on the outcome of auto-negotiation.
*/
int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
if (lc->supported & SUPPORTED_Autoneg) {
lc->advertising &= ~(ADVERTISED_ASYM_PAUSE | ADVERTISED_PAUSE);
if (fc) {
lc->advertising |= ADVERTISED_ASYM_PAUSE;
if (fc == (PAUSE_RX | PAUSE_TX))
lc->advertising |= ADVERTISED_PAUSE;
}
phy->ops->advertise(phy, lc->advertising);
if (lc->autoneg == AUTONEG_DISABLE) {
lc->speed = lc->requested_speed;
lc->duplex = lc->requested_duplex;
lc->fc = (unsigned char)fc;
mac->ops->set_speed_duplex_fc(mac, lc->speed,
lc->duplex, fc);
/* Also disables autoneg */
phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
phy->ops->reset(phy, 0);
} else
phy->ops->autoneg_enable(phy); /* also resets PHY */
} else {
mac->ops->set_speed_duplex_fc(mac, -1, -1, fc);
lc->fc = (unsigned char)fc;
phy->ops->reset(phy, 0);
}
return 0;
}
/*
* External interrupt handler for boards using elmer0.
*/
int elmer0_ext_intr_handler(adapter_t *adapter)
{
struct cphy *phy;
int phy_cause;
u32 cause;
t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause);
switch (board_info(adapter)->board) {
case CHBT_BOARD_N210:
case CHBT_BOARD_N110:
if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */
phy = adapter->port[0].phy;
phy_cause = phy->ops->interrupt_handler(phy);
if (phy_cause & cphy_cause_link_change)
link_changed(adapter, 0);
}
break;
}
t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause);
return 0;
}
/* Enables all interrupts. */
void t1_interrupts_enable(adapter_t *adapter)
{
unsigned int i;
u32 pl_intr;
adapter->slow_intr_mask = F_PL_INTR_SGE_ERR;
t1_sge_intr_enable(adapter->sge);
if (adapter->espi) {
adapter->slow_intr_mask |= F_PL_INTR_ESPI;
t1_espi_intr_enable(adapter->espi);
}
/* Enable MAC/PHY interrupts for each port. */
for_each_port(adapter, i) {
adapter->port[i].mac->ops->interrupt_enable(adapter->port[i].mac);
adapter->port[i].phy->ops->interrupt_enable(adapter->port[i].phy);
}
/* Enable PCIX & external chip interrupts on ASIC boards. */
pl_intr = readl(adapter->regs + A_PL_ENABLE);
/* PCI-X interrupts */
pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE,
0xffffffff);
adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
writel(pl_intr, adapter->regs + A_PL_ENABLE);
}
/* Disables all interrupts. */
void t1_interrupts_disable(adapter_t* adapter)
{
unsigned int i;
t1_sge_intr_disable(adapter->sge);
if (adapter->espi)
t1_espi_intr_disable(adapter->espi);
/* Disable MAC/PHY interrupts for each port. */
for_each_port(adapter, i) {
adapter->port[i].mac->ops->interrupt_disable(adapter->port[i].mac);
adapter->port[i].phy->ops->interrupt_disable(adapter->port[i].phy);
}
/* Disable PCIX & external chip interrupts. */
writel(0, adapter->regs + A_PL_ENABLE);
/* PCI-X interrupts */
pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0);
adapter->slow_intr_mask = 0;
}
/* Clears all interrupts */
void t1_interrupts_clear(adapter_t* adapter)
{
unsigned int i;
u32 pl_intr;
t1_sge_intr_clear(adapter->sge);
if (adapter->espi)
t1_espi_intr_clear(adapter->espi);
/* Clear MAC/PHY interrupts for each port. */
for_each_port(adapter, i) {
adapter->port[i].mac->ops->interrupt_clear(adapter->port[i].mac);
adapter->port[i].phy->ops->interrupt_clear(adapter->port[i].phy);
}
/* Enable interrupts for external devices. */
pl_intr = readl(adapter->regs + A_PL_CAUSE);
writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX,
adapter->regs + A_PL_CAUSE);
/* PCI-X interrupts */
pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff);
}
/*
* Slow path interrupt handler for ASICs.
*/
int t1_slow_intr_handler(adapter_t *adapter)
{
u32 cause = readl(adapter->regs + A_PL_CAUSE);
cause &= adapter->slow_intr_mask;
if (!cause)
return 0;
if (cause & F_PL_INTR_SGE_ERR)
t1_sge_intr_error_handler(adapter->sge);
if (cause & F_PL_INTR_ESPI)
t1_espi_intr_handler(adapter->espi);
if (cause & F_PL_INTR_PCIX)
t1_pci_intr_handler(adapter);
if (cause & F_PL_INTR_EXT)
t1_elmer0_ext_intr(adapter);
/* Clear the interrupts just processed. */
writel(cause, adapter->regs + A_PL_CAUSE);
(void)readl(adapter->regs + A_PL_CAUSE); /* flush writes */
return 1;
}
/* Pause deadlock avoidance parameters */
#define DROP_MSEC 16
#define DROP_PKTS_CNT 1
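/*
* t1_tp_reset() converts DROP_MSEC into core-clock ticks as
* DROP_MSEC * (tp_clk / 1000). For example, with a hypothetical
* 125 MHz core clock that is 16 * 125000 = 2,000,000 ticks before up
* to DROP_PKTS_CNT packets are dropped to break a pause deadlock.
*/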
static void set_csum_offload(adapter_t *adapter, u32 csum_bit, int enable)
{
u32 val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
if (enable)
val |= csum_bit;
else
val &= ~csum_bit;
writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
}
void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable)
{
set_csum_offload(adapter, F_IP_CSUM, enable);
}
void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable)
{
set_csum_offload(adapter, F_UDP_CSUM, enable);
}
void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable)
{
set_csum_offload(adapter, F_TCP_CSUM, enable);
}
static void t1_tp_reset(adapter_t *adapter, unsigned int tp_clk)
{
u32 val;
val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
val |= F_TP_IN_ESPI_CHECK_IP_CSUM |
F_TP_IN_ESPI_CHECK_TCP_CSUM;
writel(val, adapter->regs + A_TP_IN_CONFIG);
writel(F_TP_OUT_CSPI_CPL |
F_TP_OUT_ESPI_ETHERNET |
F_TP_OUT_ESPI_GENERATE_IP_CSUM |
F_TP_OUT_ESPI_GENERATE_TCP_CSUM,
adapter->regs + A_TP_OUT_CONFIG);
val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
val &= ~(F_IP_CSUM | F_UDP_CSUM | F_TCP_CSUM);
writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
/*
* Enable pause frame deadlock prevention.
*/
if (is_T2(adapter)) {
u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
V_DROP_TICKS_CNT(drop_ticks) |
V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
adapter->regs + A_TP_TX_DROP_CONFIG);
}
writel(F_TP_RESET, adapter->regs + A_TP_RESET);
}
int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
struct adapter_params *p)
{
p->chip_version = bi->chip_term;
if (p->chip_version == CHBT_TERM_T1 ||
p->chip_version == CHBT_TERM_T2) {
u32 val = readl(adapter->regs + A_TP_PC_CONFIG);
val = G_TP_PC_REV(val);
if (val == 2)
p->chip_revision = TERM_T1B;
else if (val == 3)
p->chip_revision = TERM_T2;
else
return -1;
} else
return -1;
return 0;
}
/*
* Enable board components other than the Chelsio chip, such as external MAC
* and PHY.
*/
static int board_init(adapter_t *adapter, const struct board_info *bi)
{
switch (bi->board) {
case CHBT_BOARD_N110:
case CHBT_BOARD_N210:
writel(V_TPIPAR(0xf), adapter->regs + A_TPI_PAR);
t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
break;
}
return 0;
}
/*
* Initialize and configure the Terminator HW modules. Note that external
* MAC and PHYs are initialized separately.
*/
int t1_init_hw_modules(adapter_t *adapter)
{
int err = -EIO;
const struct board_info *bi = board_info(adapter);
if (!bi->clock_mc4) {
u32 val = readl(adapter->regs + A_MC4_CFG);
writel(val | F_READY | F_MC4_SLOW, adapter->regs + A_MC4_CFG);
writel(F_M_BUS_ENABLE | F_TCAM_RESET,
adapter->regs + A_MC5_CONFIG);
}
if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac,
bi->espi_nports))
goto out_err;
t1_tp_reset(adapter, bi->clock_core);
err = t1_sge_configure(adapter->sge, &adapter->params.sge);
if (err)
goto out_err;
err = 0;
out_err:
return err;
}
/*
* Determine a card's PCI mode.
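* The mode register packs the bus clock code (G_PCI_MODE_CLK, used to
* index speed_map[] below), the bus width, and a PCI-X flag; e.g. a
* hypothetical reading with clock code 3 and both flags set decodes
* as a 64-bit, 133 MHz PCI-X bus.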
*/
static void __devinit get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p)
{
static unsigned short speed_map[] = { 33, 66, 100, 133 };
u32 pci_mode;
pci_read_config_dword(adapter->pdev, A_PCICFG_MODE, &pci_mode);
p->speed = speed_map[G_PCI_MODE_CLK(pci_mode)];
p->width = (pci_mode & F_PCI_MODE_64BIT) ? 64 : 32;
p->is_pcix = (pci_mode & F_PCI_MODE_PCIX) != 0;
}
/*
* Release the structures holding the SW per-Terminator-HW-module state.
*/
void t1_free_sw_modules(adapter_t *adapter)
{
unsigned int i;
for_each_port(adapter, i) {
struct cmac *mac = adapter->port[i].mac;
struct cphy *phy = adapter->port[i].phy;
if (mac)
mac->ops->destroy(mac);
if (phy)
phy->ops->destroy(phy);
}
if (adapter->sge)
t1_sge_destroy(adapter->sge);
if (adapter->espi)
t1_espi_destroy(adapter->espi);
}
static void __devinit init_link_config(struct link_config *lc,
const struct board_info *bi)
{
lc->supported = bi->caps;
lc->requested_speed = lc->speed = SPEED_INVALID;
lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
if (lc->supported & SUPPORTED_Autoneg) {
lc->advertising = lc->supported;
lc->autoneg = AUTONEG_ENABLE;
lc->requested_fc |= PAUSE_AUTONEG;
} else {
lc->advertising = 0;
lc->autoneg = AUTONEG_DISABLE;
}
}
/*
* Allocate and initialize the data structures that hold the SW state of
* the Terminator HW modules.
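* Initialization order: SGE first, then ESPI (only for boards with
* ESPI ports), board-level setup, MDIO, per-port PHY and MAC creation,
* MAC addresses from the VPD EEPROM, link-config defaults, and finally
* the PCI mode probe and a global interrupt clear.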
*/
int __devinit t1_init_sw_modules(adapter_t *adapter,
const struct board_info *bi)
{
unsigned int i;
adapter->params.brd_info = bi;
adapter->params.nports = bi->port_number;
adapter->params.stats_update_period = bi->gmac->stats_update_period;
adapter->sge = t1_sge_create(adapter, &adapter->params.sge);
if (!adapter->sge) {
CH_ERR("%s: SGE initialization failed\n",
adapter->name);
goto error;
}
if (bi->espi_nports && !(adapter->espi = t1_espi_create(adapter))) {
CH_ERR("%s: ESPI initialization failed\n",
adapter->name);
goto error;
}
board_init(adapter, bi);
bi->mdio_ops->init(adapter, bi);
if (bi->gphy->reset)
bi->gphy->reset(adapter);
if (bi->gmac->reset)
bi->gmac->reset(adapter);
for_each_port(adapter, i) {
u8 hw_addr[6];
struct cmac *mac;
int phy_addr = bi->mdio_phybaseaddr + i;
adapter->port[i].phy = bi->gphy->create(adapter, phy_addr,
bi->mdio_ops);
if (!adapter->port[i].phy) {
CH_ERR("%s: PHY %d initialization failed\n",
adapter->name, i);
goto error;
}
adapter->port[i].mac = mac = bi->gmac->create(adapter, i);
if (!mac) {
CH_ERR("%s: MAC %d initialization failed\n",
adapter->name, i);
goto error;
}
/*
* Get the port's MAC address either from the EEPROM, if one
* exists, or from the address hardcoded in the MAC.
*/
if (vpd_macaddress_get(adapter, i, hw_addr)) {
CH_ERR("%s: could not read MAC address from VPD ROM\n",
adapter->port[i].dev->name);
goto error;
}
memcpy(adapter->port[i].dev->dev_addr, hw_addr, ETH_ALEN);
init_link_config(&adapter->port[i].link_config, bi);
}
get_pci_mode(adapter, &adapter->params.pci);
t1_interrupts_clear(adapter);
return 0;
error:
t1_free_sw_modules(adapter);
return -1;
}


@@ -0,0 +1,213 @@
/*****************************************************************************
* *
* File: suni1x10gexp_regs.h *
* $Revision: 1.9 $ *
* $Date: 2005/06/22 00:17:04 $ *
* Description: *
* PMC/SIERRA (pm3393) MAC-PHY functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: PMC/SIERRA *
* *
* History: *
* *
****************************************************************************/
#ifndef _CXGB_SUNI1x10GEXP_REGS_H_
#define _CXGB_SUNI1x10GEXP_REGS_H_
/******************************************************************************/
/** S/UNI-1x10GE-XP REGISTER ADDRESS MAP **/
/******************************************************************************/
/* Refer to the Register Bit Masks below for the naming of each register and */
/* to the S/UNI-1x10GE-XP Data Sheet for the meaning of each bit             */
/******************************************************************************/
#define SUNI1x10GEXP_REG_DEVICE_STATUS 0x0004
#define SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS 0x000D
#define SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE 0x000E
#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE 0x0102
#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS 0x0104
#define SUNI1x10GEXP_REG_RXXG_CONFIG_1 0x2040
#define SUNI1x10GEXP_REG_RXXG_CONFIG_3 0x2042
#define SUNI1x10GEXP_REG_RXXG_INTERRUPT 0x2043
#define SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH 0x2045
#define SUNI1x10GEXP_REG_RXXG_SA_15_0 0x2046
#define SUNI1x10GEXP_REG_RXXG_SA_31_16 0x2047
#define SUNI1x10GEXP_REG_RXXG_SA_47_32 0x2048
#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW 0x204D
#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID 0x204E
#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH 0x204F
#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW 0x206A
#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW 0x206B
#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH 0x206C
#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH 0x206D
#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0 0x206E
#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2 0x2070
#define SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE 0x2088
#define SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS 0x2089
#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE 0x208B
#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS 0x208C
#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE 0x20C7
#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS 0x20C8
#define SUNI1x10GEXP_REG_MSTAT_CONTROL 0x2100
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0 0x2101
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1 0x2102
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2 0x2103
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3 0x2104
#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0 0x2105
#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1 0x2106
#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2 0x2107
#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3 0x2108
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW 0x2110
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW 0x2114
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW 0x2120
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW 0x2124
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW 0x2128
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW 0x2130
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW 0x2138
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW 0x213C
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW 0x2140
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW 0x2144
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW 0x214C
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW 0x2150
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW 0x2154
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW 0x2158
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW 0x2194
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW 0x219C
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW 0x21A0
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW 0x21A8
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW 0x21B0
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW 0x21B8
#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW 0x21BC
#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE 0x2209
#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT 0x220A
#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK 0x2282
#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT 0x2283
#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS 0x2300
#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE 0x2301
#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK 0x2302
#define SUNI1x10GEXP_REG_TXXG_CONFIG_1 0x3040
#define SUNI1x10GEXP_REG_TXXG_CONFIG_3 0x3042
#define SUNI1x10GEXP_REG_TXXG_INTERRUPT 0x3043
#define SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE 0x3045
#define SUNI1x10GEXP_REG_TXXG_SA_15_0 0x3047
#define SUNI1x10GEXP_REG_TXXG_SA_31_16 0x3048
#define SUNI1x10GEXP_REG_TXXG_SA_47_32 0x3049
#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS 0x3084
#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE 0x3085
#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE 0x30C6
#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS 0x30C7
#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE 0x320C
#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION 0x320D
#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK 0x3282
#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT 0x3283
/******************************************************************************/
/* -- End register offset definitions -- */
/******************************************************************************/
/******************************************************************************/
/** SUNI-1x10GE-XP REGISTER BIT MASKS **/
/******************************************************************************/
/*----------------------------------------------------------------------------
* Register 0x0004: S/UNI-1x10GE-XP Device Status
* Bit 9 TOP_SXRA_EXPIRED
* Bit 8 TOP_MDIO_BUSY
* Bit 7 TOP_DTRB
* Bit 6 TOP_EXPIRED
* Bit 5 TOP_PAUSED
* Bit 4 TOP_PL4_ID_DOOL
* Bit 3 TOP_PL4_IS_DOOL
* Bit 2 TOP_PL4_ID_ROOL
* Bit 1 TOP_PL4_IS_ROOL
* Bit 0 TOP_PL4_OUT_ROOL
*----------------------------------------------------------------------------*/
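/* Each mask below is simply (1 << bit_position), e.g. bit 9 -> 0x0200. */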
#define SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED 0x0200
#define SUNI1x10GEXP_BITMSK_TOP_EXPIRED 0x0040
#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL 0x0010
#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL 0x0008
#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL 0x0004
#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL 0x0002
#define SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL 0x0001
/*----------------------------------------------------------------------------
* Register 0x000E: PM3393 Global interrupt enable
* Bit 15 TOP_INTE
*----------------------------------------------------------------------------*/
#define SUNI1x10GEXP_BITMSK_TOP_INTE 0x8000
/*----------------------------------------------------------------------------
* Register 0x2040: RXXG Configuration 1
* Bit 15 RXXG_RXEN
* Bit 14 RXXG_ROCF
* Bit 13 RXXG_PAD_STRIP
* Bit 10 RXXG_PUREP
* Bit 9 RXXG_LONGP
* Bit 8 RXXG_PARF
* Bit 7 RXXG_FLCHK
* Bit 5 RXXG_PASS_CTRL
* Bit 3 RXXG_CRC_STRIP
* Bit 2-0 RXXG_MIFG
*----------------------------------------------------------------------------*/
#define SUNI1x10GEXP_BITMSK_RXXG_RXEN 0x8000
#define SUNI1x10GEXP_BITMSK_RXXG_PUREP 0x0400
#define SUNI1x10GEXP_BITMSK_RXXG_FLCHK 0x0080
#define SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP 0x0008
/*----------------------------------------------------------------------------
* Register 0x2070: RXXG Address Filter Control 2
* Bit 1 RXXG_PMODE
* Bit 0 RXXG_MHASH_EN
*----------------------------------------------------------------------------*/
#define SUNI1x10GEXP_BITMSK_RXXG_PMODE 0x0002
#define SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN 0x0001
/*----------------------------------------------------------------------------
* Register 0x2100: MSTAT Control
* Bit 2 MSTAT_WRITE
* Bit 1 MSTAT_CLEAR
* Bit 0 MSTAT_SNAP
*----------------------------------------------------------------------------*/
#define SUNI1x10GEXP_BITMSK_MSTAT_CLEAR 0x0002
#define SUNI1x10GEXP_BITMSK_MSTAT_SNAP 0x0001
/*----------------------------------------------------------------------------
* Register 0x3040: TXXG Configuration Register 1
* Bit 15 TXXG_TXEN0
* Bit 13 TXXG_HOSTPAUSE
* Bit 12-7 TXXG_IPGT
* Bit 5 TXXG_32BIT_ALIGN
* Bit 4 TXXG_CRCEN
* Bit 3 TXXG_FCTX
* Bit 2 TXXG_FCRX
* Bit 1 TXXG_PADEN
* Bit 0 TXXG_SPRE
*----------------------------------------------------------------------------*/
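/* TXXG_IPGT spans bits 12-7, so it gets a bit offset (7) rather than a single-bit mask. */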
#define SUNI1x10GEXP_BITMSK_TXXG_TXEN0 0x8000
#define SUNI1x10GEXP_BITOFF_TXXG_IPGT 7
#define SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN 0x0020
#define SUNI1x10GEXP_BITMSK_TXXG_CRCEN 0x0010
#define SUNI1x10GEXP_BITMSK_TXXG_FCTX 0x0008
#define SUNI1x10GEXP_BITMSK_TXXG_FCRX 0x0004
#define SUNI1x10GEXP_BITMSK_TXXG_PADEN 0x0002
#endif /* _CXGB_SUNI1x10GEXP_REGS_H_ */


@@ -1,7 +1,7 @@
 /*******************************************************************************
-  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
@@ -156,7 +156,7 @@
 #define DRV_NAME "e100"
 #define DRV_EXT "-NAPI"
-#define DRV_VERSION "3.4.8-k2"DRV_EXT
+#define DRV_VERSION "3.4.14-k2"DRV_EXT
 #define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
 #define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation"
 #define PFX DRV_NAME ": "
@@ -785,6 +785,7 @@ static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
 }
 
 #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
+#define E100_WAIT_SCB_FAST 20       /* delay like the old code */
 static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
 {
     unsigned long flags;
@@ -798,7 +799,7 @@ static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
         if(likely(!readb(&nic->csr->scb.cmd_lo)))
             break;
         cpu_relax();
-        if(unlikely(i > (E100_WAIT_SCB_TIMEOUT >> 1)))
+        if(unlikely(i > E100_WAIT_SCB_FAST))
             udelay(5);
     }
     if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
@@ -902,8 +903,8 @@ static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
 static void e100_get_defaults(struct nic *nic)
 {
-    struct param_range rfds = { .min = 16, .max = 256, .count = 64 };
-    struct param_range cbs  = { .min = 64, .max = 256, .count = 64 };
+    struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
+    struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };
 
     pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
 
     /* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
@@ -1006,25 +1007,213 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
         c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
 }
/********************************************************/
/* Micro code for 8086:1229 Rev 8 */
/********************************************************/
/* Parameter values for the D101M B-step */
#define D101M_CPUSAVER_TIMER_DWORD 78
#define D101M_CPUSAVER_BUNDLE_DWORD 65
#define D101M_CPUSAVER_MIN_SIZE_DWORD 126
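/* The *_CPUSAVER_*_DWORD values are dword indices into the microcode
 * images below; e100_load_ucode() patches the low 16 bits of each with
 * the INTDELAY, BUNDLEMAX and min-size tunables before the microcode
 * is downloaded to the controller. */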
#define D101M_B_RCVBUNDLE_UCODE \
{\
0x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
0x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
0x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
0x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
0x00380438, 0x00000000, 0x00140000, 0x00380555, \
0x00308000, 0x00100662, 0x00100561, 0x000E0408, \
0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
0x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
0x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
0x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
0x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
0x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
0x00041000, 0x00010004, 0x00130826, 0x000C0006, \
0x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
0x00101210, 0x00380C34, 0x00000000, 0x00000000, \
0x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
0x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
0x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
0x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
0x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
0x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
0x00130826, 0x000C0001, 0x00220559, 0x00101313, \
0x00380559, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00130831, 0x0010090B, 0x00124813, \
0x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
0x003806A8, 0x00000000, 0x00000000, 0x00000000, \
}
/********************************************************/
/* Micro code for 8086:1229 Rev 9 */
/********************************************************/
/* Parameter values for the D101S */
#define D101S_CPUSAVER_TIMER_DWORD 78
#define D101S_CPUSAVER_BUNDLE_DWORD 67
#define D101S_CPUSAVER_MIN_SIZE_DWORD 128
#define D101S_RCVBUNDLE_UCODE \
{\
0x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
0x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
0x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
0x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
0x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
0x00308000, 0x00100610, 0x00100561, 0x000E0408, \
0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
0x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
0x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
0x003A047E, 0x00044010, 0x00380819, 0x00000000, \
0x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
0x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
0x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
0x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
0x00101313, 0x00380700, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
0x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
0x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
0x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
0x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
0x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
0x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
0x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
0x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
0x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00130831, \
0x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
0x00041000, 0x00010004, 0x00380700 \
}
/********************************************************/
/* Micro code for the 8086:1229 Rev F/10 */
/********************************************************/
/* Parameter values for the D102 E-step */
#define D102_E_CPUSAVER_TIMER_DWORD 42
#define D102_E_CPUSAVER_BUNDLE_DWORD 54
#define D102_E_CPUSAVER_MIN_SIZE_DWORD 46
#define D102_E_RCVBUNDLE_UCODE \
{\
0x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
0x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
0x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
0x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
0x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
0x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
0x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
0x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
0x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
0x00000000, 0x00000000, 0x00000000, 0x00000000, \
}
 static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 {
-    int i;
-    static const u32 ucode[UCODE_SIZE] = {
-        /* NFS packets are misinterpreted as TCO packets and
-         * incorrectly routed to the BMC over SMBus. This
-         * microcode patch checks the fragmented IP bit in the
-         * NFS/UDP header to distinguish between NFS and TCO. */
-        0x0EF70E36, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF,
-        0x1FFF1FFF, 0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000,
-        0x00906EFD, 0x00900EFD, 0x00E00EF8,
-    };
-
-    if(nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
-        for(i = 0; i < UCODE_SIZE; i++)
-            cb->u.ucode[i] = cpu_to_le32(ucode[i]);
-        cb->command = cpu_to_le16(cb_ucode);
-    } else
-        cb->command = cpu_to_le16(cb_nop);
+/* *INDENT-OFF* */
+    static struct {
+        u32 ucode[UCODE_SIZE + 1];
+        u8 mac;
+        u8 timer_dword;
+        u8 bundle_dword;
+        u8 min_size_dword;
+    } ucode_opts[] = {
+        { D101M_B_RCVBUNDLE_UCODE,
+          mac_82559_D101M,
+          D101M_CPUSAVER_TIMER_DWORD,
+          D101M_CPUSAVER_BUNDLE_DWORD,
+          D101M_CPUSAVER_MIN_SIZE_DWORD },
+        { D101S_RCVBUNDLE_UCODE,
+          mac_82559_D101S,
+          D101S_CPUSAVER_TIMER_DWORD,
+          D101S_CPUSAVER_BUNDLE_DWORD,
+          D101S_CPUSAVER_MIN_SIZE_DWORD },
+        { D102_E_RCVBUNDLE_UCODE,
+          mac_82551_F,
+          D102_E_CPUSAVER_TIMER_DWORD,
+          D102_E_CPUSAVER_BUNDLE_DWORD,
+          D102_E_CPUSAVER_MIN_SIZE_DWORD },
+        { D102_E_RCVBUNDLE_UCODE,
+          mac_82551_10,
+          D102_E_CPUSAVER_TIMER_DWORD,
+          D102_E_CPUSAVER_BUNDLE_DWORD,
+          D102_E_CPUSAVER_MIN_SIZE_DWORD },
+        { {0}, 0, 0, 0, 0}
+    }, *opts;
+/* *INDENT-ON* */
+
+#define BUNDLESMALL 1
+#define BUNDLEMAX 50
+#define INTDELAY 15000
+
+    opts = ucode_opts;
+
+    /* do not load u-code for ICH devices */
+    if (nic->flags & ich)
+        return;
+
+    /* Search for ucode match against h/w rev_id */
+    while (opts->mac) {
+        if (nic->mac == opts->mac) {
+            int i;
+            u32 *ucode = opts->ucode;
+
+            /* Insert user-tunable settings */
+            ucode[opts->timer_dword] &= 0xFFFF0000;
+            ucode[opts->timer_dword] |= (u16) INTDELAY;
+            ucode[opts->bundle_dword] &= 0xFFFF0000;
+            ucode[opts->bundle_dword] |= (u16) BUNDLEMAX;
+            ucode[opts->min_size_dword] &= 0xFFFF0000;
+            ucode[opts->min_size_dword] |= (BUNDLESMALL) ? 0xFFFF : 0xFF80;
+
+            for(i = 0; i < UCODE_SIZE; i++)
+                cb->u.ucode[i] = cpu_to_le32(ucode[i]);
+            cb->command = cpu_to_le16(cb_ucode);
+            return;
+        }
+        opts++;
+    }
+
+    cb->command = cpu_to_le16(cb_nop);
 }
 static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
@@ -1307,14 +1496,15 @@ static inline void e100_xmit_prepare(struct nic *nic, struct cb *cb,
 {
     cb->command = nic->tx_command;
     /* interrupt every 16 packets regardless of delay */
-    if((nic->cbs_avail & ~15) == nic->cbs_avail) cb->command |= cb_i;
+    if((nic->cbs_avail & ~15) == nic->cbs_avail)
+        cb->command |= cpu_to_le16(cb_i);
     cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
     cb->u.tcb.tcb_byte_count = 0;
     cb->u.tcb.threshold = nic->tx_threshold;
     cb->u.tcb.tbd_count = 1;
     cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
         skb->data, skb->len, PCI_DMA_TODEVICE));
-    // check for mapping failure?
+    /* check for mapping failure? */
     cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
 }
 
@@ -1539,7 +1729,7 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
         /* Don't indicate if hardware indicates errors */
         nic->net_stats.rx_dropped++;
         dev_kfree_skb_any(skb);
-    } else if(actual_size > nic->netdev->mtu + VLAN_ETH_HLEN) {
+    } else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
         /* Don't indicate oversized frames */
         nic->rx_over_length_errors++;
         nic->net_stats.rx_dropped++;
@@ -1706,6 +1896,7 @@ static int e100_poll(struct net_device *netdev, int *budget)
 static void e100_netpoll(struct net_device *netdev)
 {
     struct nic *nic = netdev_priv(netdev);
+
     e100_disable_irq(nic);
     e100_intr(nic->pdev->irq, netdev, NULL);
     e100_tx_clean(nic);
@@ -2108,6 +2299,8 @@ static void e100_diag_test(struct net_device *netdev,
     }
     for(i = 0; i < E100_TEST_LEN; i++)
         test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
+
+    msleep_interruptible(4 * 1000);
 }
 
static int e100_phys_id(struct net_device *netdev, u32 data) static int e100_phys_id(struct net_device *netdev, u32 data)

drivers/net/sis190.c: new file, 1843 lines (diff suppressed because it is too large)

@@ -135,6 +135,18 @@ config DM9102
       <file:Documentation/networking/net-modules.txt>. The module will
       be called dmfe.
 
+config ULI526X
+    tristate "ULi M526x controller support"
+    depends on NET_TULIP && PCI
+    select CRC32
+    ---help---
+      This driver is for ULi M5261/M5263 10/100M Ethernet Controller
+      (<http://www.uli.com.tw/>).
+
+      To compile this driver as a module, choose M here and read
+      <file:Documentation/networking/net-modules.txt>. The module will
+      be called uli526x.
+
 config PCMCIA_XIRCOM
     tristate "Xircom CardBus support (new driver)"
     depends on NET_TULIP && CARDBUS


@@ -9,6 +9,7 @@ obj-$(CONFIG_WINBOND_840) += winbond-840.o
 obj-$(CONFIG_DE2104X) += de2104x.o
 obj-$(CONFIG_TULIP) += tulip.o
 obj-$(CONFIG_DE4X5) += de4x5.o
+obj-$(CONFIG_ULI526X) += uli526x.o
 
 # Declare multi-part drivers.


@@ -81,25 +81,6 @@ int tulip_mdio_read(struct net_device *dev, int phy_id, int location)
         return retval & 0xffff;
     }
 
-    if(tp->chip_id == ULI526X && tp->revision >= 0x40) {
-        int value;
-        int i = 1000;
-
-        value = ioread32(ioaddr + CSR9);
-        iowrite32(value & 0xFFEFFFFF, ioaddr + CSR9);
-
-        value = (phy_id << 21) | (location << 16) | 0x08000000;
-        iowrite32(value, ioaddr + CSR10);
-
-        while(--i > 0) {
-            mdio_delay();
-            if(ioread32(ioaddr + CSR10) & 0x10000000)
-                break;
-        }
-        retval = ioread32(ioaddr + CSR10);
-        spin_unlock_irqrestore(&tp->mii_lock, flags);
-        return retval & 0xFFFF;
-    }
-
     /* Establish sync by sending at least 32 logic ones. */
     for (i = 32; i >= 0; i--) {
         iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
@@ -159,23 +140,6 @@ void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val)
         spin_unlock_irqrestore(&tp->mii_lock, flags);
         return;
     }
 
-    if (tp->chip_id == ULI526X && tp->revision >= 0x40) {
-        int value;
-        int i = 1000;
-
-        value = ioread32(ioaddr + CSR9);
-        iowrite32(value & 0xFFEFFFFF, ioaddr + CSR9);
-
-        value = (phy_id << 21) | (location << 16) | 0x04000000 | (val & 0xFFFF);
-        iowrite32(value, ioaddr + CSR10);
-
-        while(--i > 0) {
-            if (ioread32(ioaddr + CSR10) & 0x10000000)
-                break;
-        }
-        spin_unlock_irqrestore(&tp->mii_lock, flags);
-        return;
-    }
-
     /* Establish sync by sending 32 logic ones. */
     for (i = 32; i >= 0; i--) {


@@ -39,7 +39,6 @@ void tulip_timer(unsigned long data)
     case MX98713:
     case COMPEX9881:
     case DM910X:
-    case ULI526X:
     default: {
         struct medialeaf *mleaf;
         unsigned char *p;


@@ -88,7 +88,6 @@ enum chips {
     I21145,
     DM910X,
     CONEXANT,
-    ULI526X
 };
 
@@ -482,11 +481,8 @@ static inline void tulip_stop_rxtx(struct tulip_private *tp)
 
 static inline void tulip_restart_rxtx(struct tulip_private *tp)
 {
-    if(!(tp->chip_id == ULI526X &&
-        (tp->revision == 0x40 || tp->revision == 0x50))) {
-        tulip_stop_rxtx(tp);
-        udelay(5);
-    }
+    tulip_stop_rxtx(tp);
+    udelay(5);
     tulip_start_rxtx(tp);
 }


@@ -199,9 +199,6 @@ struct tulip_chip_table tulip_tbl[] = {
     { "Conexant LANfinity", 256, 0x0001ebef,
       HAS_MII | HAS_ACPI, tulip_timer },
 
-    /* ULi526X */
-    { "ULi M5261/M5263", 128, 0x0001ebef,
-      HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI, tulip_timer },
 };
 
@@ -239,8 +236,6 @@ static struct pci_device_id tulip_pci_tbl[] = {
     { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
     { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
     { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
-    { 0x10b9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ULI526X }, /* ALi 1563 integrated ethernet */
-    { 0x10b9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ULI526X }, /* ALi 1563 integrated ethernet */
     { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
     { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
     { } /* terminate list */
@@ -522,7 +517,7 @@ static void tulip_tx_timeout(struct net_device *dev)
             dev->name);
     } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142
             || tp->chip_id == MX98713 || tp->chip_id == COMPEX9881
-            || tp->chip_id == DM910X || tp->chip_id == ULI526X) {
+            || tp->chip_id == DM910X) {
         printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, "
             "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
             dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
@@ -1103,18 +1098,16 @@ static void set_rx_mode(struct net_device *dev)
         entry = tp->cur_tx++ % TX_RING_SIZE;
 
         if (entry != 0) {
-            /* Avoid a chip errata by prefixing a dummy entry. Don't do
-               this on the ULI526X as it triggers a different problem */
-            if (!(tp->chip_id == ULI526X && (tp->revision == 0x40 || tp->revision == 0x50))) {
-                tp->tx_buffers[entry].skb = NULL;
-                tp->tx_buffers[entry].mapping = 0;
-                tp->tx_ring[entry].length =
-                    (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
-                tp->tx_ring[entry].buffer1 = 0;
-                /* Must set DescOwned later to avoid race with chip */
-                dummy = entry;
-                entry = tp->cur_tx++ % TX_RING_SIZE;
-            }
+            /* Avoid a chip errata by prefixing a dummy entry. */
+            tp->tx_buffers[entry].skb = NULL;
+            tp->tx_buffers[entry].mapping = 0;
+            tp->tx_ring[entry].length =
+                (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
+            tp->tx_ring[entry].buffer1 = 0;
+            /* Must set DescOwned later to avoid race with chip */
+            dummy = entry;
+            entry = tp->cur_tx++ % TX_RING_SIZE;
         }
 
         tp->tx_buffers[entry].skb = NULL;
@@ -1235,10 +1228,6 @@ static int tulip_uli_dm_quirk(struct pci_dev *pdev)
 {
     if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
         return 1;
-    if (pdev->vendor == 0x10b9 && pdev->device == 0x5261)
-        return 1;
-    if (pdev->vendor == 0x10b9 && pdev->device == 0x5263)
-        return 1;
     return 0;
 }
 
@@ -1680,7 +1669,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
     switch (chip_idx) {
     case DC21140:
     case DM910X:
-    case ULI526X:
     default:
         if (tp->mtable)
             iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);

drivers/net/tulip/uli526x.c: new file, 1749 lines (diff suppressed because it is too large)

@@ -40,7 +40,7 @@
 /*
  * FIXME: IO should be max 256 bytes. However, since we may
  * have a P2P bridge below a cardbus bridge, we need 4K.
  */
-#define CARDBUS_IO_SIZE  (256)
+#define CARDBUS_IO_SIZE  (4*1024)
 #define CARDBUS_MEM_SIZE (32*1024*1024)
 
 static void __devinit


@@ -189,7 +189,6 @@ static void ahci_irq_clear(struct ata_port *ap);
 static void ahci_eng_timeout(struct ata_port *ap);
 static int ahci_port_start(struct ata_port *ap);
 static void ahci_port_stop(struct ata_port *ap);
-static void ahci_host_stop(struct ata_host_set *host_set);
 static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
 static void ahci_qc_prep(struct ata_queued_cmd *qc);
 static u8 ahci_check_status(struct ata_port *ap);
@@ -242,7 +241,6 @@ static struct ata_port_operations ahci_ops = {
     .port_start = ahci_port_start,
     .port_stop = ahci_port_stop,
-    .host_stop = ahci_host_stop,
 };
 
 static struct ata_port_info ahci_port_info[] = {
@@ -296,17 +294,9 @@ static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int
     return base + 0x100 + (port * 0x80);
 }
 
-static inline void *ahci_port_base (void *base, unsigned int port)
+static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int port)
 {
-    return (void *) ahci_port_base_ul((unsigned long)base, port);
-}
-
-static void ahci_host_stop(struct ata_host_set *host_set)
-{
-    struct ahci_host_priv *hpriv = host_set->private_data;
-    kfree(hpriv);
-
-    ata_host_stop(host_set);
+    return (void __iomem *) ahci_port_base_ul((unsigned long)base, port);
 }
 
 static int ahci_port_start(struct ata_port *ap)
@@ -314,8 +304,9 @@ static int ahci_port_start(struct ata_port *ap)
     struct device *dev = ap->host_set->dev;
     struct ahci_host_priv *hpriv = ap->host_set->private_data;
     struct ahci_port_priv *pp;
-    void *mem, *mmio = ap->host_set->mmio_base;
-    void *port_mmio = ahci_port_base(mmio, ap->port_no);
+    void __iomem *mmio = ap->host_set->mmio_base;
+    void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+    void *mem;
     dma_addr_t mem_dma;
 
     pp = kmalloc(sizeof(*pp), GFP_KERNEL);
@@ -383,8 +374,8 @@ static void ahci_port_stop(struct ata_port *ap)
 {
     struct device *dev = ap->host_set->dev;
     struct ahci_port_priv *pp = ap->private_data;
-    void *mmio = ap->host_set->mmio_base;
-    void *port_mmio = ahci_port_base(mmio, ap->port_no);
+    void __iomem *mmio = ap->host_set->mmio_base;
+    void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
     u32 tmp;
 
     tmp = readl(port_mmio + PORT_CMD);
@@ -546,8 +537,8 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 static void ahci_intr_error(struct ata_port *ap, u32 irq_stat)
 {
-    void *mmio = ap->host_set->mmio_base;
-    void *port_mmio = ahci_port_base(mmio, ap->port_no);
+    void __iomem *mmio = ap->host_set->mmio_base;
+    void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
     u32 tmp;
     int work;
@@ -595,8 +586,8 @@ static void ahci_intr_error(struct ata_port *ap, u32 irq_stat)
 static void ahci_eng_timeout(struct ata_port *ap)
 {
     struct ata_host_set *host_set = ap->host_set;
-    void *mmio = host_set->mmio_base;
-    void *port_mmio = ahci_port_base(mmio, ap->port_no);
+    void __iomem *mmio = host_set->mmio_base;
+    void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
     struct ata_queued_cmd *qc;
     unsigned long flags;
@@ -626,8 +617,8 @@ static void ahci_eng_timeout(struct ata_port *ap)
 static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 {
-    void *mmio = ap->host_set->mmio_base;
-    void *port_mmio = ahci_port_base(mmio, ap->port_no);
+    void __iomem *mmio = ap->host_set->mmio_base;
+    void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
     u32 status, serr, ci;
 
     serr = readl(port_mmio + PORT_SCR_ERR);
@@ -663,7 +654,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
     struct ata_host_set *host_set = dev_instance;
     struct ahci_host_priv *hpriv;
     unsigned int i, handled = 0;
-    void *mmio;
+    void __iomem *mmio;
     u32 irq_stat, irq_ack = 0;
 
     VPRINTK("ENTER\n");
@@ -709,7 +700,7 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
 static int ahci_qc_issue(struct ata_queued_cmd *qc)
 {
     struct ata_port *ap = qc->ap;
-    void *port_mmio = (void *) ap->ioaddr.cmd_addr;
+    void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
 
     writel(1, port_mmio + PORT_CMD_ISSUE);
     readl(port_mmio + PORT_CMD_ISSUE); /* flush */
@@ -894,7 +885,7 @@ static void ahci_print_info(struct ata_probe_ent *probe_ent)
 {
     struct ahci_host_priv *hpriv = probe_ent->private_data;
     struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
-    void *mmio = probe_ent->mmio_base;
+    void __iomem *mmio = probe_ent->mmio_base;
     u32 vers, cap, impl, speed;
     const char *speed_s;
     u16 cc;
@@ -967,7 +958,7 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
     struct ata_probe_ent *probe_ent = NULL;
     struct ahci_host_priv *hpriv;
     unsigned long base;
-    void *mmio_base;
+    void __iomem *mmio_base;
     unsigned int board_idx = (unsigned int) ent->driver_data;
     int have_msi, pci_dev_busy = 0;
     int rc;
@@ -1004,8 +995,7 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
     probe_ent->dev = pci_dev_to_dev(pdev);
     INIT_LIST_HEAD(&probe_ent->node);
 
-    mmio_base = ioremap(pci_resource_start(pdev, AHCI_PCI_BAR),
-                        pci_resource_len(pdev, AHCI_PCI_BAR));
+    mmio_base = pci_iomap(pdev, AHCI_PCI_BAR, 0);
     if (mmio_base == NULL) {
         rc = -ENOMEM;
         goto err_out_free_ent;
@@ -1049,7 +1039,7 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 err_out_hpriv:
     kfree(hpriv);
 err_out_iounmap:
-    iounmap(mmio_base);
+    pci_iounmap(pdev, mmio_base);
 err_out_free_ent:
     kfree(probe_ent);
 err_out_msi:
@@ -1089,7 +1079,8 @@ static void ahci_remove_one (struct pci_dev *pdev)
         scsi_host_put(ap->host);
     }
 
-    host_set->ops->host_stop(host_set);
+    kfree(hpriv);
+    pci_iounmap(pdev, host_set->mmio_base);
     kfree(host_set);
 
     if (have_msi)
@@ -1106,7 +1097,6 @@ static int __init ahci_init(void)
     return pci_module_init(&ahci_pci_driver);
 }
 
 static void __exit ahci_exit(void)
 {
     pci_unregister_driver(&ahci_pci_driver);


@@ -583,8 +583,7 @@ static void pci_enable_intx(struct pci_dev *pdev)
 #define AHCI_ENABLE (1 << 31)
 static int piix_disable_ahci(struct pci_dev *pdev)
 {
-    void *mmio;
-    unsigned long addr;
+    void __iomem *mmio;
     u32 tmp;
     int rc = 0;
 
@@ -592,11 +591,11 @@ static int piix_disable_ahci(struct pci_dev *pdev)
      * works because this device is usually set up by BIOS.
      */
 
-    addr = pci_resource_start(pdev, AHCI_PCI_BAR);
-    if (!addr || !pci_resource_len(pdev, AHCI_PCI_BAR))
+    if (!pci_resource_start(pdev, AHCI_PCI_BAR) ||
+        !pci_resource_len(pdev, AHCI_PCI_BAR))
         return 0;
 
-    mmio = ioremap(addr, 64);
+    mmio = pci_iomap(pdev, AHCI_PCI_BAR, 64);
     if (!mmio)
         return -ENOMEM;
 
@@ -610,7 +609,7 @@ static int piix_disable_ahci(struct pci_dev *pdev)
         rc = -EIO;
     }
 
-    iounmap(mmio);
+    pci_iounmap(pdev, mmio);
     return rc;
 }


@@ -4204,6 +4204,15 @@ ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port)
 
+#ifdef CONFIG_PCI
+void ata_pci_host_stop (struct ata_host_set *host_set)
+{
+    struct pci_dev *pdev = to_pci_dev(host_set->dev);
+
+    pci_iounmap(pdev, host_set->mmio_base);
+}
+
 /**
  * ata_pci_init_native_mode - Initialize native-mode driver
  * @pdev: pci device to be initialized
@@ -4216,7 +4225,6 @@ ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port)
  * ata_probe_ent structure should then be freed with kfree().
  */
 
-#ifdef CONFIG_PCI
 struct ata_probe_ent *
 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port)
 {
@@ -4599,6 +4607,7 @@ EXPORT_SYMBOL_GPL(ata_scsi_simulate);
 
 #ifdef CONFIG_PCI
 EXPORT_SYMBOL_GPL(pci_test_config_bits);
+EXPORT_SYMBOL_GPL(ata_pci_host_stop);
 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
 EXPORT_SYMBOL_GPL(ata_pci_init_one);
 EXPORT_SYMBOL_GPL(ata_pci_remove_one);


@@ -351,6 +351,7 @@ static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
 static void nv_host_stop (struct ata_host_set *host_set)
 {
     struct nv_host *host = host_set->private_data;
+    struct pci_dev *pdev = to_pci_dev(host_set->dev);
 
     // Disable hotplug event interrupts.
     if (host->host_desc->disable_hotplug)
@@ -358,7 +359,8 @@ static void nv_host_stop (struct ata_host_set *host_set)
 
     kfree(host);
 
-    ata_host_stop(host_set);
+    if (host_set->mmio_base)
+        pci_iounmap(pdev, host_set->mmio_base);
 }
 
 static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -420,8 +422,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
     if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) {
         unsigned long base;
 
-        probe_ent->mmio_base = ioremap(pci_resource_start(pdev, 5),
-                pci_resource_len(pdev, 5));
+        probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
         if (probe_ent->mmio_base == NULL) {
             rc = -EIO;
             goto err_out_free_host;
@@ -457,7 +458,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 err_out_iounmap:
     if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
-        iounmap(probe_ent->mmio_base);
+        pci_iounmap(pdev, probe_ent->mmio_base);
 err_out_free_host:
     kfree(host);
 err_out_free_ent:


@@ -92,6 +92,7 @@ static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf);
 static void pdc_irq_clear(struct ata_port *ap);
 static int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
 
+
 static Scsi_Host_Template pdc_ata_sht = {
     .module = THIS_MODULE,
     .name = DRV_NAME,
@@ -132,7 +133,7 @@ static struct ata_port_operations pdc_sata_ops = {
     .scr_write = pdc_sata_scr_write,
     .port_start = pdc_port_start,
     .port_stop = pdc_port_stop,
-    .host_stop = ata_host_stop,
+    .host_stop = ata_pci_host_stop,
 };
 
 static struct ata_port_operations pdc_pata_ops = {
@@ -153,7 +154,7 @@ static struct ata_port_operations pdc_pata_ops = {
     .port_start = pdc_port_start,
     .port_stop = pdc_port_stop,
-    .host_stop = ata_host_stop,
+    .host_stop = ata_pci_host_stop,
 };
 
 static struct ata_port_info pdc_port_info[] = {
@@ -282,7 +283,7 @@ static void pdc_port_stop(struct ata_port *ap)
 static void pdc_reset_port(struct ata_port *ap)
 {
-    void *mmio = (void *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
+    void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
     unsigned int i;
     u32 tmp;
@@ -418,7 +419,7 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap,
     u8 status;
     unsigned int handled = 0, have_err = 0;
     u32 tmp;
-    void *mmio = (void *) ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL;
+    void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL;
 
     tmp = readl(mmio);
     if (tmp & PDC_ERR_MASK) {
@@ -447,7 +448,7 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap,
 static void pdc_irq_clear(struct ata_port *ap)
 {
     struct ata_host_set *host_set = ap->host_set;
-    void *mmio = host_set->mmio_base;
+    void __iomem *mmio = host_set->mmio_base;
 
     readl(mmio + PDC_INT_SEQMASK);
 }
@@ -459,7 +460,7 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
     u32 mask = 0;
     unsigned int i, tmp;
     unsigned int handled = 0;
-    void *mmio_base;
+    void __iomem *mmio_base;
 
     VPRINTK("ENTER\n");
@@ -581,7 +582,7 @@ static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base)
 static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
 {
-    void *mmio = pe->mmio_base;
+    void __iomem *mmio = pe->mmio_base;
     u32 tmp;
 
     /*
@@ -624,7 +625,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
     static int printed_version;
     struct ata_probe_ent *probe_ent = NULL;
     unsigned long base;
-    void *mmio_base;
+    void __iomem *mmio_base;
     unsigned int board_idx = (unsigned int) ent->driver_data;
     int pci_dev_busy = 0;
     int rc;
@@ -663,8 +664,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
     probe_ent->dev = pci_dev_to_dev(pdev);
     INIT_LIST_HEAD(&probe_ent->node);
 
-    mmio_base = ioremap(pci_resource_start(pdev, 3),
-                        pci_resource_len(pdev, 3));
+    mmio_base = pci_iomap(pdev, 3, 0);
     if (mmio_base == NULL) {
         rc = -ENOMEM;
         goto err_out_free_ent;


@@ -538,11 +538,12 @@ static void qs_port_stop(struct ata_port *ap)
 static void qs_host_stop(struct ata_host_set *host_set)
 {
     void __iomem *mmio_base = host_set->mmio_base;
+    struct pci_dev *pdev = to_pci_dev(host_set->dev);
 
     writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
     writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
 
-    ata_host_stop(host_set);
+    pci_iounmap(pdev, mmio_base);
 }
 
 static void qs_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
@@ -646,8 +647,7 @@ static int qs_ata_init_one(struct pci_dev *pdev,
         goto err_out_regions;
     }
 
-    mmio_base = ioremap(pci_resource_start(pdev, 4),
-                        pci_resource_len(pdev, 4));
+    mmio_base = pci_iomap(pdev, 4, 0);
     if (mmio_base == NULL) {
         rc = -ENOMEM;
         goto err_out_regions;
@@ -697,7 +697,7 @@ static int qs_ata_init_one(struct pci_dev *pdev,
     return 0;
 
 err_out_iounmap:
-    iounmap(mmio_base);
+    pci_iounmap(pdev, mmio_base);
 err_out_regions:
     pci_release_regions(pdev);
 err_out:


@@ -86,6 +86,7 @@ static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
 static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
 static void sil_post_set_mode (struct ata_port *ap);
 
+
 static struct pci_device_id sil_pci_tbl[] = {
     { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
     { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
@@ -172,7 +173,7 @@ static struct ata_port_operations sil_ops = {
     .scr_write = sil_scr_write,
     .port_start = ata_port_start,
     .port_stop = ata_port_stop,
-    .host_stop = ata_host_stop,
+    .host_stop = ata_pci_host_stop,
 };
 
 static struct ata_port_info sil_port_info[] = {
@@ -231,6 +232,7 @@ MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
 MODULE_VERSION(DRV_VERSION);
 
+
 static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
 {
     u8 cache_line = 0;
@@ -242,7 +244,8 @@ static void sil_post_set_mode (struct ata_port *ap)
 {
     struct ata_host_set *host_set = ap->host_set;
     struct ata_device *dev;
-    void *addr = host_set->mmio_base + sil_port[ap->port_no].xfer_mode;
+    void __iomem *addr =
+        host_set->mmio_base + sil_port[ap->port_no].xfer_mode;
     u32 tmp, dev_mode[2];
     unsigned int i;
@@ -375,7 +378,7 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
     static int printed_version;
     struct ata_probe_ent *probe_ent = NULL;
     unsigned long base;
-    void *mmio_base;
+    void __iomem *mmio_base;
     int rc;
     unsigned int i;
     int pci_dev_busy = 0;
@@ -425,8 +428,7 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
     probe_ent->irq_flags = SA_SHIRQ;
     probe_ent->host_flags = sil_port_info[ent->driver_data].host_flags;
 
-    mmio_base = ioremap(pci_resource_start(pdev, 5),
-                        pci_resource_len(pdev, 5));
+    mmio_base = pci_iomap(pdev, 5, 0);
     if (mmio_base == NULL) {
         rc = -ENOMEM;
         goto err_out_free_ent;


@@ -318,7 +318,7 @@ static struct ata_port_operations k2_sata_ops = {
     .scr_write = k2_sata_scr_write,
     .port_start = ata_port_start,
     .port_stop = ata_port_stop,
-    .host_stop = ata_host_stop,
+    .host_stop = ata_pci_host_stop,
 };
 
 static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base)
@@ -346,7 +346,7 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
     static int printed_version;
     struct ata_probe_ent *probe_ent = NULL;
     unsigned long base;
-    void *mmio_base;
+    void __iomem *mmio_base;
     int pci_dev_busy = 0;
     int rc;
     int i;
@@ -392,8 +392,7 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
     probe_ent->dev = pci_dev_to_dev(pdev);
     INIT_LIST_HEAD(&probe_ent->node);
 
-    mmio_base = ioremap(pci_resource_start(pdev, 5),
-                        pci_resource_len(pdev, 5));
+    mmio_base = pci_iomap(pdev, 5, 0);
     if (mmio_base == NULL) {
         rc = -ENOMEM;
         goto err_out_free_ent;

Some files were not shown because too many files have changed in this diff.