diff -u --recursive --new-file linux.old/Documentation/Configure.help linux/Documentation/Configure.help --- linux.old/Documentation/Configure.help Fri Dec 21 17:41:53 2001 +++ linux/Documentation/Configure.help Sun Feb 3 17:51:35 2002 @@ -5642,6 +5642,19 @@ at least version 1.27 of dmascc_cfg, as older versions will not work with the current driver. +PCISCC-4 driver for AX.25 +CONFIG_PCISCC + PCISCC-4 is a medium-speed Serial Communication Controller (SCC) card with + PCI interface including DMA busmaster support and four channels. It is based + on the Siemens PEB 20534H ("DSCC-4") multi protocol controller. There are + two versions of this device: -10 and -52 respectively working up to 10 and + 52 Mbps per channel. This driver supports both versions. + If unsure, say N. + For more information and configuration tool take a look at + http://www.afthd.tu-darmstadt.de/~dg1kjd/pciscc4. (To browse the WWW, you + need to have access to a machine on the Internet that has a program + like lynx or netscape). 
+ Z8530 SCC driver for AX.25 CONFIG_SCC These cards are used to connect your Linux box to an amateur radio diff -u --recursive --new-file linux.old/drivers/net/hamradio/Config.in linux/drivers/net/hamradio/Config.in --- linux.old/drivers/net/hamradio/Config.in Sat Oct 2 14:38:27 1999 +++ linux/drivers/net/hamradio/Config.in Sun Feb 3 17:49:43 2002 @@ -29,5 +29,6 @@ bool ' soundmodem support for 9600 baud FSK G3RUH modulation' CONFIG_SOUNDMODEM_FSK9600 fi +dep_tristate 'PCISCC driver for AX.25' CONFIG_PCISCC $CONFIG_AX25 dep_tristate 'YAM driver for AX.25' CONFIG_YAM $CONFIG_AX25 diff -u --recursive --new-file linux.old/drivers/net/hamradio/Makefile linux/drivers/net/hamradio/Makefile --- linux.old/drivers/net/hamradio/Makefile Thu Jan 4 20:50:12 2001 +++ linux/drivers/net/hamradio/Makefile Tue Feb 5 08:16:54 2002 @@ -20,6 +20,7 @@ obj-$(CONFIG_SCC) += scc.o obj-$(CONFIG_MKISS) += mkiss.o obj-$(CONFIG_6PACK) += 6pack.o +obj-$(CONFIG_PCISCC) += pciscc4.o obj-$(CONFIG_YAM) += yam.o obj-$(CONFIG_BPQETHER) += bpqether.o obj-$(CONFIG_BAYCOM_SER_FDX) += baycom_ser_fdx.o hdlcdrv.o diff -u --recursive --new-file linux.old/drivers/net/hamradio/pciscc4.c linux/drivers/net/hamradio/pciscc4.c --- linux.old/drivers/net/hamradio/pciscc4.c Thu Jan 1 00:00:00 1970 +++ linux/drivers/net/hamradio/pciscc4.c Sun Feb 3 17:49:26 2002 @@ -0,0 +1,2643 @@ +/***************************************************************************** + * + * pciscc4.c This is the device driver for the PCISCC-4 card or any other + * board based on the Siemens PEB-20534H (DSCC-4) communication + * controller. The DSCC-4 is a four-channel medium-speed (up + * to 10 respectively 52 Mbps/channel) synchronous serial + * interface controller with HDLC protocol processor and + * busmaster-DMA facilities, and furthermore it is undoubtly + * the most broken piece of silicon I ever saw in my whole + * life. 
+ * + * Info: http://www.afthd.tu-darmstadt.de/~dg1kjd/pciscc4 + * + * Authors: (c) 1999-2000 Jens David + * + * Policy: Please contact me before making structural changes. + * Before applying changes to the communication with + * the DSCC-4 please read: + * - Data Sheet 05/00 PEB-20534 Version 2.1 + * - Delta Sheet Chip Rev. 2.0-2.1 + * - DSCC-4 Errata Book PEB-20534H Rev 2.0 DS7 05/23/2000 + * - DSCC-4 Errata Book PED-20534H Rev 2.1 DS5 05/23/2000 + * - Sample driver source code as of 07/27/99 + * All these documents are available from Infineon on + * request or from http://www.infineon.com/cgi/ecrm.dll/\ + * ecrm/scripts/prod_ov.jsp?oid=13604&cat_oid=-8059 . + * At least the current version of this beast likes to be + * treated _very_ carefully. If you don't do this, it crashes + * itself or the system. I have made comments on these common + * traps where appropriate. No, there isn't such thing as a + * "master reset" bit. If the DMAC crashes the device needs to + * be power cycled or /RES asserted. + * + * CVS: $Id: pciscc4.c,v 1.84 2001/07/09 18:12:27 dg1kjd Exp $ + * + * Changelog: Please log any changes here. + * | 08/23/99 Initial version Jens + * | 08/25/99 Reworked buffer concept to use last-mode Jens + * | policy and implemented Siemens' workarounds + * | 08/27/99 Reworked transmitter to use internal timers Jens + * | for better resolution at txdelay/txtail + * | 09/01/99 Ioctl()s implemented Jens + * | 09/10/99 Descriptor chain reworked. RX hold condition Jens + * | can't occur any more. TX descriptors are not Jens + * | re-initialized after transmit any more. + * | 09/12/99 TX reworked. TX-Timeout added. 
Jens + * | 09/13/99 TX timeout fixed Jens + * | 10/09/99 Cosmetic fixes and comments added Jens + * | 10/16/99 Cosmetic stuff and non-module mode fixed Jens + * | 10/21/99 TX-skbs are not freed in xmit()-statemachine Jens + * | 10/25/99 Default configuration more sensible now Jens + * | 02/13/00 Converted to new driver interface Jens + * | 08/06/00 Some more chip-bug workarounds mask rev. 2.0 + * | and made locking SMP safe Jens + * | 08/16/00 Structural changes in TX-state and RTS handing + * | to work around SCC-register readback problem, + * | SIOCPCISCCSDCFG now even works when iface is + * | up, as do proc/sysctl controls. Introduced + * | dev->tbusy flag. More workarounds for 2.0 . Jens + * | 12/24/00 Cosmetic cleanups, debugging defaults to off Jens + * | 07/09/01 mark_bh(NET_BH) on resetting dev->tbusy; + * | enabled txd in full duplex mode on request Jens + * | 01/18/02 Adapted to linux 2.4 standard kernel + * Jean-Paul Roubelat F6FBB + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + *---------------------------------------------------------------------------- + * + * | Please note that the GPL allows you to use the driver, NOT the radio. | + * | In order to use the radio, you need a license from the communications | + * | authority of your country. 
| + * + *---------------------------------------------------------------------------- + * + ***************************************************************************** + * + * Concept of Operation + * -------------------- + * + * I. SCCs + * We use all SCC cores in HDLC mode. Asyncronous and BiSync operation is not + * supported and probably never will. We do not make use of the built-in + * LAPB/LAPD protocol processor features (Auto Mode). Moreover we can't use + * automatic address recognition either because it is implemented in a way + * which allows only operation with fixed header sizes and address fields on + * static positions. Thus we use HDLC address mode 0. As far as the clock modes + * are concerned we make use of mode 0a (for DF9IC-like modems, RX and TX clock + * from pin header), 0b (G3RUH-"like", external RX clock, internal TX clock + * from BRG but unfornately without oversampling), 6b (for TCM-3105-like simple + * modems, using on-chip DPLL for RX clock recovery. Internal TX clock from BRG + * which can optionaly be provided on TxClk and/or RTS pins. No oversampling.) + * and 4 (external RX and TX clock like DF9IC, but with clock gapping + * function, see Data Book). Channel coding is user-selectable + * on a per-channel basis. DF9IC-type modems like NRZ (conversion NRZ->NRZI + * done internally), TCM-3105-like modems want NRZI coding. + * Moreover manchester, FM0 and FM1 can be selected (untested). + * The internal SCC-DMAC interface seems to obey the KISS-concept. The drawback + * of this fact is, that the chip fills our data buffers in memory completely + * sequential. If at the end of a frame the SCC realizes, that the FCS comparison + * failed, it does not "discard" the frame. That is, it requests an interrupt and + * uses up a packet buffer as if the frame was valid. The frame is, however, + * marked invalid, but of cause the host processor still needs to clean the + * mess up, which costs time. 
Now consider that every six ones in series on a + * empty channel will cause an interrupt and work to the handler. The only + * way around this is to gate the receive data with the DCD signal. Of cause + * the modem's DCD needs to be very fast to accomplish this. The standard-DCD + * on DF9IC-type modems currently isn't. As far as modem handshake is concerned + * we program the DCD input of each channel as general purpose input and read + * its state whenever L2 requests us to do so. TX handshake can be used in two + * modes I called "hard" and "soft". Hard mode is reserved for future + * (smart) modems which tell the controller when they are ready to transmit + * using the CTS (clear to send) signal. In soft mode we use each channel's + * internal timer to generate txdelay and txtail. The advantage of this concept + * is, that we have a resolution of one bit since the timers are clocked with + * the effective TxClk, which also allows us to probe the TX-bitrate in external + * clock modes (L2 likes this information). The SCC cores have some other + * goodies, as preample transmission, one insertion after 7 consecutive zeros + * and stuff like this which we make user selectable. + * + * II. DMA Controller and IRQs + * For maximum performance and least host processor load, the design of the + * DMA controller is descriptor orientated. For both, RX and TX channels + * descriptor "queues" are set up on device initialization. Each descriptor + * contains a link to its subsequent desriptor, a pointer to the buffer + * associated with it and the buffer's size. The buffer itself is _not_ part + * of the descriptor, but can be located anywhere else in address space. + * Thus, in TX case all we have to do when a packet to be sent arrives from + * L2, is painting out a descriptor (pointer to the sk_buf's data buffer, + * length of the frame and so on) and telling the DMAC to process it. We do + * not have to move the data around in memory. 
When the descriptor is finished + * (i.e. packet sent out completely or at least data completely in FIFO), the + * DMAC sets a flag (C) in the descriptor and issues an IRQ. We check the flag + * and if it is set, we can skb_free up the packet. Both descriptor queues (RX + * and TX) are organized circular with a user setable size and allocated + * statically at device initialization. As far as the RX queue ("ring") is + * concerned we also already allocate the sk_buffs associated with them. + * Whenever the DMAC processed a RX descriptor (thus "filled" the buffer + * associated with it) we release the buffer to L2 and allocate a new one. + * No copying. The structure of the RX descriptor chain never changes either. + * It stays the same since inititalization on device initialization and + * descriptor memory itself is only freed when the device destructor is called. + * The fact that both descriptor queues are kept statically has two advantages: + * It is save, because the DMAC can not "escape" due to a race condition and + * mess up our memory and it works around a hardware bug in the DSCC-4. + * A few words on linux mm: + * When a device driver allocates memory using functions like malloc() or + * alloc_skb(), the returned address pointers are pointers to virtual memory. + * In case of access to this memory, the MMU, as part of the CPU translates + * the virtual addresses to physical ones, which are e.g. used to drive the + * RAM address bus lines. If a PCI bus master accesses the same memory, it + * needs to know the right address vom _its_ point of view, the so-called + * "bus" address. On most architectures this is the same as the physical + * address. We use the funktion virt_to_bus() and bus_to_virt() to translate + * them. The descriptor structures contain both types, just to make the code + * faster and more readable. The symbol names for "bus"-pointers end on + * "ptr", for example rx_desc_t.next --(virt-to-bus)--> rx_desc_t.nextptr. 
+ * When we accessed "common" memory (i.e. descriptor or buffer memory) we + * issue a flush_cache_all() due to the fact that some architectures (not PC) + * don't keep memory caches consistent on DMAs. Where it isn't apropriate gcc + * will optimize it away for us. Read and write memory barriers rmb() / wmb() + * fit in the same category. i386 family CPUs do not perform instruction + * reordering as far as memory access is concerned. + * + * Another word on IRQ management: + * The DMAC is not only responsible for moving around network data from/to + * the SCC cores, but also maintains 10 so-called "interrupt queues" (IQs). + * These are intended to help speeding up operation and minimize load on the + * host CPU. There is the configuration queue (IQCFG) which is responsible + * for answers to DMAC configuration commands, the peripheral queue (IQPER), + * which cares about interrupt sources on the local bus, SSC (not SCC!) or GPP + * if enabled, and one TX and one RX queue per channel (IQRX and IQTX + * respectively), which are responsible for RX and TX paths and not only + * indicate DMAC exceptions (packet finished etc.) but also SCC exceptions + * (FIFO over/underrun, frame length exceeded etc.). Each element in the + * queues is a dword. The queues are "naturally" organized circular as well. + * Naturally means, that there is no such thing as a "next pointer" as in + * the frame descriptors, but that we tell the DMAC the length of each queue + * (user configurable in 32 dword-steps) and it does the wrap-around + * automagically. Whenever an element is added to a queue an IRQ is issued. + * The IRQ handler acks all interrupts by writing back the global status + * register (GSTAR) and starts processing ALL queues, independent of who + * caused the IRQ. + * Update 08/16/2000: Unfortunately spuriously interrupt vectors seem to + * get lost with some PCI bus arbiters, especially the AMD-751's (Irongate) + * controller. 
I spent about one week searching for this problem but due to + * lack of an PCI bus analyzer I did not succeed. This problem occurs with + * 2.0 as well as 2.1 mask revision and only under *very* high PCI bus and + * SCC bitrate load. I introduced a work-around which detects suspicious + * interrupt vector losts and scans the whole queue. This compensates completely + * for this symptom, but even more unfortunately if the lost of interrupt + * vectors occurs, usually the DMAC also forgets to write the "result" + * field of completed TX descriptors effectively locking up the transmitter + * of the concerned channel. I could implement a similar work-around + * here, but I rather recommend using another mainboard. I suspect that + * what we see here is a result of non-consistent memory-cache controlling. + * Reprogramming the PCI bus arbiter block does not seem to help either (at + * least not in the case of AMD-751). As last resort I implemented a new + * ioctl() (SIOCPCISCCKICKTX) to kick the transmitter manually. Use with + * care, and only if TX is really locked up, otherwise internal synchronization + * between descriptor cleanup process and DMAC-transmitter is lost, resulting + * in system lockup, DMAC lockup and transmission of arbitrary data. Kicking + * can now also be triggered by L2 via tbusy-mechanism. You can adjust + * the time interval neccessary to consider a transmitter "hung" via + * txkick module parameter (default is 30 seconds). + * + * III. General Purpose Port (GPP) + * The general purpose port is used for status LED connection. We support + * only 2 LEDs per port. These can be controlled with an ioctl(). We do not + * care about it, this ought to be done by a user-space daemon. The SSC port + * is not used. The LBI can be configured with the global settings and + * controlled by an own ioctl(). + * + * IV. Configuration + * We divide configuration into global (i.e. concerning all ports, chipwide) + * and local. 
We have one template for each, chipcfg_default and devcfg_default + * which is hardcoded and never changes. On module load it is copied for each + * chip and each device respectively (chipctl_t.cfg and devctl_t.cfg). The + * silicon is initialized with these values only in chip_open() and + * device_open() and the structures themselves can only be changed when the + * corresponding interface (or all interfaces for global config) is down. + * Changes take effect when the interface is brought up the next time. + * + * V. Initialization + * When module_init is called, the PCI driver already took care of assigning + * two pieces of memory space and an IRQ to each board. On module load we do + * nothing else than building up our internal structures (devctl_t and + * chipctl_t), grabbing the interface names and registering them with the + * network subsystem. Chip_open() and device_open() are called only upon uping + * a device and perform IRQ grabbing, memory mapping and allocation and + * hardware initialization. + * + * VI. RX Handling + * When a frame is received completely, the C flag in the corresponding + * descriptor is set by the DSCC-4, an interrupt vector transfered to the + * channel's RX IQ, and the IRQ line asserted. The IRQ handler takes control + * of the system. The first step it performs is reading the global status + * register and writing it back, thus ack'ing the IRQ. Then it is analyzed + * bit-by-bit to find out where it originated from. The channel's RX IQ + * is examined and the function pciscc_isr_receiver() called for the + * corresponding port. This functions processes the rx descriptor queue + * starting with the element (devctl_t.dq_rx_next) following the last element + * processed the last time the function was called. All descriptors with the + * C-flag ("C"omplete) set are processed. And at the end the dq_rx_next pointer + * is updated to the next to last element processed. 
During "processing" at + * first two pieces of information are examined: The status field of the + * descriptor, mainly containing the length of the received frame and a flag + * telling us wether the frame reception was aborted or not (RA), which + * was written by the DMAC and the so-called Receive Data Section Status Byte + * (RSTA) which was appended to the end of the data buffer by the channel's + * SCC core. Both are checked and if they yield a valid frame and we success- + * fully allocated a new skb we remove the old one from the descriptor and + * hand it off to pciscc_rx_skb() which paints out some of the skb's members + * and fires it up to the MAC layer. The descriptor's fields are re-initialized + * anyway to prepare it for the next reception. + * After all complete descriptors were processed we must tell the DMAC that the + * last ready-to-fill descriptor (LRDA, Last Receive Descriptor Address) is the + * one pre-previous to the last one processed. In fact experiments show that + * normally this is not neccessary since we get an interrupt for _every_ + * received frame so we can re-prepare the descriptor then. This is just to + * prevent the DMAC from "running around" uncontrolled in the circular + * structure, eventually losing sync with the devctl_t.dq_rx_next pointer in + * case of _very_ high bus/interrupt latency on heavy system load conditions. + * + * VII. TX Handling + * We assume we are in half duplex mode with software txdelay since everything + * else is a lot easier. The current TX state is kept track of in the + * devctl_t.txstate variable. When L2 hands us a frame, the first thing we + * do is check whether there is a free TX descriptor ready in the device's + * ring. The element dq_tx_last is a pointer to the last descriptor which + * is currently to be sent or already is sent. Thus, the element next to this + * element is the one we would like to fill. 
The variable dq_tx_cleanup + * of the device control structure tells us the next element to be "cleaned + * up" (free skb etc.) after transmission. If it points not to the element + * we like to fill, the element we like to fill is free. If it does, we must + * discard the new frame due to the full descriptor queue. Now that we are + * sure to have found our descriptor candidate will can paint it out, but + * we can't start transmission yet. We check what state the TX is in. If + * it is idle, we load the timer with the txdelay value and start it. Of cause + * we also need to manually assert the RTS line. If we were already + * transmitting, or sending trailing flags we immediately schedule the + * descriptor for process by the DMAC by pointing LTDA (Last Transmit + * Descriptor Address) to it. In the first case, the txdelay-timer will run + * out after the txdelay period is over, causing an IRQ. The interrupt handler + * can now schedule the transmission. During transmission we use the timer + * as some kind of software watchdog. When transmission finishes, we again + * program and start the timer, this time with the txtail value. The txstate + * variable is set to TX_TAIL and as soon as the timer runs out we can + * deassert the RTS line and reset txstate to TX_IDLE. The frame(s) were + * (hopefully) transmitted successfully. Anyway, each transmitted frame + * causes a ALLS TX IRQ. The only thing we need to do then, is to call + * tx_cleanup() which walks through the tx descriptor ring, cleaning up + * all finished entries (freeing up the skbs and things like this). 
+ * + ***************************************************************************** + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include + +#include + +/* --------------------------------------------------------------------------------------------- */ +/* user serviceable area or module parameter */ + + +static int xtal = 19660800; /* oscillator frequency in HZ */ +/*static int probebit = 9600; / number of cycles for tx_bitrate test */ +static int txtimeout = 120; /* TX watchdog - timeout in seconds to take iface down */ +static int txkick = 30; /* TX watchdog - timeout in seconds to kick iface */ +#undef PCISCC_DEBUG /* enable debugging */ +#undef PCISCC_VDEBUG /* enable verbose buffer debugging */ + +/* --------------------------------------------------------------------------------------------- */ +/* global variables */ + +static const char PCISCC_VERSION[] = "driver v 2.00 for Linux 2.4 - 2002/01/28 - Jean-Paul ROUBELAT (F6FBB)"; + +static struct tq_struct txto_task; +static struct tq_struct txreset_task; +static unsigned char * volatile dummybuf; + +/* --------------------------------------------------------------------------------------------- */ + +volatile int atom_check_counter = 0; + +#define ATOMICY_CHECK {\ + if ((++atom_check_counter) > 1)\ + printk(KERN_ERR "PCISCC: Atomicy check failed in function:%s line:%u.\n", __FUNCTION__, __LINE__);\ +} + +#define ATOMICY_CHECK_END {\ + atom_check_counter--;\ +} + +/* --------------------------------------------------------------------------------------------- */ + +/* template for global configuration, copied on driver init */ +static struct chipcfg_t chipcfg_default = { + 0, /* LBI mode */ + 1, /* oscillator power */ + 16, /* number of RX descriptors and buffers per channel */ + 8, /* number of TX descriptors per channel */ + 32, /* interrupt queue length */ + -1, /* priority channel 
*/ + 16, /* RX main fifo DMA threshold */ +}; + +/* template for device configuration, copied on driver init */ +static struct devcfg_t devcfg_default = { + CFG_CHCODE_NRZ, /* channel coding */ + CFG_CM_DF9IC, /* clocking mode */ + CFG_DUPLEX_HALF, /* duplex mode */ + 0, /* DPLL */ + 10, /* BRG "M" */ + 0, /* BRG "N" (N+1)*2^M; M=10, N=0 => 9600 baud */ + 0, /* clock-out enable */ + 0, /* data inversion */ + CFG_TXDDRIVE_TP, /* TxD driver type */ + 0, /* carrier detect inversion */ + 0, /* test loop */ + CFG_TXDEL_SOFT, /* TX-delay mode */ + 2000, /* software TX-delay in bitclk cycles => default 250 ms @9600 baud */ + 400, /* TX-tail, see above */ + 1, /* shared flags */ + 0, /* CRC mode */ + 0, /* preamble repetitions */ + 0, /* preamble */ + 0, /* HDLC extensions */ + 100, /* DCD SlotTime (bits) */ + 64 /* DCD WaitValue (0..255) */ +}; + +/* --------------------------------------------------------------------------------------------- */ + +#ifdef PCISCC_DEBUG +static void pciscc_pci_regdump(struct chipctl_t *cctlp) +{ + int i; + unsigned int bus = cctlp->pcidev->bus->number; + unsigned int devfn = cctlp->pcidev->devfn; + unsigned int value; + + printk(KERN_INFO "PCISCC: PCI Register dump for device %u:%u follows:\nPCISCC:", bus, devfn); + for (i=0; i<16; i++) { + pcibios_read_config_dword(bus, devfn, i*4, &value); + printk(KERN_INFO " 0x%08X", value); + } + printk(KERN_INFO "\n"); + return; +} +#endif + +/* --------------------------------------------------------------------------------------------- */ + +#ifdef PCISCC_DEBUG +/* dump DMAC's register to syslog */ +static void pciscc_dmac_regdump(struct chipctl_t *cctlp) +{ + printk(KERN_INFO "PCISCC: ---------- begin DMAC register dump ----------\n"); + printk(KERN_INFO "CH BRDA LRDA FRDA BTDA LTDA FTDA CFG\n"); + printk(KERN_INFO " 0 B0x%08lx B0x%08lx B0x%08lx B0x%08lx B0x%08lx B0x%08lx %08lx\n", + (unsigned long) readl(cctlp->io_base+CH0BRDA), + (unsigned long) readl(cctlp->io_base+CH0LRDA), + (unsigned long) 
readl(cctlp->io_base+CH0FRDA), + (unsigned long) readl(cctlp->io_base+CH0BTDA), + (unsigned long) readl(cctlp->io_base+CH0LTDA), + (unsigned long) readl(cctlp->io_base+CH0FTDA), + (unsigned long) readl(cctlp->io_base+CH0CFG)); + printk(KERN_INFO " 1 B0x%08lx B0x%08lx B0x%08lx B0x%08lx B0x%08lx B0x%08lx %08lx\n", + (unsigned long) readl(cctlp->io_base+CH1BRDA), + (unsigned long) readl(cctlp->io_base+CH1LRDA), + (unsigned long) readl(cctlp->io_base+CH1FRDA), + (unsigned long) readl(cctlp->io_base+CH1BTDA), + (unsigned long) readl(cctlp->io_base+CH1LTDA), + (unsigned long) readl(cctlp->io_base+CH1FTDA), + (unsigned long) readl(cctlp->io_base+CH1CFG)); + printk(KERN_INFO " 2 B0x%08lx B0x%08lx B0x%08lx B0x%08lx B0x%08lx B0x%08lx %08lx\n", + (unsigned long) readl(cctlp->io_base+CH2BRDA), + (unsigned long) readl(cctlp->io_base+CH2LRDA), + (unsigned long) readl(cctlp->io_base+CH2FRDA), + (unsigned long) readl(cctlp->io_base+CH2BTDA), + (unsigned long) readl(cctlp->io_base+CH2LTDA), + (unsigned long) readl(cctlp->io_base+CH2FTDA), + (unsigned long) readl(cctlp->io_base+CH2CFG)); + printk(KERN_INFO " 3 B0x%08lx B0x%08lx B0x%08lx B0x%08lx B0x%08lx B0x%08lx %08lx\n", + (unsigned long) readl(cctlp->io_base+CH3BRDA), + (unsigned long) readl(cctlp->io_base+CH3LRDA), + (unsigned long) readl(cctlp->io_base+CH3FRDA), + (unsigned long) readl(cctlp->io_base+CH3BTDA), + (unsigned long) readl(cctlp->io_base+CH3LTDA), + (unsigned long) readl(cctlp->io_base+CH3FTDA), + (unsigned long) readl(cctlp->io_base+CH3CFG)); + printk(KERN_INFO "PCISCC: ---------- end DMAC register dump ----------\n"); + return; +} +#endif /* PCISCC_DEBUG */ + +/* --------------------------------------------------------------------------------------------- */ + +#ifdef PCISCC_DEBUG +/* dump descriptor rings to syslog */ +static void pciscc_queuedump(struct devctl_t *dctlp) +{ + unsigned int i; + struct rx_desc_t *rdp; + struct tx_desc_t *tdp; + + if (!dctlp) return; + printk(KERN_INFO "PCISCC: ---------- begin queue 
dump RX iface %s ----------\n", dctlp->name); + printk(KERN_INFO "%s->dq_rx=V0x%08lx %s->dq_rx_next=V0x%08lx.\n", + dctlp->name, (unsigned long) dctlp->dq_rx, + dctlp->name, (unsigned long) dctlp->dq_rx_next); + printk(KERN_INFO "# &desc &next &prev &nextptr &dataptr &skb &feptr flags result\n"); + for (rdp=dctlp->dq_rx,i=0; ((rdp!=dctlp->dq_rx) || (i==0)) && (i<256); rdp=rdp->next,i++) { + if (!rdp) break; + printk(KERN_INFO "%3u V0x%08lx V0x%08lx V0x%08lx B0x%08lx B0x%08lx V0x%08lx B0x%08lx 0x%08lx 0x%08lx\n", + i, + (unsigned long) rdp, + (unsigned long) rdp->next, + (unsigned long) rdp->prev, + (unsigned long) rdp->nextptr, + (unsigned long) rdp->dataptr, + (unsigned long) rdp->skb, + (unsigned long) rdp->feptr, + (unsigned long) rdp->flags, + (unsigned long) rdp->result); + } + printk(KERN_INFO "PCISCC: ---------- end queue dump RX iface %s ----------\n", dctlp->name); + printk(KERN_INFO "PCISCC: ---------- begin queue dump TX iface %s ----------\n", dctlp->name); + printk(KERN_INFO "%s->dq_tx=V0x%08lx %s->dq_tx_cleanup=V0x%08lx %s->dq_tx_last=V0x%08lx.\n", + dctlp->name, (unsigned long) dctlp->dq_tx, + dctlp->name, (unsigned long) dctlp->dq_tx_cleanup, + dctlp->name, (unsigned long) dctlp->dq_tx_last); + printk(KERN_INFO "# &desc &next &prev &nextptr &dataptr &skb flags result\n"); + for (tdp=dctlp->dq_tx,i=0; ((tdp!=dctlp->dq_tx) || (i==0)) && (i<256); tdp=tdp->next,i++) { + if (!tdp) break; + printk(KERN_INFO "%3u V0x%08lx V0x%08lx V0x%08lx B0x%08lx B0x%08lx V0x%08lx 0x%08lx 0x%08lx\n", + i, + (unsigned long) tdp, + (unsigned long) tdp->next, + (unsigned long) tdp->prev, + (unsigned long) tdp->nextptr, + (unsigned long) tdp->dataptr, + (unsigned long) tdp->skb, + (unsigned long) tdp->flags, + (unsigned long) tdp->result); + } + printk(KERN_INFO "PCISCC: ---------- end queue dump TX iface %s ----------\n", dctlp->name); + return; +} +#endif /* PCISCC_DEBUG */ + +/* --------------------------------------------------------------------------------------------- 
*/ + +/* + * Correct mainboard memory data path misconfigurations / design flaws + * on certain systems which otherwise could jeopardize memory consistency. + */ +void pciscc_cpu_bridge_fixups(void) +{ + unsigned short bridge_vendor; + unsigned short bridge_device_id; + unsigned short s; + + pcibios_read_config_word(0, 0, PCI_VENDOR_ID, &bridge_vendor); + pcibios_read_config_word(0, 0, PCI_DEVICE_ID, &bridge_device_id); + + if ((bridge_vendor == PCI_VENDOR_ID_AMD) && (bridge_device_id == 0x7006)) { + printk(KERN_INFO "PCISCC: Detected AMD 751 (Irongate) Northbridge. Fixups enabled.\n"); + pcibios_read_config_word(0, 0, 0x84, &s); /* PCI arbitration control register */ + s = (1<<12) | (1<<11) | (1<<10) | (1<<9) | (1<<7) | (1<<3); + pcibios_write_config_word(0, 0, 0x84, s); + pcibios_read_config_word(0, 0, 0x86, &s); + s &= ~((1<<0) | (1<<1)); + pcibios_write_config_word(0, 0, 0x86, s); + } + return; +} + +/* --------------------------------------------------------------------------------------------- */ + +/* + * initialize chip, called when first interface of a chip is brought up + * action sequency was carefully chosen, don't mess with it + */ +static int pciscc_chip_open(struct chipctl_t *cctlp) +{ + unsigned long start_time; + volatile unsigned long gcc_optimizer_safe; + unsigned int i; + unsigned char pci_latency; + unsigned short cmd; + + if (cctlp->initialized) return 0; +#ifdef PCISCC_DEBUG + printk(KERN_INFO "PCISCC: chip_open().\n"); +#endif + /* enable bus mastering */ + pci_set_master(cctlp->pcidev); + /* tweak latency */ + pcibios_read_config_byte(cctlp->pcidev->bus->number, cctlp->pcidev->devfn, PCI_LATENCY_TIMER, &pci_latency); + i = pci_latency; +#ifdef PCISCC_DEBUG + printk(KERN_INFO "PCISCC: chip_open(): Old PCI latency timer: %u.\n", i); +#endif + pcibios_write_config_byte(cctlp->pcidev->bus->number, cctlp->pcidev->devfn, PCI_LATENCY_TIMER, 0xf8); + pcibios_read_config_word(cctlp->pcidev->bus->number, cctlp->pcidev->devfn, PCI_COMMAND, &cmd); + 
cmd &= ~(1L<<9); /* reset Fast Back-to-Back enable flag (Errata Sheet 03/99 1.2) */ + cmd |= (1<<6); /* set PER bit (=normal response to parity errors) */ + cmd |= (1<<8); /* enable /SERR driver */ + pcibios_write_config_word(cctlp->pcidev->bus->number, cctlp->pcidev->devfn, PCI_COMMAND, cmd); + pciscc_cpu_bridge_fixups(); +#ifdef PCISCC_DEBUG + pciscc_pci_regdump(cctlp); +#endif + /* request IRQ */ + if (request_irq(cctlp->pcidev->irq, pciscc_isr, SA_SHIRQ, "pciscc", (void *) cctlp)) { + printk(KERN_ERR "PCISCC: chip_open(): Could not get IRQ #%u.\n", cctlp->pcidev->irq); + pciscc_chip_close(cctlp); + return -EAGAIN; + } + cctlp->irq=cctlp->pcidev->irq; + /* allocate and initialize peripheral queue */ + if (!(cctlp->iq_per = kmalloc(cctlp->cfg.iqlen*4, GFP_DMA | GFP_KERNEL))) { + printk(KERN_ERR "PCISCC: chip_open(): Out of memory allocating peripheral interrupt queue.\n"); + return -ENOMEM; + } + memset(cctlp->iq_per, 0, cctlp->cfg.iqlen*4); + cctlp->iq_per_next = cctlp->iq_per; + /* configuration interrupt queue */ + if (!(cctlp->iq_cfg = kmalloc(cctlp->cfg.iqlen*4, GFP_DMA | GFP_KERNEL))) { + printk(KERN_ERR "PCISCC: chip_open(): Out of memory allocating configuration interrupt queue.\n"); + return -ENOMEM; + } + memset(cctlp->iq_cfg, 0, cctlp->cfg.iqlen*4); + cctlp->iq_cfg_next = cctlp->iq_cfg; + /* global hardware initialization */ + spin_lock_irq(&cctlp->chip_lock); +#if defined(__LITTLE_ENDIAN) + writel(0 /* no endian swapping */ + | (cctlp->cfg.prichan != -1 ? (SPRI | (cctlp->cfg.prichan * CHN)) : 0) + | (4 * PERCFG) + | (3 * LCD) + | CMODE + | (cctlp->cfg.oscpwr ? 0 : OSCPD), cctlp->io_base+GMODE); +#elif defined(__BIG_ENDIAN) + writel(ENDIAN /* endian swapping for packet data activated */ + | (cctlp->cfg.prichan != -1 ? (SPRI | (cctlp->cfg.prichan * CHN)) : 0) + | (4 * PERCFG) + | (3 * LCD) + | CMODE + | (cctlp->cfg.oscpwr ? 
0 : OSCPD), cctlp->io_base+GMODE); +#else +#error endianess undefined - please fix your system +#endif + writel(virt_to_bus(cctlp->iq_per), cctlp->io_base+IQPBAR); + writel(virt_to_bus(cctlp->iq_cfg), cctlp->io_base+IQCFGBAR); + writel((((cctlp->cfg.iqlen/32)-1)*IQCFGLEN) | (((cctlp->cfg.iqlen/32)-1)*IQPLEN), cctlp->io_base+IQLENR2); + writel(((32/4)*TFSIZE0) + | ((32/4)*TFSIZE1) + | ((32/4)*TFSIZE2) + | ((32/4)*TFSIZE3), cctlp->io_base+FIFOCR1); /* 32 dwords FIFO per channel; now fixed */ + writel(((24/4)*TFRTHRES0) + | ((24/4)*TFRTHRES1) + | ((24/4)*TFRTHRES2) + | ((24/4)*TFRTHRES3) + | M4_0 | M4_1 | M4_2 | M4_3, cctlp->io_base+FIFOCR2); /* 24 dwords; fixed */ + writel(cctlp->cfg.mfifo_rx_t, cctlp->io_base+FIFOCR3); + writel((20*TFFTHRES0) + | (20*TFFTHRES1) + | (20*TFFTHRES2) + | (20*TFFTHRES3), cctlp->io_base+FIFOCR4); /* 20 dwords; fixed */ + /* mask out all DMAC interrupts */ + writel((MRFI | MTFI | MRERR | MTERR), cctlp->io_base+CH0CFG); + writel((MRFI | MTFI | MRERR | MTERR), cctlp->io_base+CH1CFG); + writel((MRFI | MTFI | MRERR | MTERR), cctlp->io_base+CH2CFG); + writel((MRFI | MTFI | MRERR | MTERR), cctlp->io_base+CH3CFG); + /* all SCC cores in reset state */ + writel(0x00000000, cctlp->io_base+SCCBASE[0]+CCR0); + writel(0x00000000, cctlp->io_base+SCCBASE[1]+CCR0); + writel(0x00000000, cctlp->io_base+SCCBASE[2]+CCR0); + writel(0x00000000, cctlp->io_base+SCCBASE[3]+CCR0); + /* mask out all SCC interrupts */ + writel(0xffffffff, cctlp->io_base+SCCBASE[0]+IMR); + writel(0xffffffff, cctlp->io_base+SCCBASE[1]+IMR); + writel(0xffffffff, cctlp->io_base+SCCBASE[2]+IMR); + writel(0xffffffff, cctlp->io_base+SCCBASE[3]+IMR); + /* peripheral configuration */ + writel((BTYP*3), cctlp->io_base+LCONF); + writel(0x00000000, cctlp->io_base+SSCCON); + writel(0x00000000, cctlp->io_base+SSCIM); + writel(0x000000ff, cctlp->io_base+GPDIR); + writel(0x00000000, cctlp->io_base+GPDATA); + writel(0x00000000, cctlp->io_base+GPIM); + spin_unlock_irq(&cctlp->chip_lock); + /* 
initialize configuration and peripheral IQs */ + start_time = jiffies; + cctlp->mailbox = MAILBOX_NONE; + writel(CFGIQCFG | CFGIQP | AR, cctlp->io_base+GCMDR); + do { + gcc_optimizer_safe=(jiffies-start_time)<20 && !cctlp->mailbox; + } while (gcc_optimizer_safe); /* timeout 20 jiffies */ + switch (cctlp->mailbox) { /* mailbox was written by isr */ + case MAILBOX_OK: +#ifdef PCISCC_DEBUG + printk(KERN_INFO "PCISCC: chip_open(): Success on IQ config request.\n"); +#endif + break; + case MAILBOX_NONE: + printk(KERN_ERR "PCISCC: chip_open(): Timeout on IQ config request. Sync HDDs and hardware-reset NOW!\n"); + pciscc_chip_close(cctlp); + return -EIO; + case MAILBOX_FAILURE: + printk(KERN_ERR "PCISCC: chip_open(): Failure on IQ config request. Sync HDDs and hardware-reset NOW!\n"); + pciscc_chip_close(cctlp); + return -EIO; + } + cctlp->initialized=1; + return 0; +} + +/* --------------------------------------------------------------------------------------------- */ + +/* + * close chip, called when last device (channel) of a chip was closed. + * don't mess up. 
 */
/*
 * Tear the chip down again: refuses while any channel is still open,
 * puts the global configuration, DMAC and all four SCC cores back into
 * reset, masks every interrupt source, then releases the interrupt
 * queues and the IRQ.  Returns 0, or -EBUSY if channels are in use.
 */
static int pciscc_chip_close(struct chipctl_t *cctlp)
{
	if (cctlp->usecnt) {
		printk(KERN_ERR "PCISCC: chip_close() called while channels active.\n");
		return -EBUSY;
	}
#ifdef PCISCC_DEBUG
	printk(KERN_INFO "PCISCC: chip_close().\n");
#endif
	/* global configuration to reset state */
	spin_lock_irq(&cctlp->chip_lock);
	writel((4 * PERCFG)
	       | (3 * LCD)
	       | CMODE
	       | OSCPD, cctlp->io_base+GMODE);
	/* mask all DMAC interrupts */
	writel((MRFI | MTFI | MRERR | MTERR), cctlp->io_base+CH0CFG);
	writel((MRFI | MTFI | MRERR | MTERR), cctlp->io_base+CH1CFG);
	writel((MRFI | MTFI | MRERR | MTERR), cctlp->io_base+CH2CFG);
	writel((MRFI | MTFI | MRERR | MTERR), cctlp->io_base+CH3CFG);
	/* SCC cores to reset state */
	writel(0x00000000, cctlp->io_base+SCCBASE[0]+CCR0);
	writel(0x00000000, cctlp->io_base+SCCBASE[1]+CCR0);
	writel(0x00000000, cctlp->io_base+SCCBASE[2]+CCR0);
	writel(0x00000000, cctlp->io_base+SCCBASE[3]+CCR0);
	/* mask all SCC interrupts */
	writel(0xffffffff, cctlp->io_base+SCCBASE[0]+IMR);
	writel(0xffffffff, cctlp->io_base+SCCBASE[1]+IMR);
	writel(0xffffffff, cctlp->io_base+SCCBASE[2]+IMR);
	writel(0xffffffff, cctlp->io_base+SCCBASE[3]+IMR);
	spin_unlock_irq(&cctlp->chip_lock);
	/* free IQs, free IRQ, unmap control address space */
	if (cctlp->iq_per) {
		kfree(cctlp->iq_per);
		cctlp->iq_per=0;
		cctlp->iq_per_next=0;
	}
	if (cctlp->iq_cfg) {
		kfree(cctlp->iq_cfg);
		cctlp->iq_cfg=0;
		cctlp->iq_cfg_next=0;
	}
	if (cctlp->irq) {
		free_irq(cctlp->irq, (void *) cctlp);
		cctlp->irq=0;
	}
	/* NOTE(review): iounmap of io_base/lbi_base is deliberately commented
	 * out here - presumably the mappings are released elsewhere (e.g. at
	 * module unload); confirm before re-enabling. */
	/*
	if (cctlp->io_base) {
		iounmap(cctlp->io_base);
		cctlp->io_base=0;
	}
	if (cctlp->lbi_base) {
		iounmap(cctlp->lbi_base);
		cctlp->lbi_base=0;
	}
	*/
	cctlp->initialized=0;
	return 0;
}

/* --------------------------------------------------------------------------------------------- */

/*
 * open one channel, chip must have been initialized by chip_open() before.
+ * the sequence of actions done here was carefully chosen, don't mess with + * it unless you know exactly what you are doing... + */ +static int pciscc_channel_open(struct devctl_t *dctlp) +{ + struct chipctl_t *cctlp = dctlp->chip; + int channel = dctlp->channel; + struct rx_desc_t *rdp, *last_rdp; + struct tx_desc_t *tdp, *last_tdp; + unsigned long l; + unsigned long start_time; + volatile unsigned long gcc_optimizer_safe; + int i; + unsigned char *data; + + if (dctlp->start) return 0; +#ifdef PCISCC_DEBUG + printk(KERN_INFO "PCISCC: channel_open().\n"); +#endif + /* allocate and initialize RX and TX IQs */ + if (!(dctlp->iq_rx = kmalloc(cctlp->cfg.iqlen*4, GFP_DMA | GFP_KERNEL))) { + printk(KERN_ERR "PCISCC: channel_open(): Out of memory allocating rx interrupt queue.\n"); + return -ENOMEM; + } + memset(dctlp->iq_rx, 0, cctlp->cfg.iqlen*4); + dctlp->iq_rx_next = dctlp->iq_rx; + if (!(dctlp->iq_tx = kmalloc(cctlp->cfg.iqlen*4, GFP_DMA | GFP_KERNEL))) { + printk(KERN_ERR "PCISCC: channel_open(): Out of memory allocating tx interrupt queue.\n"); + return -ENOMEM; + } + memset(dctlp->iq_tx, 0, cctlp->cfg.iqlen*4); + dctlp->iq_tx_next = dctlp->iq_tx; + spin_lock_irq(&dctlp->dev_lock); + writel(0, cctlp->io_base+SCCBASE[channel]+CCR1); /* stop SCC */ + writel(0, cctlp->io_base+SCCBASE[channel]+CCR2); + writel(0, cctlp->io_base+SCCBASE[channel]+CCR0); + dctlp->ccr0 = dctlp->ccr1 = dctlp->ccr2 = 0; + writel(0, cctlp->io_base+SCCBASE[channel]+TIMR); + /* set IQ lengths and base addresses */ + l = readl(cctlp->io_base+IQLENR1); + switch (channel) { + case 0: writel(MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH0CFG); + writel(virt_to_bus(dctlp->iq_rx), cctlp->io_base+IQSCC0RXBAR); + writel(virt_to_bus(dctlp->iq_tx), cctlp->io_base+IQSCC0TXBAR); + l &= 0x0fff0fff; + l |= (((cctlp->cfg.iqlen/32)-1)*IQSCC0RXLEN) | (((cctlp->cfg.iqlen/32)-1)*IQSCC0TXLEN); + break; + case 1: writel(MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH1CFG); + writel(virt_to_bus(dctlp->iq_rx), 
cctlp->io_base+IQSCC1RXBAR); + writel(virt_to_bus(dctlp->iq_tx), cctlp->io_base+IQSCC1TXBAR); + l &= 0xf0fff0ff; + l |= (((cctlp->cfg.iqlen/32)-1)*IQSCC1RXLEN) | (((cctlp->cfg.iqlen/32)-1)*IQSCC1TXLEN); + break; + case 2: writel(MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH2CFG); + writel(virt_to_bus(dctlp->iq_rx), cctlp->io_base+IQSCC2RXBAR); + writel(virt_to_bus(dctlp->iq_tx), cctlp->io_base+IQSCC2TXBAR); + l &= 0xff0fff0f; + l |= (((cctlp->cfg.iqlen/32)-1)*IQSCC2RXLEN) | (((cctlp->cfg.iqlen/32)-1)*IQSCC2TXLEN); + break; + case 3: writel(MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH3CFG); + writel(virt_to_bus(dctlp->iq_rx), cctlp->io_base+IQSCC3RXBAR); + writel(virt_to_bus(dctlp->iq_tx), cctlp->io_base+IQSCC3TXBAR); + l &= 0xfff0fff0; + l |= (((cctlp->cfg.iqlen/32)-1)*IQSCC3RXLEN) | (((cctlp->cfg.iqlen/32)-1)*IQSCC3TXLEN); + break; + } + writel(l, cctlp->io_base+IQLENR1); + spin_unlock_irq(&dctlp->dev_lock); + start_time = jiffies; + cctlp->mailbox = MAILBOX_NONE; + writel(AR /* Action Request */ + | (channel == 0 ? (CFGIQSCC0RX | CFGIQSCC0TX) : 0) + | (channel == 1 ? (CFGIQSCC1RX | CFGIQSCC1TX) : 0) + | (channel == 2 ? (CFGIQSCC2RX | CFGIQSCC2TX) : 0) + | (channel == 3 ? (CFGIQSCC3RX | CFGIQSCC3TX) : 0), cctlp->io_base+GCMDR); + do { + gcc_optimizer_safe=(jiffies-start_time)<20 && !cctlp->mailbox; + } while (gcc_optimizer_safe); /* timeout 20 jiffies */ + switch (cctlp->mailbox) { /* mailbox was written by isr */ + case MAILBOX_OK: +#ifdef PCISCC_DEBUG + printk(KERN_INFO "PCISCC: channel_open(): Success on IQSCC config request.\n"); +#endif + break; + case MAILBOX_NONE: + printk(KERN_ERR "PCISCC: channel_open(): Timeout on IQSCC config request. Sync HDDs and hardware-reset NOW!\n"); + pciscc_channel_close(dctlp); + return -EIO; + case MAILBOX_FAILURE: + printk(KERN_ERR "PCISCC: channel_open(): Failure on IQSCC config request. 
Sync HDDs and hardware-reset NOW!\n"); + pciscc_channel_close(dctlp); + return -EIO; + } + /* initialize channel's SCC core */ + spin_lock_irq(&dctlp->dev_lock); + dctlp->ccr0 = PU + | ((dctlp->cfg.coding == CFG_CHCODE_NRZ) ? 0*SC : 0) + | ((dctlp->cfg.coding == CFG_CHCODE_NRZI) ? 2*SC : 0) + | ((dctlp->cfg.coding == CFG_CHCODE_FM0) ? 4*SC : 0) + | ((dctlp->cfg.coding == CFG_CHCODE_FM1) ? 5*SC : 0) + | ((dctlp->cfg.coding == CFG_CHCODE_MANCH) ? 6*SC : 0) + | VIS + | ((dctlp->cfg.dpll & CFG_DPLL_PS) ? 0 : PSD) + | ((dctlp->cfg.clkout & CFG_TXTXCLK) ? TOE : 0) + | ((dctlp->cfg.clockmode == CFG_CM_G3RUH) ? SSEL : 0) + | ((dctlp->cfg.clockmode == CFG_CM_TCM3105) ? SSEL : 0) + | ((dctlp->cfg.clockmode == CFG_CM_HS) ? HS : 0) + | ((dctlp->cfg.clockmode == CFG_CM_DF9IC) ? 0*CM : 0) /* clockmode 0a */ + | ((dctlp->cfg.clockmode == CFG_CM_G3RUH) ? 0*CM : 0) /* clockmode 0b */ + | ((dctlp->cfg.clockmode == CFG_CM_TCM3105) ? 6*CM : 0) /* clockmode 6b */ + | ((dctlp->cfg.clockmode == CFG_CM_HS) ? 4*CM : 0); /* clockmode 4 */ + writel(dctlp->ccr0, cctlp->io_base+SCCBASE[channel]+CCR0); + dctlp->ccr1 = (dctlp->cfg.datainv ? DIV : 0) + | ((dctlp->cfg.txddrive & CFG_TXDDRIVE_TP) ? ODS : 0) + | (dctlp->cfg.cdinv ? 0 : ICD) + | ((dctlp->cfg.clkout & CFG_TXRTS) ? TCLKO : 0) + | ((dctlp->cfg.txdelmode == CFG_TXDEL_HARD) ? 0 : FCTS) + | MDS1 + | (dctlp->cfg.testloop ? TLP : 0) + | (dctlp->cfg.sharedflg ? SFLAG : 0) + | ((dctlp->cfg.crcmode & CFG_CRCMODE_RESET_0000) ? CRL : 0) + | ((dctlp->cfg.crcmode & CFG_CRCMODE_CRC32) ? C32 : 0); + writel(dctlp->ccr1, cctlp->io_base+SCCBASE[channel]+CCR1); + dctlp->ccr2 = RAC + | ((dctlp->cfg.crcmode & CFG_CRCMODE_RXCD) ? DRCRC : 0) + | ((dctlp->cfg.crcmode & CFG_CRCMODE_RXCRCFWD) ? RCRC : 0) + | ((dctlp->cfg.crcmode & CFG_CRCMODE_TXNOCRC) ? XCRC : 0) + | (3*RFTH) /* 24 dwords rx treshold, fixed */ + | (dctlp->cfg.preamble*PRE) + | (dctlp->cfg.preamb_rpt ? EPT : 0) + | ((dctlp->cfg.preamb_rpt == 2) ? PRE0 : 0) + | ((dctlp->cfg.preamb_rpt == 4) ? 
PRE1 : 0) + | ((dctlp->cfg.preamb_rpt == 8) ? PRE0|PRE1 : 0) + | ((dctlp->cfg.hdlcext & CFG_HDLCEXT_ONEFILL) ? 0 : ITF) + | ((dctlp->cfg.hdlcext & CFG_HDLCEXT_ONEINS) ? OIN : 0); + writel(dctlp->ccr2, cctlp->io_base+SCCBASE[channel]+CCR2); + writel((dctlp->cfg.brate_m*BRM) | (dctlp->cfg.brate_n*BRN), cctlp->io_base+SCCBASE[channel]+BRR); + writel(RCE | ((dctlp->dev.mtu+AX25_MAX_HEADER_LEN)*RL), cctlp->io_base+SCCBASE[channel]+RLCR); + /* + * all sent | tx device underrun | timer int | tx message repeat | + * tx pool ready | rx device overflow | receive FIFO overflow | carrier detect | + * frame length exceeded => interrupt mask register + */ + writel(~(ALLS | XDU | TIN | XMR | XPR | RDO | RFO | CDSC | FLEX), cctlp->io_base+SCCBASE[channel]+IMR); + spin_unlock_irq(&dctlp->dev_lock); + /* wait until command_executing (CEC) is clear */ + start_time=jiffies; + do { + l=readl(cctlp->io_base+SCCBASE[channel]+STAR); + gcc_optimizer_safe=(jiffies-start_time)<20 && (l & CEC); + } while (gcc_optimizer_safe); + if (l & CEC) { + /* not ready, but we will execute reset anyway */ + printk(KERN_ERR "PCISCC: channel_open(): Timeout waiting for SCC being ready for reset.\n"); + } + /* execute channel's SCC core RX and TX reset */ + writel(RRES | XRES, cctlp->io_base+SCCBASE[channel]+CMDR); + start_time = jiffies; + dctlp->tx_mailbox = 0xffffffff; + do { + gcc_optimizer_safe=(jiffies-start_time)<20 && (dctlp->tx_mailbox==0xffffffff); + } while (gcc_optimizer_safe); /* timeout 20 jiffies */ + switch (dctlp->tx_mailbox & 0x03ffffff) { /* mailbox was written by isr */ + case 0x02001000: /* SCC XPR interrupt received */ +#ifdef PCISCC_DEBUG + printk(KERN_INFO "PCISCC: channel_open(): Success on SCC reset.\n"); +#endif + break; + case 0xffffffff: + printk(KERN_ERR "PCISCC: channel_open(): Timeout on SCC reset. 
Clocking problem?\n"); + break; + default: + printk(KERN_ERR "PCISCC: channel_open(): Failure on SCC reset: mailbox=0x%0lx.\n", dctlp->tx_mailbox); + break; + } + /* + * Prepare circular RX and TX descriptor queues ("FIFO" rings) + * Attention: + * This beast gets _very_ angry if you try to hand it a + * descriptor with a data length of 0. In fact it crashes + * the system by asserting /SERR or something. + */ + spin_lock_irq(&dctlp->dev_lock); + rdp = last_rdp = NULL; + for (i=0; icfg.rxbufcnt; i++, last_rdp=rdp) { + if (!(rdp=kmalloc(sizeof(struct rx_desc_t), GFP_DMA | GFP_KERNEL))) { + printk(KERN_ERR "PCISCC: channel_open(): Out of memory allocating rx descriptor chain.\n"); + spin_unlock_irq(&dctlp->dev_lock); + pciscc_channel_close(dctlp); + return -ENOMEM; + } + memset(rdp, 0, sizeof(struct rx_desc_t)); + if (i==0) { + dctlp->dq_rx=rdp; /* queue (ring) "head" */ + } else { + rdp->prev=last_rdp; + last_rdp->next=rdp; + last_rdp->nextptr=(void *) virt_to_bus(rdp); + } + if (!(rdp->skb=alloc_skb(dctlp->dev.mtu+AX25_MAX_HEADER_LEN+10+1+SKB_HEADROOM, GFP_DMA | GFP_KERNEL))) { + printk(KERN_ERR "PCISCC: channel_open(): Out of memory allocating socket buffers.\n"); + spin_unlock_irq(&dctlp->dev_lock); + pciscc_channel_close(dctlp); + return -ENOMEM; + } + skb_reserve(rdp->skb, SKB_HEADROOM); + rdp->dataptr=(void *) virt_to_bus(data=skb_put(rdp->skb, dctlp->dev.mtu+AX25_MAX_HEADER_LEN+10)); /* we will skb_trim() it after */ + rdp->flags=((dctlp->dev.mtu+AX25_MAX_HEADER_LEN)*NO); /* reception when we know frame length */ + } + rdp->next=dctlp->dq_rx; /* close ring structure */ + rdp->nextptr=(void *) virt_to_bus(dctlp->dq_rx); + dctlp->dq_rx->prev=rdp; + dctlp->dq_rx_next=dctlp->dq_rx; /* first descriptor to be processed = "first" descriptor in chain */ + /* TX queue */ + tdp = last_tdp = NULL; + for (i=0; icfg.txbufcnt; i++, last_tdp=tdp) { + if (!(tdp=kmalloc(sizeof(struct tx_desc_t), GFP_DMA | GFP_KERNEL))) { + printk(KERN_ERR "PCISCC: channel_open(): Out of 
memory allocating tx descriptor chain.\n"); + spin_unlock_irq(&dctlp->dev_lock); + pciscc_channel_close(dctlp); + return -ENOMEM; + } + memset(tdp, 0, sizeof(struct tx_desc_t)); + if (i==0) { + dctlp->dq_tx=tdp; + } else { + tdp->prev=last_tdp; + last_tdp->next=tdp; + last_tdp->nextptr=(void *) virt_to_bus(tdp); + } + tdp->skb=NULL; + tdp->dataptr=(void *) virt_to_bus(dummybuf); + tdp->flags=(8*NO) | FE; + } + tdp->next=dctlp->dq_tx; /* close ring structure */ + tdp->nextptr=(void *) virt_to_bus(dctlp->dq_tx); + dctlp->dq_tx->prev=tdp; + dctlp->dq_tx_last=dctlp->dq_tx; /* last descriptor to be transmitted */ + dctlp->dq_tx_cleanup=dctlp->dq_tx; /* first descriptor to be cleaned up after transmission */ + flush_cache_all(); + /* initialize DMAC channel's RX */ + switch (channel) { + case 0: writel(IDR, cctlp->io_base+CH0CFG); + writel(virt_to_bus(dctlp->dq_rx), cctlp->io_base+CH0BRDA); + writel(virt_to_bus(dctlp->dq_rx), cctlp->io_base+CH0FRDA); + writel(virt_to_bus(dctlp->dq_rx_next->prev->prev), cctlp->io_base+CH0LRDA); + break; + case 1: writel(IDR, cctlp->io_base+CH1CFG); + writel(virt_to_bus(dctlp->dq_rx), cctlp->io_base+CH1BRDA); + writel(virt_to_bus(dctlp->dq_rx), cctlp->io_base+CH1FRDA); + writel(virt_to_bus(dctlp->dq_rx_next->prev->prev), cctlp->io_base+CH1LRDA); + break; + case 2: writel(IDR, cctlp->io_base+CH2CFG); + writel(virt_to_bus(dctlp->dq_rx), cctlp->io_base+CH2BRDA); + writel(virt_to_bus(dctlp->dq_rx), cctlp->io_base+CH2FRDA); + writel(virt_to_bus(dctlp->dq_rx_next->prev->prev), cctlp->io_base+CH2LRDA); + break; + case 3: writel(IDR, cctlp->io_base+CH3CFG); + writel(virt_to_bus(dctlp->dq_rx), cctlp->io_base+CH3BRDA); + writel(virt_to_bus(dctlp->dq_rx), cctlp->io_base+CH3FRDA); + writel(virt_to_bus(dctlp->dq_rx_next->prev->prev), cctlp->io_base+CH3LRDA); + break; + } + spin_unlock_irq(&dctlp->dev_lock); + start_time=jiffies; + cctlp->mailbox=MAILBOX_NONE; + writel(AR, cctlp->io_base+GCMDR); + do { + gcc_optimizer_safe=(jiffies-start_time)<20 && 
!cctlp->mailbox; + } while (gcc_optimizer_safe); + switch (cctlp->mailbox) { /* mailbox was written by isr */ + case MAILBOX_OK: +#ifdef PCISCC_DEBUG + printk(KERN_INFO "PCISCC: channel_open(): Success on DMAC-RX config request.\n"); +#endif + dctlp->dmac_rx=DMAC_RX_INIT; + break; + case MAILBOX_NONE: + printk(KERN_ERR "PCISCC: channel_open(): Timeout on DMAC-RX config request. Sync HDDs and hardware-reset NOW!\n"); + break; + case MAILBOX_FAILURE: + printk(KERN_ERR "PCISCC: channel_open(): Failure on DMAC-RX config request. Sync HDDs and hardware-reset NOW!\n"); + break; + } + /* mask all DMAC interrupts (needed) */ + switch (channel) { + case 0: writel(MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH0CFG); + break; + case 1: writel(MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH1CFG); + break; + case 2: writel(MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH2CFG); + break; + case 3: writel(MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH3CFG); + break; + } + /* SCC core TX reset (again) */ + start_time=jiffies; + do { + l=readl(cctlp->io_base+SCCBASE[channel]+STAR); + gcc_optimizer_safe=(jiffies-start_time)<20 && (l & CEC); + } while (gcc_optimizer_safe); + if (l & CEC) { + /* not ready, but we will execute reset anyway */ + printk(KERN_ERR "PCISCC: channel_open(): Timeout waiting for SCC being ready for TX-reset.\n"); + } + writel(XRES, cctlp->io_base+SCCBASE[channel]+CMDR); + start_time = jiffies; + dctlp->tx_mailbox = 0xffffffff; + do { + gcc_optimizer_safe=(jiffies-start_time)<20 && (dctlp->tx_mailbox==0xffffffff); + } while (gcc_optimizer_safe); /* timeout 20 jiffies */ + switch (dctlp->tx_mailbox & 0x03ffffff) { /* mailbox was written by isr */ + case 0x02001000: /* SCC XPR interrupt received */ +#ifdef PCISCC_DEBUG + printk(KERN_INFO "PCISCC: channel_open(): Success on SCC TX-reset.\n"); +#endif + break; + case 0xffffffff: + printk(KERN_ERR "PCISCC: channel_open(): Timeout on SCC TX-reset. 
Clocking problem?\n"); + break; + default: + printk(KERN_ERR "PCISCC: channel_open(): Failure on SCC TX-reset: mailbox=0x%0lx.\n", dctlp->tx_mailbox); + break; + } + /* + * initialize DMAC's TX channel, FI must stay masked all the time + * even during operation, see device errata 03/99 + */ + switch (channel) { + case 0: writel(IDT | MTFI, cctlp->io_base+CH0CFG); + writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH0BTDA); + writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH0FTDA); + writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH0LTDA); + break; + case 1: writel(IDT | MTFI, cctlp->io_base+CH1CFG); + writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH1BTDA); + writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH1FTDA); + writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH1LTDA); + break; + case 2: writel(IDT | MTFI, cctlp->io_base+CH2CFG); + writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH2BTDA); + writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH2FTDA); + writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH2LTDA); + break; + case 3: writel(IDT | MTFI, cctlp->io_base+CH3CFG); + writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH3BTDA); + writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH3FTDA); + writel(virt_to_bus(dctlp->dq_tx), cctlp->io_base+CH3LTDA); + break; + } + start_time=jiffies; + cctlp->mailbox=MAILBOX_NONE; + writel(AR, cctlp->io_base+GCMDR); + do { + gcc_optimizer_safe=(jiffies-start_time)<20 && !cctlp->mailbox; + } while (gcc_optimizer_safe); + switch (cctlp->mailbox) { /* mailbox was written by isr */ + case MAILBOX_OK: +#ifdef PCISCC_DEBUG + printk(KERN_INFO "PCISCC: channel_open(): Success on DMAC-TX config request.\n"); +#endif + break; + case MAILBOX_NONE: + printk(KERN_ERR "PCISCC: channel_open(): Timeout on DMAC-TX config request. Sync HDDs and hardware-reset NOW!\n"); + break; + case MAILBOX_FAILURE: + printk(KERN_ERR "PCISCC: channel_open(): Failure on DMAC-TX config request. 
Sync HDDs and hardware-reset NOW!\n");
		break;
	case MAILBOX_FAILURE:
		printk(KERN_ERR "PCISCC: channel_open(): Failure on DMAC-TX config request. Sync HDDs and hardware-reset NOW!\n");
		break;
	}
	pciscc_set_txstate(dctlp, TX_IDLE);
	flush_cache_all();
#ifdef PCISCC_DEBUG
	pciscc_dmac_regdump(cctlp);
	pciscc_queuedump(dctlp);
#endif
	/* channel is live from here on: bump chip use count, mark started */
	dctlp->chip->usecnt++;
	dctlp->start = 1;
	dctlp->tbusy = 0;
	/* clear statistics */
	mdelay(10);
	memset(&dctlp->stats, 0, sizeof(struct net_device_stats));
	/* some housekeeping */
	return 0;
}

/* --------------------------------------------------------------------------------------------- */

/* close one channel - don't mess with it either */
/*
 * Shuts a channel down in the reverse of channel_open(): stops the SCC
 * timer and core, resets the DMAC (only after the RX side is guaranteed
 * out of hold state - DSCC-4 rev. <= 2.1 erratum), detaches the
 * interrupt queues, then frees descriptor rings and buffers.
 */
static void pciscc_channel_close(struct devctl_t *dctlp)
{
	struct chipctl_t *cctlp = dctlp->chip;
	int channel = dctlp->channel;
	struct rx_desc_t *rdp, *last_rdp;
	struct tx_desc_t *tdp, *last_tdp;
	unsigned long l;
	unsigned long start_time;
	volatile unsigned long gcc_optimizer_safe;	/* keeps busy-wait loops from being optimized away */

#ifdef PCISCC_DEBUG
	pciscc_dmac_regdump(cctlp);
	pciscc_queuedump(dctlp);
#endif
	/* at first stop timer */
	writel(0, cctlp->io_base+SCCBASE[channel]+TIMR);
	/* wait until command_executing (CEC) is clear */
	start_time=jiffies;
	do {
		l=readl(cctlp->io_base+SCCBASE[channel]+STAR);
		gcc_optimizer_safe=(jiffies-start_time)<20 && (l & CEC);
	} while (gcc_optimizer_safe);
	if (l & CEC) {
		/* not ready, but we will execute reset anyway */
		printk(KERN_ERR "PCISCC: channel_close(): Timeout waiting for SCC being ready for reset.\n");
	}
	/* RX and TX SCC reset */
	writel(RRES | XRES, cctlp->io_base+SCCBASE[channel]+CMDR);
	start_time = jiffies;
	dctlp->tx_mailbox = 0xffffffff;
	do {
		gcc_optimizer_safe=(jiffies-start_time)<20 && (dctlp->tx_mailbox==0xffffffff);
	} while (gcc_optimizer_safe);	/* timeout 20 jiffies */
	switch (dctlp->tx_mailbox & 0x03ffffff) {	/* mailbox was written by isr */
	case 0x02001000:	/* SCC XPR interrupt received */
#ifdef PCISCC_DEBUG
		printk(KERN_INFO "PCISCC: channel_close(): Success on SCC reset.\n");
#endif
		break;
	case 0xffffffff:
		printk(KERN_ERR "PCISCC: channel_close(): Timeout on SCC reset.\n");
		break;
	default:
		printk(KERN_ERR "PCISCC: channel_close(): Failure on SCC reset: mailbox=0x%0lx.\n", dctlp->tx_mailbox);
		break;
	}
	/* stop SCC core */
	writel(0, cctlp->io_base+SCCBASE[channel]+CCR1);
	writel(0, cctlp->io_base+SCCBASE[channel]+CCR2);
	writel(0, cctlp->io_base+SCCBASE[channel]+CCR0);
	dctlp->ccr0 = dctlp->ccr1 = dctlp->ccr2 = 0;
	/*
	 * Give the isr some time to "refill" the rx-dq
	 * we _MUST_ guarantee that the DMAC-RX is _NOT_ in
	 * hold state when issuing the RESET command, otherwise the DMAC
	 * will crash. (DSCC-4 Rev. <= 2.1)
	 * In addition to that we may only issue a RESET if channel is
	 * currently really initialized, otherwise something horrible will
	 * result.
	 */
	start_time=jiffies;
	do {
		gcc_optimizer_safe=(jiffies-start_time)<5;
	} while (gcc_optimizer_safe);
	/* OK, now we should be ready to put the DMAC into reset state */
	switch (channel) {
	case 0:	writel(RDR | RDT | MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH0CFG);
		break;
	case 1:	writel(RDR | RDT | MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH1CFG);
		break;
	case 2:	writel(RDR | RDT | MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH2CFG);
		break;
	case 3:	writel(RDR | RDT | MRFI | MTFI | MRERR | MTERR, cctlp->io_base+CH3CFG);
		break;
	}
	start_time = jiffies;
	cctlp->mailbox = MAILBOX_NONE;
	writel(AR, cctlp->io_base+GCMDR);
	do {
		gcc_optimizer_safe=(jiffies-start_time)<20 && !cctlp->mailbox;
	} while (gcc_optimizer_safe);	/* timeout 20 jiffies */
	switch (cctlp->mailbox) {	/* mailbox was written by isr */
	case MAILBOX_OK:
#ifdef PCISCC_DEBUG
		printk(KERN_INFO "PCISCC: channel_close(): Success on DMAC reset channel %u.\n", channel);
#endif
		dctlp->dmac_rx=DMAC_RX_RESET;
		pciscc_set_txstate(dctlp, TX_RESET);
		break;
	case MAILBOX_NONE:
		printk(KERN_ERR "PCISCC: channel_close(): Timeout on DMAC reset channel %u. Sync HDDs and hardware-reset NOW!\n", channel);
		break;
	case MAILBOX_FAILURE:
		printk(KERN_ERR "PCISCC: channel_close(): Failure on DMAC reset channel %u. Sync HDDs and hardware-reset NOW!\n", channel);
		break;
	}
	/* clear IQs */
	l = readl(cctlp->io_base+IQLENR1);
	switch (channel) {
	case 0:	l &= 0x0fff0fff;
		writel(l, cctlp->io_base+IQLENR1);
		writel(virt_to_bus(dummybuf), cctlp->io_base+IQSCC0RXBAR);
		writel(virt_to_bus(dummybuf), cctlp->io_base+IQSCC0TXBAR);
		break;
	case 1:	l &= 0xf0fff0ff;
		writel(l, cctlp->io_base+IQLENR1);
		writel(virt_to_bus(dummybuf), cctlp->io_base+IQSCC1RXBAR);
		writel(virt_to_bus(dummybuf), cctlp->io_base+IQSCC1TXBAR);
		break;
	case 2:	l &= 0xff0fff0f;
		writel(l, cctlp->io_base+IQLENR1);
		writel(virt_to_bus(dummybuf), cctlp->io_base+IQSCC2RXBAR);
		writel(virt_to_bus(dummybuf), cctlp->io_base+IQSCC2TXBAR);
		break;
	case 3:	l &= 0xfff0fff0;
		writel(l, cctlp->io_base+IQLENR1);
		writel(virt_to_bus(dummybuf), cctlp->io_base+IQSCC3RXBAR);
		writel(virt_to_bus(dummybuf), cctlp->io_base+IQSCC3TXBAR);
		break;
	}
	start_time = jiffies;
	cctlp->mailbox = MAILBOX_NONE;
	writel(AR
	       | (channel == 0 ? (CFGIQSCC0RX | CFGIQSCC0TX) : 0)
	       | (channel == 1 ? (CFGIQSCC1RX | CFGIQSCC1TX) : 0)
	       | (channel == 2 ? (CFGIQSCC2RX | CFGIQSCC2TX) : 0)
	       | (channel == 3 ? (CFGIQSCC3RX | CFGIQSCC3TX) : 0), cctlp->io_base+GCMDR);
	do {
		gcc_optimizer_safe=(jiffies-start_time)<20 && !cctlp->mailbox;
	} while (gcc_optimizer_safe);	/* timeout 20 jiffies */
	switch (cctlp->mailbox) {	/* mailbox was written by isr */
	case MAILBOX_OK:
#ifdef PCISCC_DEBUG
		printk(KERN_INFO "PCISCC: channel_close(): Success on IQSCC config request.\n");
#endif
		break;
	case MAILBOX_NONE:
		printk(KERN_ERR "PCISCC: channel_close(): Timeout on IQSCC config request. Sync HDDs and hardware-reset NOW!\n");
		break;
	case MAILBOX_FAILURE:
		printk(KERN_ERR "PCISCC: channel_close(): Failure on IQSCC config request. 
Sync HDDs and hardware-reset NOW!\n"); + break; + } + if (dctlp->dq_rx) { + rdp=dctlp->dq_rx; /* free descriptor chains and buffers */ + do { + if (rdp->skb) { + kfree_skb(rdp->skb); + rdp->skb=NULL; + } + last_rdp=rdp; + rdp=rdp->next; + kfree(last_rdp); + } while (rdp!=dctlp->dq_rx); + dctlp->dq_rx=NULL; + } + dctlp->dq_rx_next=NULL; + if (dctlp->dq_tx) { + tdp=dctlp->dq_tx; + do { + if (tdp->skb) { + kfree_skb(tdp->skb); + tdp->skb=NULL; + } + last_tdp=tdp; + tdp=tdp->next; + kfree(last_tdp); + } while (tdp!=dctlp->dq_tx); + dctlp->dq_tx=NULL; + } + dctlp->dq_tx_cleanup=NULL; + dctlp->dq_tx_last=NULL; + if (dctlp->iq_rx) { /* free IQs */ + kfree(dctlp->iq_rx); + dctlp->iq_rx=NULL; + } + if (dctlp->iq_tx) { + kfree(dctlp->iq_tx); + dctlp->iq_tx=NULL; + } + dctlp->start=0; + dctlp->chip->usecnt--; + return; +} + +/* Returns a random number between 0 and 255 */ +static unsigned short random_seed; +static inline unsigned short random_num(void) +{ + random_seed = 28629 * random_seed + 157; + return random_seed % 256; +} + +/* --------------------------------------------------------------------------------------------- */ + +/* interrupt handler root */ +static void pciscc_isr(int irq, void *dev_id, struct pt_regs *regs) +{ + struct chipctl_t *cctlp = (struct chipctl_t *) dev_id; + struct devctl_t *dctlp; + unsigned long status; + unsigned long iv; + int channel; + unsigned long * volatile iqp; + int processed; + int i; + + status = readl(cctlp->io_base+GSTAR); + writel(status, cctlp->io_base+GSTAR); /* ack' irq */ + rmb(); + wmb(); + if (!status) return; + /* do not disturb... */ + spin_lock(&cctlp->chip_lock); + ATOMICY_CHECK; + if (status & (IIPGPP | IIPLBI | IIPSSC)) { + /* process peripheral queue */ + processed = 0; + iqp = cctlp->iq_per_next; + while ((iv = *iqp) != 0) { + *iqp = 0; + flush_cache_all(); + rmb(); + wmb(); + printk(KERN_INFO "PCISCC: isr: IQPER vector: 0x%0lx.\n", iv); + iqp = ((iqp==(cctlp->iq_per+cctlp->cfg.iqlen-1)) ? 
cctlp->iq_per : iqp+1); /* wrap-arround */ + processed++; + } + cctlp->iq_per_next = iqp; + } + if (status & IICFG) { + /* process configuration queue */ + cctlp->mailbox = MAILBOX_NONE; + processed = 0; + iqp = cctlp->iq_cfg_next; + while ((iv = *iqp) != 0) { + *iqp = 0; + flush_cache_all(); + rmb(); + wmb(); +#ifdef PCISCC_DEBUG + printk(KERN_INFO "PCISCC: isr: IQCFG vector: 0x%0lx.\n", iv); +#endif + if ((iv) & ARACK) { + cctlp->mailbox = MAILBOX_OK; + processed++; + } + if ((iv) & ARF) { + cctlp->mailbox = MAILBOX_FAILURE; + processed++; + } + iqp = ((iqp==(cctlp->iq_cfg+cctlp->cfg.iqlen-1)) ? cctlp->iq_cfg : iqp+1); /* wrap-around */ + } + cctlp->iq_cfg_next = iqp; + if (processed != 1) { + printk(KERN_ERR "PCISCC: isr: Something weird going on... IICFG:processed=%u.\n", processed); + } + } + for (channel = 0 ; channel < dev_per_card ; channel++) if (status & (1<<(24+channel))) { + /* process TX queue */ + dctlp=cctlp->device[channel]; + if (!dctlp->iq_tx || !dctlp->iq_tx_next) continue; + processed = 0; + iqp = dctlp->iq_tx_next; + while ((iv = *iqp) != 0) { + *iqp = 0; + flush_cache_all(); + rmb(); + wmb(); + if (iv & TIN) { + /* timer interrupt */ + writel(0, cctlp->io_base+SCCBASE[channel]+TIMR); + /* now, which state are we in? */ + switch (dctlp->txstate) { + case TX_WAIT: + /* End of the slottime. Do we key ? */ + if (dctlp->dcd || random_num() > dctlp->cfg.persist) { + /* No ... One more slottime */ + writel(dctlp->cfg.slottime*TVALUE, cctlp->io_base+SCCBASE[channel]+TIMR); + writel(STI, cctlp->io_base+SCCBASE[channel]+CMDR); + } + else { + /* OK ... 
PTT on - > txdelay */ + pciscc_set_txstate(dctlp, TX_DELAY); + writel(dctlp->cfg.txdelval*TVALUE, cctlp->io_base+SCCBASE[channel]+TIMR); + writel(STI, cctlp->io_base+SCCBASE[channel]+CMDR); + } + break; + case TX_DELAY: + /* data transmit */ + pciscc_set_txstate(dctlp, TX_XMIT); + switch (channel) { + case 0: writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH0LTDA); + break; + case 1: writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH1LTDA); + break; + case 2: writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH2LTDA); + break; + case 3: writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH3LTDA); + break; + } + writel(txtimeout*dctlp->tx_bitrate*TVALUE, cctlp->io_base+SCCBASE[channel]+TIMR); + writel(STI, cctlp->io_base+SCCBASE[channel]+CMDR); + break; + case TX_TAIL: + /* transmitting tail */ + pciscc_set_txstate(dctlp, TX_IDLE); + break; + case TX_PROBE: + /* tx bitrate test in execution */ + do_gettimeofday(&dctlp->tv); + pciscc_set_txstate(dctlp, TX_RESET); + dctlp->probe_mailbox=1; + break; + case TX_CAL: + /* we are (i.e. were) calibrating */ + if (dctlp->dq_tx_last != dctlp->dq_tx_cleanup) { + /* we have something in the tx queue */ + pciscc_set_txstate(dctlp, TX_XMIT); + switch (channel) { + case 0: writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH0LTDA); + break; + case 1: writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH1LTDA); + break; + case 2: writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH2LTDA); + break; + case 3: writel(virt_to_bus(dctlp->dq_tx_last), cctlp->io_base+CH3LTDA); + break; + } + writel(txtimeout*dctlp->tx_bitrate*TVALUE, cctlp->io_base+SCCBASE[channel]+TIMR); + writel(STI, cctlp->io_base+SCCBASE[channel]+CMDR); + } else { + pciscc_set_txstate(dctlp, TX_IDLE); + } + break; + case TX_XMIT: + /* watchdog just ran out */ + pciscc_set_txstate(dctlp, TX_IDLE); + txto_task.routine=pciscc_bh_txto; + txto_task.data=(void *) dctlp; + /* queue_task(&txto_task, &tq_scheduler); Is it needed ? 
*/ + break; + default: +#ifdef PCISCC_DEBUG + printk(KERN_ERR "PCISCC: isr: Timer interrupt while txstate=%u.\n", dctlp->txstate); +#endif + pciscc_set_txstate(dctlp, TX_IDLE); + } + } + if (iv & ALLS) { + /* a TX frame was just completed */ + pciscc_isr_txcleanup(dctlp); + if ((dctlp->dq_tx_cleanup == dctlp->dq_tx_last) && (dctlp->txstate != TX_PROBE)) { + /* complete TX-queue sent out */ + if (dctlp->cfg.duplex == CFG_DUPLEX_FULLPTT) { + /* just update txstate */ + pciscc_set_txstate(dctlp, TX_IDLE); + } else if (dctlp->cfg.txdelmode == CFG_TXDEL_SOFT) { + /* "normal" full duplex mode or half duplex and soft TXDELAY: start txtail */ + pciscc_set_txstate(dctlp, TX_TAIL); + writel(dctlp->cfg.txtailval*TVALUE, cctlp->io_base+SCCBASE[channel]+TIMR); + writel(STI, cctlp->io_base+SCCBASE[channel]+CMDR); + } else if (dctlp->cfg.txdelmode == CFG_TXDEL_HARD) { + /* will deassert RTS immediately */ + pciscc_set_txstate(dctlp, TX_IDLE); + } + } + } + if (iv & XDU) { + /* + * TX stall - now we are _really_ in trouble. + * We must reset the SCC core and re-init DMAC-TX. + * This includes delay loops and we are in interrupt + * context, with all interrupts disabled... So we need + * to schedule a bottom half task for this. + */ +#ifdef PCISCC_DEBUG + printk(KERN_ERR "PCISCC: isr: TX data underrun occured iface=%s.\n", dctlp->name); +#endif + dctlp->stats.tx_fifo_errors++; + txreset_task.routine=pciscc_bh_txreset; + txto_task.data=(void *) dctlp; + /* queue_task(&txreset_task, &tq_scheduler); */ + } + if (iv & XMR) { + /* + * TX message repeat - not critical, since + * resolved automagically by abort sequence + * and retransmit. + */ +#ifdef PCISCC_DEBUG + printk(KERN_ERR "PCISCC: isr: TX message repeat interrupt iface=%s.\n", dctlp->name); +#endif + } + dctlp->tx_mailbox = iv; + iqp = ((iqp==(dctlp->iq_tx+cctlp->cfg.iqlen-1)) ? 
dctlp->iq_tx : iqp+1); /* wrap-around */ + processed++; + } + dctlp->iq_tx_next = iqp; +#ifdef PCISCC_VDEBUG + if (processed != 1) printk(KERN_INFO "PCISCC: isr: TX: iface=%s processed=%u\n", dctlp->name, processed); +#endif + if (processed == 0) { + /* + * If this happens we either already processed the IV belonging to this + * IRQ last time, or our honored DMAC messed up again and managed + * to "lose" this IV and advanced to the next position in the + * queue. We check for the latter case. If we didn't do that, TX would "hang". + */ + for (i=1; i<cctlp->cfg.iqlen; i++) { + iqp = ((iqp==(dctlp->iq_tx+cctlp->cfg.iqlen-1)) ? dctlp->iq_tx : iqp+1); /* wrap-around */ + rmb(); + wmb(); + if ((*iqp) != 0 && (*(dctlp->iq_tx_next)) == 0) { /* note order is important */ + dctlp->iq_tx_next = iqp; +#ifdef PCISCC_DEBUG + printk(KERN_INFO "PCISCC: isr: TX skipped %u IVs iface=%s.\n", i, dctlp->name); +#endif + break; + /* next IRQ will clean it up now */ + } + } + } + } + for (channel = 0 ; channel < dev_per_card ; channel++) if (status & (1<<(28+channel))) { + dctlp=cctlp->device[channel]; + /* process RX queue */ + if (!dctlp->iq_rx || !dctlp->iq_rx_next) { + printk(KERN_ERR "PCISCC: isr: IQCHAN%uRX interrupt for non-initialized queue!\n", channel); + continue; + } + processed = 0; + iqp = dctlp->iq_rx_next; + while ((iv = *iqp) != 0) { + *iqp = 0; + flush_cache_all(); + rmb(); + wmb(); + /* statistics only */ + if ((iv & SCCIV_SCC) && (iv & SCCIV_CDSC)) { + unsigned long l; + l = readl(cctlp->io_base+SCCBASE[channel]+STAR); + l = readl(cctlp->io_base+SCCBASE[channel]+STAR); + dctlp->dcd = dctlp->cfg.cdinv ? !!(l & CD) : !(l & CD); + } + if ((iv & SCCIV_SCC) && (iv & SCCIV_RDO)) { + dctlp->stats.rx_fifo_errors++; + } + if ((iv & SCCIV_SCC) && (iv & SCCIV_RFO)) { + dctlp->stats.rx_over_errors++; +#ifdef PCISCC_DEBUG + if (dctlp->stats.rx_over_errors == 1) { + printk(KERN_ERR "PCISCC: First RFO IV occurred. 
iface=%s.\n", dctlp->name); + pciscc_dmac_regdump(cctlp); + for (i = 0; i < dev_per_card ; i++) { + /* dump all interfaces since anyone could be responsible */ + if (cctlp->device[i]->start) pciscc_queuedump(cctlp->device[i]); + } + } +#endif + } + if ((iv & SCCIV_SCC) && (iv & SCCIV_FLEX)) { + dctlp->stats.rx_length_errors++; + } + if (!(iv & SCCIV_SCC) && (iv & SCCIV_ERR)) { + dctlp->stats.rx_errors++; + } + if (!(iv & SCCIV_SCC) && (iv & SCCIV_HI)) { + printk(KERN_ERR "PCISCC: isr: Weird... received HI interrupt.\n"); + } + if (!(iv & SCCIV_SCC) && (iv & SCCIV_FI)) { + } + dctlp->rx_mailbox = iv; + iqp = ((iqp==(dctlp->iq_rx+cctlp->cfg.iqlen-1)) ? dctlp->iq_rx : iqp+1); /* wrap-around */ + processed++; + } + /* in any case check RX descriptor queue for received frames */ + if (dctlp->start) pciscc_isr_receiver(dctlp); + dctlp->iq_rx_next=iqp; + } + ATOMICY_CHECK_END; + spin_unlock(&cctlp->chip_lock); + return; +} + +/* --------------------------------------------------------------------------------------------- */ + +/* called by interrupt handler root when RX interrupt occurred */ +static __inline__ void pciscc_isr_receiver(struct devctl_t *dctlp) +{ + struct chipctl_t *cctlp = dctlp->chip; + int channel = dctlp->channel; + struct rx_desc_t * volatile rdp; + long status; + volatile unsigned char rdsb; /* receive data status byte, generated by DMAC at buffer end */ + volatile int bno; + volatile int valid; + struct sk_buff *new_skb; + int processed; + +#ifdef PCISCC_DEBUG + if (!dctlp->start) { + printk(KERN_INFO "PCISCC: isr_receiver: frame received while !start.\n"); + } +#endif + for (rdp=dctlp->dq_rx_next, processed=0; (rdp->result & C); rdp=rdp->next, processed++) { +#ifdef PCISCC_DEBUG + if ((rdp->nextptr != (void *) virt_to_bus(rdp->next)) || (rdp->dataptr != (void *) virt_to_bus(rdp->skb->data))) { + panic("PCISCC: isr_receiver(): mm fucked with our buffers"); + } +#endif + status = rdp->result; + bno = (status >> 16) & 0x1fff; + status &= 0xe000ffff; + 
valid = 1; /* we assume frame valid */ + if ((status & RA) || (bno <= 0) || (bno > (dctlp->dev.mtu+AX25_MAX_HEADER_LEN)) || !(status & FE) || (rdp->feptr != (void *) virt_to_bus(rdp))) { + /* aborted or invalid length */ + valid = 0; + } else { + rdsb = rdp->skb->data[bno-1]; + if (!(rdsb & SB_VFR)) { /* incorrect bit length */ + valid = 0; + dctlp->stats.rx_frame_errors++; + } + if (rdsb & SB_RDO) { /* data overflow */ + valid = 0; /* already counted */ + } + if (!(rdsb & SB_CRC) && !(dctlp->cfg.crcmode & CFG_CRCMODE_RXCD)) { + valid = 0; /* CRC error */ + dctlp->stats.rx_crc_errors++; + } + if (rdsb & SB_RAB) { /* receive message aborted */ + valid = 0; + } + } +#ifdef PCISCC_VDEBUG + printk(KERN_INFO "PCISCC: isr_receiver: status=%lx valid=%d rdsb=%x bno=%d.\n", status, valid, rdsb, bno); +#endif + /* OK, this is a little bit tricky. We have to make sure + * that every descriptor has a buffer assigned. Thus we + * can only release a buffer to the link layer if we get + * a new one in turn from mm before. 
*/ + if (valid) { + if ((new_skb = alloc_skb(dctlp->dev.mtu+AX25_MAX_HEADER_LEN+10+1+SKB_HEADROOM, GFP_DMA | GFP_ATOMIC))) { /* 1 more byte for KISS */ + unsigned char *ptr; + +#ifdef PCISCC_VDEBUG + printk(KERN_INFO "PCISCC: isr_receiver: Processing frame len=%d bno=%d.\n", rdp->skb->len, bno); +#endif + skb_reserve(new_skb, SKB_HEADROOM); + skb_trim(rdp->skb, bno-1); +#ifdef PCISCC_VDEBUG + printk(KERN_INFO "PCISCC: isr_receiver: Processed frame len=%d.\n", rdp->skb->len); +#endif + ptr = skb_push(rdp->skb, 1); /* Prefix the data with the KISS byte */ + *ptr = 0; + pciscc_rx_skb(rdp->skb, dctlp); + rdp->skb = new_skb; + rdp->dataptr=(void *) virt_to_bus(skb_put(rdp->skb, dctlp->dev.mtu+AX25_MAX_HEADER_LEN+10)); + } else { +#ifdef PCISCC_DEBUG + printk(KERN_INFO "PCISCC: isr_receiver: Out of memory allocating new skb!\n"); +#endif + } + } + rdp->flags=(dctlp->dev.mtu+AX25_MAX_HEADER_LEN)*NO; /* prepare descriptor for next time */ + rdp->result=0; + rdp->feptr=0; + flush_cache_all(); + } +#ifdef PCISCC_VDEBUG + printk(KERN_INFO "PCISCC: isr_receiver: Processed %u frames at once.\n", processed); +#endif + dctlp->dq_rx_next = rdp; + wmb(); /* no instruction reordering beyond this point */ + /* + * tell DMAC last available descriptor - keep up one + * descriptor space for security (paranoia) (...->prev->prev) + */ + switch (channel) { + case 0: writel(virt_to_bus(rdp->prev->prev), cctlp->io_base+CH0LRDA); + break; + case 1: writel(virt_to_bus(rdp->prev->prev), cctlp->io_base+CH1LRDA); + break; + case 2: writel(virt_to_bus(rdp->prev->prev), cctlp->io_base+CH2LRDA); + break; + case 3: writel(virt_to_bus(rdp->prev->prev), cctlp->io_base+CH3LRDA); + break; + } + return; +} + +/* --------------------------------------------------------------------------------------------- */ + +/* called by IRQ handler root when a TX descriptor was completed */ +static int pciscc_isr_txcleanup(struct devctl_t *dctlp) +{ + struct tx_desc_t * volatile tdp; + int processed; + + processed=0; + 
tdp=dctlp->dq_tx_cleanup; + while ((tdp->result & C) && (tdp != dctlp->dq_tx_last)) { + /* clean up all (C)omplete descriptors */ + if (tdp->skb) { +#ifdef PCISCC_DEBUG + if ((tdp->nextptr != (void *) virt_to_bus(tdp->next)) || (tdp->dataptr != (void *) virt_to_bus(tdp->skb->data))) { + /* + * paranoia check - + * this should _never_ever_occur_ . + * if it does, the memory subsystem moved around + * our buffers in address space, and it's the + * last you will see. + */ + printk(KERN_ERR "PCISCC: isr_txcleanup(): mm fucked with our buffers.\n"); + } +#endif + dctlp->stats.tx_packets++; + dctlp->stats.tx_bytes += tdp->skb->len; + kfree_skb(tdp->skb); + tdp->skb = NULL; + } + tdp->flags = (FE | (NO*8)); /* dummy */ + tdp->dataptr = (void *) virt_to_bus(dummybuf); /* paranoia */ + tdp->result = 0; + tdp = tdp->next; + processed++; + if (processed>100) { +#ifdef PCISCC_DEBUG + printk(KERN_ERR "PCISCC: trouble in isr_txcleanup or please reduce bit rate by 20 dB.\n"); + dctlp->start=0; +#endif + break; + } + } + dctlp->dq_tx_cleanup = tdp; + wmb(); + flush_cache_all(); +#ifdef PCISCC_VDEBUG + printk(KERN_INFO "PCISCC: isr_txcleanup: Processed %u frames.\n", processed); +#endif + if (processed > 0) { + dctlp->tbusy=0; + /* mark_bh(NET_BH); */ + } + return processed; +} + +/* --------------------------------------------------------------------------------------------- */ + +/* + * BOTTOM HALF + * Called by TIN ISR when TX timeout has occurred (watchdog) + */ +static void pciscc_bh_txto(void *arg) +{ + struct devctl_t *dctlp = (struct devctl_t *) arg; + + printk(KERN_ERR "PCISCC: Taking interface %s down due to TX hang. Clocking problem?\n", dctlp->name); + dev_close(&dctlp->dev); + return; +} + +/* --------------------------------------------------------------------------------------------- */ + +/* + * BOTTOM HALF + * Called by ISR root when TX underrun forces us to reset + * a given channel's transmitter. 
+ */ +static void pciscc_bh_txreset(void *arg) +{ + struct devctl_t *dctlp = (struct devctl_t *) arg; + int channel = dctlp->channel; + struct chipctl_t *cctlp = dctlp->chip; + volatile unsigned long gcc_optimizer_safe; + unsigned long start_time; + + /* reset SCC core TX */ + writel(XRES, cctlp->io_base+SCCBASE[channel]+CMDR); + start_time = jiffies; + do { + gcc_optimizer_safe=((jiffies-start_time) < 20); + } while (gcc_optimizer_safe); /* 20 jiffies delay */ + return; +} + +/* --------------------------------------------------------------------------------------------- */ + +/* *REALLY* ugly work-around for timer race */ +static __inline__ void pciscc_clear_timer(struct devctl_t *dctlp) +{ + struct chipctl_t *cctlp = dctlp->chip; + unsigned long * volatile iqp; + + /* walk through TX queue eliminating TINs FIXME */ + if (!dctlp->iq_tx || !dctlp->iq_tx_next) return; + for (iqp=dctlp->iq_tx_next; *iqp!=0; iqp=((iqp==(dctlp->iq_tx+cctlp->cfg.iqlen-1)) ? dctlp->iq_tx : iqp+1)) { /* note wrap-around */ + if (*iqp & TIN) *iqp = SCCIV_IGNORE; + } + return; +} + +/* --------------------------------------------------------------------------------------------- */ + +/* + * probe TX bitrate of a channel, called from device_open() + * idea behind it: + * load the channels timer with a known value and measure how long it + * takes to reach zero, using the system timer + */ +/* static long pciscc_probe_txrate(struct devctl_t *dctlp) +{ + struct chipctl_t *cctlp = dctlp->chip; + struct timeval tv_start; + volatile unsigned long gcc_optimizer_safe; + unsigned long start_time; + long delta_us; + unsigned long long tx_bitrate; + + pciscc_set_txstate(dctlp, TX_PROBE); + dctlp->probe_mailbox = 0; + start_time = jiffies; + writel((probebit*TVALUE), cctlp->io_base+SCCBASE[dctlp->channel]+TIMR); + do_gettimeofday(&tv_start); + writel(STI, cctlp->io_base+SCCBASE[dctlp->channel]+CMDR); + do { + gcc_optimizer_safe = (dctlp->probe_mailbox != 1) && ((jiffies-start_time)<1000); + } while 
(gcc_optimizer_safe); + pciscc_set_txstate(dctlp, TX_IDLE); + if (dctlp->probe_mailbox != 1) { + printk(KERN_ERR "PCISCC: probe_txrate(): Timeout probing %s-TxClk. Clocking problem?\n", dctlp->dev.name); + return 9600; / default / + } else { + delta_us = (dctlp->tv.tv_sec - tv_start.tv_sec)*1000000+(dctlp->tv.tv_usec - tv_start.tv_usec); + } + tx_bitrate = 10000*probebit/(delta_us/100); +#ifdef PCISCC_DEBUG + printk(KERN_INFO "PCISCC: probe_txrate(): tx_bitrate=%ld.\n", (long) tx_bitrate); +#endif + return tx_bitrate; +} +*/ + +/* --------------------------------------------------------------------------------------------- */ + +/* + * Set new transmitter state. + * This function will be called from various places and is responsible for + * keeping the dctlp->txstate variable and RTS line up to date. + * Call only when holding big device lock and with IRQs off to avoid race conditions. + * Note we may not read back SCC core register contents because of a chip bug. + */ +static __inline__ void pciscc_set_txstate(struct devctl_t *dctlp, int state) +{ + struct chipctl_t *cctlp = dctlp->chip; + int channel = dctlp->channel; + int ptt; + +#ifdef PCISCC_DEBUG + /* printk(KERN_INFO "PCISCC: pciscc_set_txstate(%s, %d).\n", dctlp->name, state); */ + /* paranoia */ + if (state < TX_MIN || state > TX_MAX) { + printk(KERN_ERR "PCISCC: pciscc_set_txstate(%s, %d) illegal state.\n", dctlp->name, state); + state = TX_RESET; + } +#endif + switch (state) { + case TX_RESET: + case TX_WAIT: + ptt = 0; + case TX_IDLE: + if (dctlp->cfg.duplex == CFG_DUPLEX_FULLPTT) { + ptt = 1; + } else { + ptt = 0; + } + break; + case TX_DELAY: + case TX_XMIT: + case TX_TAIL: + case TX_PROBE: + case TX_CAL: + default: + ptt = 1; + } + writel(dctlp->ccr1 | (ptt ? 
RTS : 0), cctlp->io_base+SCCBASE[channel]+CCR1); + dctlp->txstate = state; + return; +} + +/* --------------------------------------------------------------------------------------------- */ + +/* netdevice UP -> DOWN routine */ +static int pciscc_dev_close(struct net_device *dev) +{ + struct devctl_t *dctlp = (struct devctl_t *) dev->priv; + + if (dctlp->start) { + pciscc_channel_close(dctlp); + } + if (dctlp->chip->initialized && !dctlp->chip->usecnt) { + pciscc_chip_close(dctlp->chip); + } + MOD_DEC_USE_COUNT; + return 0; +} + +/* --------------------------------------------------------------------------------------------- */ + +/* netdevice DOWN -> UP routine */ +static int pciscc_dev_open(struct net_device *dev) +{ + struct devctl_t *dctlp = (struct devctl_t *) dev->priv; + int res; + + if (!dctlp->chip->initialized) { + if ((res=pciscc_chip_open(dctlp->chip))) return res; + } + if (!dctlp->start) { + if ((res=pciscc_channel_open(dctlp))) return res; + dctlp->tx_bitrate = (xtal >> 4) / ((dctlp->cfg.brate_n+1)*(1 << dctlp->cfg.brate_m)); +/* dctlp->tx_bitrate = pciscc_probe_txrate(dctlp); */ + printk(KERN_INFO "PCISCC: tx_bitrate=%ld (%d %d).\n", dctlp->tx_bitrate, dctlp->cfg.brate_m, dctlp->cfg.brate_n); + } + MOD_INC_USE_COUNT; + return 0; +} + +/* --------------------------------------------------------------------------------------------- */ + +/* netdevice change MTU request */ +static int pciscc_change_mtu(struct net_device *dev, int new_mtu) +{ + dev->mtu=new_mtu; + return 0; +} + +/* --------------------------------------------------------------------------------------------- */ + +/* netdevice get statistics request */ +static struct net_device_stats *pciscc_get_stats(struct net_device *dev) +{ + struct devctl_t *dctlp; + + if (!dev || !dev->priv) return NULL; /* paranoia */ + dctlp = (struct devctl_t *) dev->priv; + return &dctlp->stats; +} + +/* --------------------------------------------------------------------------------------------- */ + +static 
unsigned char ax25_bcast[AX25_ADDR_LEN] = +{'Q' << 1, 'S' << 1, 'T' << 1, ' ' << 1, ' ' << 1, ' ' << 1, '0' << 1}; +static unsigned char ax25_nocall[AX25_ADDR_LEN] = +{'L' << 1, 'I' << 1, 'N' << 1, 'U' << 1, 'X' << 1, ' ' << 1, '1' << 1}; + +/* netdevice register - finish painting netdev structure */ +static int pciscc_dev_init(struct net_device *dev) +{ + struct devctl_t *dctlp = (struct devctl_t *) dev->priv; + + dev->hard_start_xmit = pciscc_xmit; + dev->open = pciscc_dev_open; + dev->stop = pciscc_dev_close; + dev->get_stats = pciscc_get_stats; + dev->change_mtu = pciscc_change_mtu; + dev->do_ioctl = pciscc_dev_ioctl; + dev->set_mac_address = pciscc_dev_set_mac_address; + dev->hard_header = ax25_encapsulate; + dev->rebuild_header = ax25_rebuild_header; + dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN; + dev->addr_len = AX25_ADDR_LEN; + dev->type = ARPHRD_AX25; + dev->mtu = 1500; + dev->tx_queue_len = 10; + dev->tx_timeout = NULL; + dev->flags = 0; + /* dev->flags = (IFF_BROADCAST | IFF_MULTICAST); */ + + memcpy(dev->broadcast, ax25_bcast, AX25_ADDR_LEN); + memcpy(dev->dev_addr, ax25_nocall, AX25_ADDR_LEN); + + dev_init_buffers(dev); + memcpy(&dctlp->cfg, &devcfg_default, sizeof(struct devcfg_t)); + + return 0; +} + +/* --------------------------------------------------------------------------------------------- */ + +/* set device's L2 address */ +static int pciscc_dev_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *sa = addr; + + memcpy(dev->dev_addr, sa->sa_data, AX25_ADDR_LEN); + return 0; +} + +/* --------------------------------------------------------------------------------------------- */ +/* + * IOCTLs: + * + * SIOCPCISCCGCCFG PCISCC Get Chip ConFiG + * SIOCPCISCCSCCFG PCISCC Set Chip ConFiG + * SIOCPCISCCGDCFG PCISCC Get Device ConFiG + * SIOCPCISCCSDCFG PCISCC Set Device ConFiG + * SIOCPCISCCSLED PCISCC Set LEDs + * SIOCPCISCCGDSTAT PCISCC Get Device STATus + * SIOCPCISCCDCAL PCISCC Device CALibrate + * 
SIOCPCISCCLBI PCISCC DSCC-4 Local Bus Interface transaction + * SIOCPCISCCKICKTX PCISCC KICK TX + */ + +static int pciscc_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct devctl_t *dctlp = (struct devctl_t *) dev->priv; + struct chipctl_t *cctlp = dctlp->chip; + struct devcfg_t dcfg; + struct chipcfg_t ccfg; + struct lbi_xfer lbi; + int channel = dctlp->channel; + int i; + int probe_neccessary; + unsigned long l; + unsigned long status; + unsigned long time; + + switch (cmd) { + case SIOCPCISCCGCCFG: + /* return cctlp->cfg structure in user-provided buffer */ + if (copy_to_user(ifr->ifr_data, &cctlp->cfg, sizeof(struct chipcfg_t))) { + return -EFAULT; + } + return 0; + case SIOCPCISCCSCCFG: + /* copy user-provided data buffer to cctlp->cfg */ + if (!suser()) return -EPERM; + for (i = 0 ; i < dev_per_card ; i++) { + if (cctlp->device[i]->start) return -EBUSY; + } + if (copy_from_user(&ccfg, ifr->ifr_data, sizeof(struct chipcfg_t))) { + return -EFAULT; + } + if ((ccfg.rxbufcnt < 4) || (ccfg.rxbufcnt > 128)) return -EINVAL; + if ((ccfg.txbufcnt < 4) || (ccfg.txbufcnt > 256)) return -EINVAL; + if ((ccfg.iqlen < 32) || (ccfg.iqlen > 512) || (ccfg.iqlen % 32 != 0)) return -EINVAL; + if ((ccfg.prichan > 3) || (ccfg.prichan < -1)) return -EINVAL; + if ((ccfg.mfifo_rx_t > 124) || (ccfg.mfifo_rx_t < 4)) return -EINVAL; + memcpy((unsigned char *) &cctlp->cfg, (unsigned char *) &ccfg, sizeof(struct chipcfg_t)); + return 0; + case SIOCPCISCCGDCFG: + /* return dctlp->cfg structure in user-provided buffer */ + if (copy_to_user(ifr->ifr_data, &dctlp->cfg, sizeof(struct devcfg_t))) { + return -EFAULT; + } + return 0; + case SIOCPCISCCSDCFG: + /* copy user-provided data buffer to dctlp->cfg */ + if (!suser()) return -EPERM; + if (copy_from_user(&dcfg, ifr->ifr_data, sizeof(struct devcfg_t))) { + return -EFAULT; + } + if ((dcfg.coding < CFG_CHCODE_MIN) || (dcfg.coding > CFG_CHCODE_MAX)) return -EINVAL; + if ((dcfg.clockmode < CFG_CM_MIN) || (dcfg.clockmode > 
CFG_CM_MAX)) return -EINVAL; + if ((dcfg.duplex < CFG_DUPLEX_MIN) || (dcfg.duplex > CFG_DUPLEX_MAX)) return -EINVAL; + if (dcfg.brate_m > CFG_BRATEM_MAX) return -EINVAL; + if (dcfg.brate_n > CFG_BRATEN_MAX) return -EINVAL; + if ((dcfg.txddrive < CFG_TXDDRIVE_MIN) || (dcfg.txddrive > CFG_TXDDRIVE_MAX)) return -EINVAL; + if ((dcfg.txdelmode < CFG_TXDEL_MIN) || (dcfg.txdelmode > CFG_TXDEL_MAX)) return -EINVAL; + if ((dcfg.preamb_rpt!=0) && (dcfg.preamb_rpt!=1) && (dcfg.preamb_rpt!=2) && (dcfg.preamb_rpt!=4) && (dcfg.preamb_rpt!=8)) return -EINVAL; + probe_neccessary = ((dcfg.coding != dctlp->cfg.coding) + || (dcfg.clockmode != dctlp->cfg.clockmode) + || (dcfg.brate_m != dctlp->cfg.brate_m) + || (dcfg.brate_n != dctlp->cfg.brate_n) + || (dcfg.clkout != dctlp->cfg.clkout)); + memcpy((unsigned char *) &dctlp->cfg, (unsigned char *) &dcfg, sizeof(struct devcfg_t)); + if (dctlp->start) { + pciscc_channel_close(dctlp); + pciscc_channel_open(dctlp); + if (probe_neccessary) dctlp->tx_bitrate = (xtal >> 4) / ((dctlp->cfg.brate_n+1)*(1 << dctlp->cfg.brate_m)); + /* if (probe_neccessary) dctlp->tx_bitrate = pciscc_probe_txrate(dctlp); */ + printk(KERN_INFO "PCISCC: tx_bitrate=%ld (%d %d).\n", dctlp->tx_bitrate, dctlp->cfg.brate_m, dctlp->cfg.brate_n); + } + return 0; + case SIOCPCISCCSLED: + /* set channel LEDs */ + if (!suser()) return -EPERM; + writel(0x000000ff, cctlp->io_base+GPDIR); + writel(0x00000000, cctlp->io_base+GPIM); + l = readl(cctlp->io_base+GPDATA); + switch (channel) { + case 0: l &= ~((1<<0) | (1<<1)); + l |= (((unsigned long) ifr->ifr_data & 3) << 0); + break; + case 1: l &= ~((1<<2) | (1<<3)); + l |= (((unsigned long) ifr->ifr_data & 3) << 2); + break; + case 2: l &= ~((1<<4) | (1<<5)); + l |= (((unsigned long) ifr->ifr_data & 3) << 4); + break; + case 3: l &= ~((1<<6) | (1<<7)); + l |= (((unsigned long) ifr->ifr_data & 3) << 6); + break; + } + writel(l, cctlp->io_base+GPDATA); + return 0; + case SIOCPCISCCGDSTAT: + /* get channel status */ + status = 
(dctlp->txstate & 0x0f); + l = readl(cctlp->io_base+SCCBASE[channel]+STAR); + if (l & DPLA) status |= STATUS_DPLA; + if (l & RLI) status |= STATUS_RLI; + if (dctlp->cfg.cdinv) { + if (l & CD) status |= STATUS_CD; + } else { + if (!(l & CD)) status |= STATUS_CD; + } + if (l & CTS) status |= STATUS_CTS; + if (readl(cctlp->io_base+SCCBASE[dctlp->channel]+CCR1) & RTS) status |= STATUS_RTS; + ifr->ifr_data = (void *) status; + return 0; + case SIOCPCISCCDCAL: + /* calibrate */ + if (!suser()) return -EPERM; + if (!dctlp->start) return -EAGAIN; + if ((dctlp->txstate != TX_IDLE) && (dctlp->txstate != TX_CAL)) return -EAGAIN; + time = (unsigned long) ifr->ifr_data; + if ((dctlp->txstate == TX_CAL) && (time != 0)) return -EAGAIN; + if (time > 0xffffff) return -EINVAL; + writel((time*TVALUE), cctlp->io_base+SCCBASE[channel]+TIMR); + if (time == 0) { + pciscc_set_txstate(dctlp, TX_IDLE); + } else { + pciscc_set_txstate(dctlp, TX_CAL); + writel(STI, cctlp->io_base+SCCBASE[channel]+CMDR); + } + return 0; + case SIOCPCISCCLBI: + /* local bus transaction */ + if (!suser()) return -EPERM; + if (copy_from_user(&lbi, ifr->ifr_data, sizeof(struct lbi_xfer))) { + return -EFAULT; + } + if (lbi.mode == LBI_WRITE) { + writew(lbi.data, cctlp->lbi_base+lbi.addr); + } else { + lbi.data = readl(cctlp->lbi_base+lbi.addr); + if (copy_to_user(ifr->ifr_data, &lbi, sizeof(struct lbi_xfer))) + return -EFAULT; + } + return 0; + case SIOCPCISCCKICKTX: + /* kick TX */ + if (!suser()) return -EPERM; + spin_lock_irq(&dctlp->dev_lock); + pciscc_kick_tx(dctlp); + spin_unlock_irq(&dctlp->dev_lock); + return 0; + default: + return -EINVAL; + } + return 0; +} + +/* --------------------------------------------------------------------------------------------- */ + +/* kick transmitter, call only with devlock held! 
*/ +static void pciscc_kick_tx(struct devctl_t *dctlp) +{ + int i; +#ifdef PCISCC_DEBUG + struct chipctl_t *cctlp = dctlp->chip; + + printk(KERN_INFO "----- PCISCC: KICK TX dev=%s -----\n", dctlp->name); + printk(KERN_INFO "TX IQ base=V0x%08lx next=V%08lx - dump:\n", + (unsigned long) dctlp->iq_tx, + (unsigned long) dctlp->iq_tx_next); + printk(KERN_INFO "Len=%d iq_tx=%p\n", cctlp->cfg.iqlen, dctlp->iq_tx); + if (dctlp->iq_tx) + for (i = 0; i < cctlp->cfg.iqlen ; i++) + printk(KERN_INFO "V%08lx: %08lx\n", + (unsigned long) &dctlp->iq_tx[i], + dctlp->iq_tx[i]); +#endif + if (!dctlp->start) return; + + i = pciscc_isr_txcleanup(dctlp); +#ifdef PCISCC_DEBUG + printk(KERN_INFO "PCISCC: SIOCPCISCCKICKTX %s: 1st pass cleaned %d descriptors.\n", dctlp->name, i); +#endif + if (i == 0) { + dctlp->dq_tx_cleanup->result |= C; + i = pciscc_isr_txcleanup(dctlp); +#ifdef PCISCC_DEBUG + printk(KERN_INFO "PCISCC: SIOCPCISCCKICKTX %s: 2nd pass cleaned %d descriptors.\n", dctlp->name, i); +#endif + } + dctlp->stats.tx_errors++; + return; +} + +/* --------------------------------------------------------------------------------------------- */ + +/* transmit frame, downcall from MAC layer */ +static int pciscc_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct devctl_t *dctlp = (struct devctl_t *) dev->priv; + struct chipctl_t *cctlp = (struct chipctl_t *) dctlp->chip; + int channel = dctlp->channel; + struct tx_desc_t * volatile txdp; + + if (!dctlp->start) { + printk(KERN_ERR "PCISCC: xmit(): Call when iface %s is down\n", dev->name); + kfree_skb(skb); + return 0; + } + if (dctlp->tbusy) { +#ifdef PCISCC_DEBUG + printk(KERN_INFO "PCISCC: pciscc_xmit(): I'm being kicked by L2!!!\n"); +#endif + /* auto-kick if TX has been busy for 30 seconds */ + if ((jiffies-dctlp->last_tx) > txkick*HZ) { + printk(KERN_INFO "PCISCC: pciscc_xmit(): 30 seconds!!!\n"); + pciscc_kick_tx(dctlp); + } + } + if (!skb) { + printk(KERN_ERR "PCISCC: xmit(): L2 handed us a NULL skb!\n"); + return 0; + 
} + if (!skb->len) { + printk(KERN_ERR "PCISCC: xmit(): L2 tried to trick us into sending a skb of len 0!\n"); + kfree_skb(skb); + return 0; + } + spin_lock_irq(&dctlp->dev_lock); + ATOMICY_CHECK; + txdp=dctlp->dq_tx_last->next; + if ((txdp == dctlp->dq_tx_cleanup) || (txdp->next == dctlp->dq_tx_cleanup) || (txdp->result & C) || (txdp->next->result & C)) { + /* descriptor chain "full" */ +#ifdef PCISCC_VDEBUG + printk(KERN_INFO "PCISCC: xmit(): Dropping frame due to full TX queue interface %s.\n", dev->name); +#endif + dctlp->stats.tx_dropped++; + kfree_skb(skb); +#ifdef PCISCC_DEBUG + if (!((txdp == dctlp->dq_tx_cleanup) || (txdp->next == dctlp->dq_tx_cleanup)) && ((txdp->result & C) || (txdp->next->result & C))) + printk(KERN_INFO "PCISCC: xmit(): txdp->result & C || txdp->next->result & C!!!!\n"); +#endif + ATOMICY_CHECK_END; + spin_unlock_irq(&dctlp->dev_lock); + return 0; + } + /* prepare TX descriptor */ + skb_pull(skb, 1); /* Ignore the KISS byte */ + txdp->result=0; + txdp->skb=skb; + txdp->flags=FE | (BNO*skb->len); + txdp->dataptr=(void *) virt_to_bus(skb->data); + dctlp->dq_tx_last=txdp; + wmb(); + flush_cache_all(); + /* printk(KERN_INFO "PCISCC: xmit(): interface %s - State %d - Sending %d bytes.\n", dev->name, dctlp->txstate, skb->len); */ + if (dctlp->cfg.duplex == CFG_DUPLEX_FULLPTT) { + /* in "always on" full duplex mode we can start frame transmit at once */ + pciscc_set_txstate(dctlp, TX_XMIT); + switch (channel) { + case 0: writel(virt_to_bus(txdp), cctlp->io_base+CH0LTDA); + break; + case 1: writel(virt_to_bus(txdp), cctlp->io_base+CH1LTDA); + break; + case 2: writel(virt_to_bus(txdp), cctlp->io_base+CH2LTDA); + break; + case 3: writel(virt_to_bus(txdp), cctlp->io_base+CH3LTDA); + break; + } + } else if ((dctlp->cfg.txdelmode == CFG_TXDEL_HARD) || !dctlp->cfg.txdelval) { + /* Hardware TX-delay control using RTS/CTS or zero TX-delay */ + writel(txtimeout*dctlp->tx_bitrate*TVALUE, cctlp->io_base+SCCBASE[channel]+TIMR); + writel(STI, 
cctlp->io_base+SCCBASE[channel]+CMDR); + pciscc_set_txstate(dctlp, TX_XMIT); + switch (channel) { + case 0: writel(virt_to_bus(txdp), cctlp->io_base+CH0LTDA); + break; + case 1: writel(virt_to_bus(txdp), cctlp->io_base+CH1LTDA); + break; + case 2: writel(virt_to_bus(txdp), cctlp->io_base+CH2LTDA); + break; + case 3: writel(virt_to_bus(txdp), cctlp->io_base+CH3LTDA); + break; + } + } else { + /* half duplex or "normal" full duplex, software txdelay */ + switch (dctlp->txstate) { + case TX_RESET: + /* TX not initialized */ + printk(KERN_INFO "PCISCC: xmit(): %s: Cannot transmit frame since TX is not inititalized!\n", dev->name); + break; + case TX_WAIT: + break; + case TX_IDLE: + /* TX is idle, start slottime or key up and start txdelay */ + if (dctlp->cfg.duplex == CFG_DUPLEX_FULL) { + pciscc_set_txstate(dctlp, TX_DELAY); + writel(dctlp->cfg.txdelval*TVALUE, cctlp->io_base+SCCBASE[channel]+TIMR); + writel(STI, cctlp->io_base+SCCBASE[channel]+CMDR); + } + else { + /* Half : check DCD... */ + if (dctlp->dcd || random_num() > dctlp->cfg.persist) { + /* Slottime delay */ + pciscc_set_txstate(dctlp, TX_WAIT); + writel(dctlp->cfg.slottime*TVALUE, cctlp->io_base+SCCBASE[channel]+TIMR); + writel(STI, cctlp->io_base+SCCBASE[channel]+CMDR); + } + else { + /* TxDelay */ + pciscc_set_txstate(dctlp, TX_DELAY); + writel(dctlp->cfg.txdelval*TVALUE, cctlp->io_base+SCCBASE[channel]+TIMR); + writel(STI, cctlp->io_base+SCCBASE[channel]+CMDR); + } + } + break; + case TX_DELAY: + /* tx is already keyed but not yet ready */ + break; + case TX_TAIL: + /* tx is currently transmitting closing txtail sequence */ + writel(txtimeout*dctlp->tx_bitrate*TVALUE, cctlp->io_base+SCCBASE[channel]+TIMR); + pciscc_clear_timer(dctlp); + writel(STI, cctlp->io_base+SCCBASE[channel]+CMDR); + case TX_XMIT: /* note fall-through */ + /* tx is already transmitting preamble or data */ + pciscc_set_txstate(dctlp, TX_XMIT); + switch (channel) { + case 0: writel(virt_to_bus(txdp), cctlp->io_base+CH0LTDA); + break; 
+ case 1: writel(virt_to_bus(txdp), cctlp->io_base+CH1LTDA); + break; + case 2: writel(virt_to_bus(txdp), cctlp->io_base+CH2LTDA); + break; + case 3: writel(virt_to_bus(txdp), cctlp->io_base+CH3LTDA); + break; + } + break; + case TX_PROBE: + case TX_CAL: + /* we are busy with diagnostic stuff */ + break; + default: + /* should not occur */ + printk(KERN_ERR "PCISCC: Unhandled txstate in xmit() iface=%s.\n", dev->name); + } + } + /* printk(KERN_INFO "PCISCC: xmit(): interface %s - State now %d.\n", dev->name, dctlp->txstate); */ + txdp=txdp->next; + dctlp->tbusy = ((txdp == dctlp->dq_tx_cleanup) || (txdp->next == dctlp->dq_tx_cleanup)); + dctlp->last_tx = jiffies; + ATOMICY_CHECK_END; + spin_unlock_irq(&dctlp->dev_lock); + /* skb will be kfree()d by isr_txcleanup after transmission */ + return 0; +} + +/* --------------------------------------------------------------------------------------------- */ + +/* called by receiver function - prepare received skb and fire up to L2 */ +static __inline__ void pciscc_rx_skb(struct sk_buff *skb, struct devctl_t *dctlp) +{ + if (!skb) { + printk(KERN_ERR "PCISCC: rx_skb(): Received NULL skb iface=%s.\n", dctlp->name); + return; + } + dctlp->stats.rx_packets++; + dctlp->stats.rx_bytes += skb->len; + skb->protocol = htons(ETH_P_AX25); + skb->dev = &dctlp->dev; + skb->mac.raw = skb->data; + skb->pkt_type = PACKET_HOST; + netif_rx(skb); + return; +} + +/* --------------------------------------------------------------------------------------------- */ + +/* Initialize pciscc control device */ +static int __init pciscc_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int i; + int devnum; + unsigned char rev_id; + static int chipcnt=0; + struct chipctl_t *cctlp; + struct devctl_t *devctl[dev_per_card]; + + printk(KERN_INFO "PCISCC4 : %s\n", PCISCC_VERSION); + + if (pci_enable_device(pdev)) + goto err_out; + + if (!(dummybuf = kmalloc(256, GFP_DMA | GFP_KERNEL))) { + printk(KERN_ERR "PCISCC: init: Could not get 
memory for dummybuf.\n"); + goto err_out; + } + + if (!request_mem_region(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0), "registers")) { + printk (KERN_ERR "PCISCC: can't reserve MMIO region (regs)\n"); + goto err_out; + } + if (!request_mem_region(pci_resource_start(pdev, 1), + pci_resource_len(pdev, 1), "LBI interface")) { + printk (KERN_ERR "PCISCC: can't reserve MMIO region (lbi)\n"); + goto err_out_free_mmio_region0; + } + + pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); + printk(KERN_INFO "PCISCC4 : DSCC Rev %x.%x, MMIO at %#lx (regs), %#lx (lbi), IRQ %d.\n", + rev_id >> 4, rev_id & 0xf, + pci_resource_start(pdev, 0), + pci_resource_start(pdev, 1), + pdev->irq); + + if (!(cctlp = kmalloc(sizeof(struct chipctl_t), GFP_KERNEL))) { + printk(KERN_ERR "PCISCC: Out of memory allocating chipctl-structure\n"); + goto err_dealloc_priv; + } + + memset(cctlp, 0, sizeof(struct chipctl_t)); + cctlp->pcidev = pdev; + cctlp->chip_lock = SPIN_LOCK_UNLOCKED; + cctlp->io_base=ioremap(pci_resource_start(cctlp->pcidev, 0), + pci_resource_len(cctlp->pcidev, 0)); + cctlp->lbi_base=ioremap(pci_resource_start(cctlp->pcidev, 1), + pci_resource_len(cctlp->pcidev, 1)); + + memcpy(&cctlp->cfg, &chipcfg_default, sizeof(struct chipcfg_t)); + + for (i = 0 ; i < dev_per_card ; i++) { + struct net_device *d; + + if (!(devctl[i]=kmalloc(sizeof(struct devctl_t), GFP_KERNEL))) { + printk(KERN_ERR "PCISCC: Out of memory allocating devctl-structure.\n"); + goto err_dealloc_dev; + } + + memset(devctl[i], 0, sizeof(struct devctl_t)); + + d = &devctl[i]->dev; + d->priv = (void *) devctl[i]; + d->init = pciscc_dev_init; + d->irq = pdev->irq; + d->base_addr = pci_resource_start(pdev, 0); + + if (dev_alloc_name(d, "dscc%d") < 0) { + printk(KERN_ERR "PCISCC: Could not find free netdev name.\n"); + goto err_dealloc_dev; + } + + if (register_netdev(d)) { + printk(KERN_ERR "%s: register_netdev != 0.\n", d->name); + goto err_dealloc_dev; + } + + SET_MODULE_OWNER(d); + + 
strcpy(devctl[i]->name, d->name); + + devctl[i]->chip = cctlp; + devctl[i]->channel = i; + devctl[i]->dev_lock = SPIN_LOCK_UNLOCKED; + cctlp->device[i] = devctl[i]; + } + + pci_set_drvdata(pdev, cctlp); + + chipcnt++; + + return 0; + +err_dealloc_dev: + while (--i >= 0) { + devnum = i; + unregister_netdev(&devctl[i]->dev); + kfree(devctl[i]); + } + + iounmap(cctlp->io_base); + cctlp->io_base=0; + + iounmap(cctlp->lbi_base); + cctlp->lbi_base=0; + +err_dealloc_priv: + kfree(cctlp); + release_mem_region(pci_resource_start(pdev, 1), + pci_resource_len(pdev, 1)); + +err_out_free_mmio_region0: + release_mem_region(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); +err_out: + return -ENODEV; +} + + +static void __exit pciscc_remove_one(struct pci_dev *pdev) +{ + int i; + struct chipctl_t *cctlp = pci_get_drvdata(pdev); + + if (cctlp) { + for (i = 0 ; i < dev_per_card ; i++) { + struct devctl_t *dctlp = cctlp->device[i]; + + /* pciscc_dev_close(&dctlp->dev); */ + if (dctlp) { + unregister_netdev(&dctlp->dev); + kfree(dctlp); + cctlp->device[i] = NULL; + } + } + + if (cctlp->irq) { + free_irq(cctlp->irq, (void *) cctlp); + cctlp->irq=0; + } + if (cctlp->io_base) { + iounmap(cctlp->io_base); + cctlp->io_base=0; + } + if (cctlp->lbi_base) { + iounmap(cctlp->lbi_base); + cctlp->lbi_base=0; + } + kfree(cctlp); + pci_set_drvdata(pdev, NULL); + } + + release_mem_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); + release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); + + if (dummybuf) { + kfree(dummybuf); + } + + return; +} + +/* --------------------------------------------------------------------------------------------- */ + + +/***************************************************************************** + * Module stuff. 
* + *****************************************************************************/ + +MODULE_AUTHOR("Jens DAVID, "); +MODULE_DESCRIPTION("AX.25 Device Driver for PCISCC4 cards"); +MODULE_SUPPORTED_DEVICE("pciscc4"); +MODULE_PARM(xtal, "i"); +/* MODULE_PARM(probebit, "i"); */ +MODULE_PARM(txtimeout, "i"); +MODULE_PARM(txkick, "i"); + +static struct pci_device_id pciscc_pci_tbl[] __devinitdata = { + { PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4, + PCI_ANY_ID, PCI_ANY_ID, }, + { 0,} +}; +MODULE_DEVICE_TABLE(pci, pciscc_pci_tbl); + +static struct pci_driver pciscc_driver = { + name: "pciscc4", + id_table: pciscc_pci_tbl, + probe: pciscc_init_one, + remove: pciscc_remove_one, +}; + +static int __init pciscc_init_module(void) +{ + return pci_module_init(&pciscc_driver); +} + +static void __exit pciscc_cleanup_module(void) +{ + pci_unregister_driver(&pciscc_driver); +} + +module_init(pciscc_init_module); +module_exit(pciscc_cleanup_module); diff -u --recursive --new-file linux.old/include/linux/pciscc4.h linux/include/linux/pciscc4.h --- linux.old/include/linux/pciscc4.h Thu Jan 1 00:00:00 1970 +++ linux/include/linux/pciscc4.h Sun Feb 3 18:10:10 2002 @@ -0,0 +1,693 @@ +/* + * pciscc4.h: Header file for pciscc4.c - PCISCC-4 board driver + * + * Authors: Jens David + * + * CVS: $Id: pciscc4.h,v 1.20 2000/02/14 01:08:29 dg1kjd Exp $ + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
 * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef _LINUX_PCISCC_H +#define _LINUX_PCISCC_H + +#include + +#define PCISCC_MAGIC 0xbabe + +#define dev_per_card 4 + +#ifdef __KERNEL__ +/* General Registers */ +#define GCMDR 0x0000 /* Global Command Register */ +#define AR (1<<0) /* Action Request */ +#define IMAR (1<<9) /* Interrupt Mask Action Request */ +#define TXPR0 (1<<10) /* Transmit Poll Request Channel 0 */ +#define TXPR1 (1<<11) /* Transmit Poll Request Channel 1 */ +#define TXPR2 (1<<12) /* Transmit Poll Request Channel 2 */ +#define TXPR3 (1<<13) /* Transmit Poll Request Channel 3 */ +#define CFGIQP (1<<20) /* Configure IQ Peripheral */ +#define CFGIQCFG (1<<21) /* Configure IQ Configuration Queue */ +#define CFGIQSCC0TX (1<<24) /* Configure IQ SCC0 Transmit */ +#define CFGIQSCC1TX (1<<25) /* Configure IQ SCC1 Transmit */ +#define CFGIQSCC2TX (1<<26) /* Configure IQ SCC2 Transmit */ +#define CFGIQSCC3TX (1<<27) /* Configure IQ SCC3 Transmit */ +#define CFGIQSCC0RX (1<<28) /* Configure IQ SCC0 Receive */ +#define CFGIQSCC1RX (1<<29) /* Configure IQ SCC1 Receive */ +#define CFGIQSCC2RX (1<<30) /* Configure IQ SCC2 Receive */ +#define CFGIQSCC3RX (1<<31) /* Configure IQ SCC3 Receive */ +#define GSTAR 0x0004 /* Global Status Register */ +#define ARACK (1<<0) /* Action Request Acknowledge Status */ +#define ARF (1<<1) /* Action Request Failed Status */ +#define IIPGPP (1<<16) /* Int. Indication Peripheral Queue GPP */ +#define IIPLBI (1<<18) /* Int. Indication Peripheral Queue LBI */ +#define IIPSSC (1<<19) /* Int. Indication Peripheral Queue SSC */ +#define IICFG (1<<21) /* Int. Indication Configuration Queue */ +#define IISCC0TX (1<<24) /* Int. Indication Queue SCC0 TX */ +#define IISCC1TX (1<<25) /* Int. Indication Queue SCC1 TX */ +#define IISCC2TX (1<<26) /* Int. 
Indication Queue SCC2 TX */ +#define IISCC3TX (1<<27) /* Int. Indication Queue SCC3 TX */ +#define IISCC0RX (1<<28) /* Int. Indication Queue SCC0 RX */ +#define IISCC1RX (1<<29) /* Int. Indication Queue SCC1 RX */ +#define IISCC2RX (1<<30) /* Int. Indication Queue SCC2 RX */ +#define IISCC3RX (1<<31) /* Int. Indication Queue SCC3 RX */ +#define GMODE 0x0008 /* Global Mode Register */ +#define CMODE (1<<0) /* DMA Control Mode */ +#define DBE (1<<1) /* DEMUX Burst Enable */ +#define ENDIAN (1<<2) /* Endian Selection */ +#define CHN (1<<13) /* Channel Number Highest Priority */ +#define SPRI (1<<15) /* Select Priority */ +#define PERCFG (1<<16) /* Peripheral Block Configuration */ +#define LCD (1<<19) /* LBI Clock Division */ +#define OSCPD (1<<21) /* Oscillator Power Down */ + +/* IRQ Queue Control Registers */ +#define IQLENR1 0x000c /* Interrupt Queue Length Register 1 */ +#define IQSCC0TXLEN (1<<12) /* Interrupt Queue SCC0 TX Length */ +#define IQSCC1TXLEN (1<<8) /* Interrupt Queue SCC1 TX Length */ +#define IQSCC2TXLEN (1<<4) /* Interrupt Queue SCC2 TX Length */ +#define IQSCC3TXLEN (1<<0) /* Interrupt Queue SCC3 TX Length */ +#define IQSCC0RXLEN (1<<28) /* Interrupt Queue SCC0 RX Length */ +#define IQSCC1RXLEN (1<<24) /* Interrupt Queue SCC1 RX Length */ +#define IQSCC2RXLEN (1<<20) /* Interrupt Queue SCC2 RX Length */ +#define IQSCC3RXLEN (1<<16) /* Interrupt Queue SCC3 RX Length */ +#define IQLENR2 0x0010 /* Interrupt Queue Length Register 2 */ +#define IQPLEN (1<<16) /* Interrupt Queue Peripheral Length */ +#define IQCFGLEN (1<<20) /* Interrupt Queue Configuration Length */ +#define IQSCC0RXBAR 0x0014 /* Interrupt Queue SCC0 RX Base Address */ +#define IQSCC1RXBAR 0x0018 /* Interrupt Queue SCC1 RX Base Address */ +#define IQSCC2RXBAR 0x001c /* Interrupt Queue SCC2 RX Base Address */ +#define IQSCC3RXBAR 0x0020 /* Interrupt Queue SCC3 RX Base Address */ +#define IQSCC0TXBAR 0x0024 /* Interrupt Queue SCC0 TX Base Address */ +#define IQSCC1TXBAR 0x0028 /* 
Interrupt Queue SCC1 TX Base Address */ +#define IQSCC2TXBAR 0x002c /* Interrupt Queue SCC2 TX Base Address */ +#define IQSCC3TXBAR 0x0030 /* Interrupt Queue SCC3 TX Base Address */ +#define FIFOCR4 0x0034 /* FIFO Control Register 4 */ +#define TFFTHRES0 (1<<0) /* Transmit FIFO Forward Threshold Chan. 0 */ +#define TFFTHRES1 (1<<8) /* Transmit FIFO Forward Threshold Chan. 1 */ +#define TFFTHRES2 (1<<16) /* Transmit FIFO Forward Threshold Chan. 2 */ +#define TFFTHRES3 (1<<24) /* Transmit FIFO Forward Threshold Chan. 3 */ +#define IQCFGBAR 0x003c /* CFG Interrupt Queue Base Address */ +#define IQPBAR 0x0040 /* PER Interrupt Queue Base Address */ + +/* DMAC control registers */ +#define FIFOCR1 0x0044 /* FIFO Control Register 1 */ +#define TFSIZE0 (1<<27) /* Transmit FIFO Size Channel 0 */ +#define TFSIZE1 (1<<22) /* Transmit FIFO Size Channel 1 */ +#define TFSIZE2 (1<<17) /* Transmit FIFO Size Channel 2 */ +#define TFSIZE3 (1<<11) /* Transmit FIFO Size Channel 3 */ +#define FIFOCR2 0x0048 /* FIFO Control Register 2 */ +#define M4_0 (1<<7) /* Multiplier 4 FIFO Channel 0 */ +#define M2_0 (1<<6) /* Multiplier 2 FIFO Channel 0 */ +#define M4_1 (1<<5) /* Multiplier 4 FIFO Channel 1 */ +#define M2_1 (1<<4) /* Multiplier 2 FIFO Channel 1 */ +#define M4_2 (1<<3) /* Multiplier 4 FIFO Channel 2 */ +#define M2_2 (1<<2) /* Multiplier 2 FIFO Channel 2 */ +#define M4_3 (1<<1) /* Multiplier 4 FIFO Channel 3 */ +#define M2_3 (1<<0) /* Multiplier 2 FIFO Channel 3 */ +#define TFRTHRES0 (1<<27) /* Transmit FIFO Refill Threshold Chan. 0 */ +#define TFRTHRES1 (1<<22) /* Transmit FIFO Refill Threshold Chan. 1 */ +#define TFRTHRES2 (1<<17) /* Transmit FIFO Refill Threshold Chan. 2 */ +#define TFRTHRES3 (1<<11) /* Transmit FIFO Refill Threshold Chan. 
3 */ +#define FIFOCR3 0x004c /* FIFO Control Register 3 */ +#define RFTHRES (1<<0) /* RX FIFO Threshold */ +#define M2 (1<<7) /* RX FIFO Threshold Multiplier 2 */ +#define M4 (1<<8) /* RX FIFO Threshold Multiplier 4 */ +#define CH0CFG 0x0050 /* Channel 0 Configuration */ +#define CH0BRDA 0x0054 /* Channel 0 Base Address RX Descriptor */ +#define CH0BTDA 0x0058 /* Channel 0 Base Address TX Descriptor */ +#define CH1CFG 0x005c /* Channel 1 Configuration */ +#define CH1BRDA 0x0060 /* Channel 1 Base Address RX Descriptor */ +#define CH1BTDA 0x0064 /* Channel 1 Base Address TX Descriptor */ +#define CH2CFG 0x0068 /* Channel 2 Configuration */ +#define CH2BRDA 0x006c /* Channel 2 Base Address RX Descriptor */ +#define CH2BTDA 0x0070 /* Channel 2 Base Address TX Descriptor */ +#define CH3CFG 0x0074 /* Channel 3 Configuration */ +#define CH3BRDA 0x0078 /* Channel 3 Base Address RX Descriptor */ +#define CH3BTDA 0x007c /* Channel 3 Base Address TX Descriptor */ +#define IDT (1<<19) +#define IDR (1<<20) +#define RDT (1<<21) +#define RDR (1<<22) +#define MTERR (1<<24) /* Mask TX ERR-Interrupt */ +#define MRERR (1<<25) /* Mask RX ERR-Interrupt */ +#define MTFI (1<<26) /* Mask TX FI-Interrupt */ +#define MRFI (1<<27) /* Mask RX FI-Interrupt */ +#define CH0FRDA 0x0098 /* Channel 0 First RX Descriptor Address */ +#define CH1FRDA 0x009c /* Channel 1 First RX Descriptor Address */ +#define CH2FRDA 0x00a0 /* Channel 2 First RX Descriptor Address */ +#define CH3FRDA 0x00a4 /* Channel 3 First RX Descriptor Address */ +#define CH0FTDA 0x00b0 /* Channel 0 First TX Descriptor Address */ +#define CH1FTDA 0x00b4 /* Channel 1 First TX Descriptor Address */ +#define CH2FTDA 0x00b8 /* Channel 2 First TX Descriptor Address */ +#define CH3FTDA 0x00bc /* Channel 3 First TX Descriptor Address */ +#define CH0LRDA 0x00c8 /* Channel 0 Last RX Descriptor Address */ +#define CH1LRDA 0x00cc /* Channel 1 Last RX Descriptor Address */ +#define CH2LRDA 0x00d0 /* Channel 2 Last RX Descriptor Address */ 
+#define CH3LRDA 0x00d4 /* Channel 3 Last RX Descriptor Address */ +#define CH0LTDA 0x00e0 /* Channel 0 Last TX Descriptor Address */ +#define CH1LTDA 0x00e4 /* Channel 1 Last TX Descriptor Address */ +#define CH2LTDA 0x00e8 /* Channel 2 Last TX Descriptor Address */ +#define CH3LTDA 0x00ec /* Channel 3 Last TX Descriptor Address */ + +/* SCC base addresses */ +const long SCCBASE[] = {0x0100, 0x0180, 0x0200, 0x0280}; + +/* SCC registers */ +#define CMDR 0x0000 /* Command Register */ +#define RNR (1<<0) /* Receiver Not Ready Command */ +#define STI (1<<8) /* Start Timer Command */ +#define RRES (1<<16) /* Receiver Reset Command */ +#define RFRD (1<<17) /* Receive FIFO Read Enable Command */ +#define HUNT (1<<18) /* Enter Hunt State Command */ +#define XRES (1<<24) /* Transmitter Reset Command */ +#define STAR 0x0004 /* Status Register */ +#define RRNR (1<<16) /* Received RNR Status */ +#define XRNR (1<<17) /* Transmit RNR Status */ +#define WFA (1<<18) /* Wait For Acknowledgement */ +#define DPLA (1<<19) /* DPLL Asynchronous */ +#define RLI (1<<20) /* Receive Line Inactive */ +#define CD (1<<21) /* Carrier Detect Input Signal State */ +#define RFNE (1<<22) /* Receive FIFO Not Empty */ +#define SYNC (1<<23) /* Synchronisation Status */ +#define CTS (1<<24) /* Clear To Send Input Signal State */ +#define FCS (1<<27) /* Flow Control Status */ +#define CEC (1<<28) /* Command Executing */ +#define TEC (1<<29) /* TIC executing */ +#define CCR0 0x0008 /* Channel Configuration Register 0 */ +#define CM (1<<0) /* Clock Mode */ +#define CM0 (1<<0) +#define CM1 (1<<1) +#define CM2 (1<<2) +#define HS (1<<3) /* High Speed (PEB-20534H-52) */ +#define SSEL (1<<4) /* Clock Source Select (a/b Select) */ +#define TOE (1<<5) /* Transmit Clock Out Enable */ +#define BCR (1<<7) /* Bit Clock Rate */ +#define PSD (1<<8) /* DPLL Phase Shift Disable */ +#define VIS (1<<12) /* Masked Interrupts Visible */ +#define SM (1<<16) /* Serial Port Mode */ +#define SM0 (1<<16) +#define SM1 (1<<17) 
+#define SC (1<<20) /* Serial Port Configuration */ +#define SC0 (1<<20) +#define SC1 (1<<21) +#define SC2 (1<<22) +#define PU (1<<31) /* Power Up */ +#define CCR1 0x000c /* Channel Configuration Register 1 */ +#define C32 (1<<0) /* CRC-32 Select */ +#define TOLEN (1<<0) /* Time Out Length */ +#define CRL (1<<1) /* CRC Reset Value */ +#define SFLAG (1<<7) /* Shared Flags Transmission */ +#define TOIE (1<<7) /* Time Out Indication Enable */ +#define TLP (1<<8) /* Test Loop */ +#define MCS (1<<9) /* Modulo Count Select */ +#define PPM0 (1<<10) /* PPP Mode Select 0 */ +#define BISNC (1<<10) /* Enable BISYNC Mode */ +#define PPM1 (1<<11) /* PPP Mode Select 1 */ +#define SLEN (1<<11) /* SYNC Character Length */ +#define NRM (1<<12) /* Normal Response Mode */ +#define ADM (1<<13) /* Address Mode Select */ +#define MDS0 (1<<14) /* Mode Select (HDLC Protocol Sub-Mode) */ +#define MDS1 (1<<15) +#define CAS (1<<17) /* Carrier Detect Auto Start */ +#define FCTS (1<<18) /* Flow Control (Using Signal /CTS) */ +#define FRTS (1<<19) /* Flow Control (Using Signal /RTS) */ +#define RTS (1<<20) /* Request To Send Pin Control */ +#define TCLKO (1<<21) /* Transmit Clock Output */ +#define ICD (1<<22) /* Invert Carrier Detect Pin Polarity */ +#define ODS (1<<25) /* Output Driver Select */ +#define DIV (1<<26) /* Data Inversion */ +#define SOC0 (1<<28) /* Serial Output Control */ +#define SOC1 (1<<29) +#define CCR2 0x0010 /* Channel Configuration Register 2 */ +#define XCRC (1<<0) /* Transmit CRC Checking Mode */ +#define FLON (1<<0) /* Flow Control Enable */ +#define CRCM (1<<0) /* CRC Mode Select */ +#define OIN (1<<1) /* One Insertion */ +#define CAPP (1<<1) /* CRC Append */ +#define SXIF (1<<2) /* Selects Transmission Of I-Frames */ +#define CRLBS (1<<2) /* CRC Reset Value In BISYNC Mode */ +#define ITF (1<<3) /* Interframe Time Fill */ +#define PRE0 (1<<4) /* Number Of Preamble Repetitions */ +#define PRE1 (1<<5) +#define EPT (1<<7) /* Enable Preamble Transmission */ +#define PRE 
(1<<8) /* Preamble */ +#define RFTH (1<<16) /* Receive FIFO Threshold */ +#define RFDF (1<<19) /* Receive FIFO Data Format */ +#define RADD (1<<20) /* Receive Address Pushed To FIFO */ +#define DPS (1<<20) /* Data Parity Storage */ +#define RCRC (1<<21) /* Receive CRC Checking Mode */ +#define PARE (1<<21) /* Parity Enable */ +#define DRCRC (1<<22) /* Disable Receive CRC Checking */ +#define PAR0 (1<<22) /* Parity Format */ +#define PAR1 (1<<23) +#define STOP (1<<24) /* Stop Bit Number */ +#define SLOAD (1<<24) /* Enable SYNC Character Load */ +#define XBRK (1<<25) /* Transmit Break */ +#define DXS (1<<26) /* Disable Storage of XON/XOFF-Characters */ +#define RAC (1<<27) /* Receiver Active */ +#define CHL0 (1<<28) /* Character Length */ +#define CHL1 (1<<29) +#define ACCM 0x0014 /* ASYNC Control Character Map */ +#define UDAC 0x0018 /* User Defined ASYNC Character */ +#define AC0 (1<<0) /* User Defined ASYNC Character Control Map */ +#define AC1 (1<<8) /* User Defined ASYNC Character Control Map */ +#define AC2 (1<<16) /* User Defined ASYNC Character Control Map */ +#define AC3 (1<<24) /* User Defined ASYNC Character Control Map */ +#define TTSA 0x001c /* TX Time Slot Assignment Register */ +#define TCC (1<<0) /* Transmit Channel Capacity */ +#define TEPCM (1<<15) /* Enable PCM Mask Transmit */ +#define TCS (1<<16) /* Transmit Clock Shift */ +#define TTSN (1<<24) /* Transmit Time Slot Number */ +#define RTSA 0x0020 /* RX Time Slot Assignment Register */ +#define RCC (1<<0) /* Receive Channel Capacity */ +#define REPCM (1<<15) /* Enable PCM Mask Receive */ +#define RCS (1<<16) /* Receive Clock Shift */ +#define RTSN (1<<24) /* Receive Time Slot Number */ +#define PCMMTX 0x0024 /* PCM Mask for Transmit */ +#define PCMMRX 0x0028 /* PCM Mask for Receive */ +#define BRR 0x002c /* Baud Rate Register */ +#define BRN (1<<0) /* Baud Rate Factor N */ +#define BRM (1<<8) /* Baud Rate Factor M k=(N+1)*2^M */ +#define TIMR 0x0030 /* Timer Register */ +#define TVALUE (1<<0) /* 
Timer Expiration Value */ +#define CNT (1<<24) /* Counter */ +#define TMD (1<<28) /* Timer Mode */ +#define SRC (1<<31) /* Clock Source */ +#define XADR 0x0034 /* TX Address Register */ +#define XAD1 (1<<0) /* Transmit Address 1 */ +#define XAD2 (1<<8) /* Transmit Address 2 */ +#define RADR 0x0038 /* RX Address Register */ +#define RAL1 (1<<16) /* RX Address 1 Low-Byte */ +#define RAH1 (1<<24) /* RX Address 1 High-Byte */ +#define RAL2 (1<<0) /* RX Address 2 Low-Byte */ +#define RAH2 (1<<8) /* RX Address 2 High-Byte */ +#define RAMR 0x003c /* Receive Address Mask Register */ +#define AMRAL1 (1<<0) /* Receive Mask Address 1 Low-Byte */ +#define AMRAH1 (1<<8) /* Receive Mask Address 1 High-Byte */ +#define AMRAL2 (1<<16) /* Receive Mask Address 2 Low-Byte */ +#define AMRAH2 (1<<24) /* Receive Mask Address 2 High-Byte */ +#define RLCR 0x0040 /* Receive Length Check Register */ +#define RL (1<<0) /* Receive Length Check Limit */ +#define RCE (1<<15) /* Receive Length Check Enable */ +#define XNXFR 0x0044 /* XON/XOFF Register */ +#define MXOFF (1<<0) /* XOFF Character Mask */ +#define MXON (1<<8) /* XON Character Mask */ +#define CXOFF (1<<16) /* XOFF Character */ +#define CXON (1<<24) /* XON Character */ +#define TCR 0x0048 /* Termination Character Register */ +#define TC (1<<0) /* Termination Character */ +#define TCDE (1<<15) /* Termination Character Detection Enable */ +#define TICR 0x004c /* Transmit Immediate Character Register */ +#define SYNCR 0x0050 /* Synchronization Character Register */ +#define SYNCL (1<<0) /* Synchronization Character Low */ +#define SYNCH (1<<8) /* Synchronization Character High */ +#define IMR 0x0054 /* Interrupt Mask Register */ +#define ISR 0x0058 /* Interrupt Status Register */ +#define FLEX (1<<0) /* Frame Length Exceeded Interrupt */ +#define RFO (1<<1) /* RX FIFO Overflow Interrupt */ +#define CDSC (1<<2) /* Carrier Detect Status Change Interrupt */ +#define PLLA (1<<3) /* DPLL Asynchronous Interrupt */ +#define PCE (1<<4) /* 
Protocol Error Interrupt */ +#define FERR (1<<4) /* Framing Error Interrupt */ +#define SCD (1<<4) /* SYN Character Detected Interrupt */ +#define RSC (1<<5) /* Receive Status Change Interrupt */ +#define PERR (1<<5) /* Parity Error Interrupt */ +#define RFS (1<<6) /* Receive Frame Start Interrupt */ +#define TIME (1<<6) /* Time Out Interrupt */ +#define RDO (1<<7) /* Receive Data Overflow Interrupt */ +#define TCD (1<<7) /* Termination Character Detected Interrupt */ +#define BRKT (1<<8) /* Break Terminated Interrupt */ +#define BRK (1<<9) /* Break Interrupt */ +#define XPR (1<<12) /* Transmit Pool Ready Interrupt */ +#define XMR (1<<13) /* Transmit Message Repeat */ +#define XON (1<<13) /* XOFF Character Detected Interrupt */ +#define CSC (1<<14) /* /CTS Status Change */ +#define TIN (1<<15) /* Timer Interrupt */ +#define XDU (1<<16) /* Transmit Data Underrun Interrupt */ +#define ALLS (1<<18) /* All Sent Interrupt */ + +/* Peripheral control registers */ +#define LCONF 0x0300 /* LBI Configuration Register */ +#define MCTC (1<<0) /* LBI Memory Cycle Time Control */ +#define ABM (1<<4) /* LBI Arbitration Master */ +#define RDEN (1<<5) /* LBI LRDY Enable */ +#define BTYP (1<<6) /* LBI Bus Type */ +#define BTYP0 (1<<6) +#define BTYP1 (1<<7) +#define HDEN (1<<8) /* LBI HOLD Enable */ +#define EALE (1<<9) /* LBI Extended ALE */ +#define EBCRES (1<<22) /* LBI External Bus Controller Reset */ +#define LINTIC (1<<31) /* LBI Interrupt Input Control */ +#define SSCCON 0x0380 /* SSC Control Register */ +#define SSCBM (1<<0) /* SSC Data Width Control */ +#define SSCBC (1<<0) /* SSC Shift Counter */ +#define SSCHB (1<<4) /* SSC Heading (Bit Order) Control */ +#define SSCPH (1<<5) /* SSC Clock Phase Control */ +#define SSCPO (1<<6) /* SSC Polarity Control */ +#define SSCTEN (1<<8) /* SSC Transmit Error Enable */ +#define SSCTE (1<<8) /* SSC Transmit Status Flag */ +#define SSCREN (1<<9) /* SSC Receive Error Enable */ +#define SSCRE (1<<9) /* SSC Receive Status Flag */ +#define 
SSCPEN (1<<10) /* SSC Phase Error Enable */ +#define SSCPE (1<<10) /* SSC Baud Rate Status Flag */ +#define SSCBEN (1<<11) /* SSC Baud Rate Error Enable */ +#define SSCBE (1<<11) /* SSC Baud Rate Status Flag */ +#define SSCBSY (1<<12) /* SSC Busy Flag */ +#define SSCMS (1<<14) /* SSC Master Select */ +#define SSCEN (1<<15) /* SSC Enable */ +#define SSCBR 0x0384 /* SSC Baud Rate Generator Register */ +#define SSCTB 0x0388 /* SSC Transmit Buffer */ +#define SSCRB 0x038c /* SSC Receive Buffer */ +#define SSCCSE 0x0390 /* SSC Chip Select Enable Register */ +#define ASEL0 (1<<4) /* SSC Chipselect 0 */ +#define ASEL1 (1<<5) /* SSC Chipselect 1 */ +#define ASEL2 (1<<6) /* SSC Chipselect 2 */ +#define ASEL3 (1<<7) /* SSC Chipselect 3 */ +#define SSCIM 0x0394 /* SSC Interrupt Mask Register */ +#define IMTX (1<<0) /* SSC Transmit Interrupt Mask */ +#define IMER (1<<1) /* SSC Error Interrupt Mask */ +#define IMRX (1<<2) /* SSC Receive Interrupt Mask */ +#define GPDIR 0x0400 /* GPP Direction Configuration Register */ +#define GPDATA 0x0404 /* GPP Data I/O Register */ +#define GPIM 0x0408 /* GPP Interrupt Mask Register */ + +/* Receive Data Section Status Byte (HDLC mode) */ +#define SB_LA (1<<0) /* low byte address compare */ +#define SB_CR (1<<1) /* command/response */ +#define SB_HA0 (1<<2) /* high byte address compare */ +#define SB_HA1 (1<<3) +#define SB_RAB (1<<4) /* receive message aborted */ +#define SB_CRC (1<<5) /* CRC compare */ +#define SB_RDO (1<<6) /* receive data overflow */ +#define SB_VFR (1<<7) /* valid frame */ + +/* Configuration Interrupt Vector */ +#define CIV_ARACK (1<<0) /* action request acknowledge */ +#define CIV_ARF (1<<1) /* action request failed */ +#define CIV_SRCID (1<<28) /* source ID, always 0x0a */ +#define CIV_SRCIDVAL 0x0a + +/* DMA Controller Interrupt Vector */ +#define DMACIV_ERR (1<<16) /* error indication interrupt */ +#define DMACIV_FI (1<<17) /* frame indication interrupt */ +#define DMACIV_HI (1<<18) /* host initiated interrupt */ 
+#define DMACIV_SRCID (1<<28) + +/* SCC Interrupt Vector */ +#define SCCIV_FLEX (1<<0) /* Frame Length Exceeded Interrupt */ +#define SCCIV_RFO (1<<1) /* RX FIFO Overflow Interrupt */ +#define SCCIV_CDSC (1<<2) /* Carrier Detect Status Change Interrupt */ +#define SCCIV_PLLA (1<<3) /* DPLL Asynchronous Interrupt */ +#define SCCIV_PCE (1<<4) /* Protocol Error Interrupt */ +#define SCCIV_FERR (1<<4) /* Framing Error Interrupt */ +#define SCCIV_SCD (1<<4) /* SYN Character Detected Interrupt */ +#define SCCIV_RSC (1<<5) /* Receive Status Change Interrupt */ +#define SCCIV_PERR (1<<5) /* Parity Error Interrupt */ +#define SCCIV_RFS (1<<6) /* Receive Frame Start Interrupt */ +#define SCCIV_TIME (1<<6) /* Time Out Interrupt */ +#define SCCIV_RDO (1<<7) /* Receive Data Overflow Interrupt */ +#define SCCIV_TCD (1<<7) /* Termination Character Detected Interrupt */ +#define SCCIV_BRKT (1<<8) /* Break Terminated Interrupt */ +#define SCCIV_BRK (1<<9) /* Break Interrupt */ +#define SCCIV_XPR (1<<12) /* Transmit Pool Ready Interrupt */ +#define SCCIV_XMR (1<<13) /* Transmit Message Repeat */ +#define SCCIV_XOFF (1<<13) /* XOFF Character Detected Interrupt */ +#define SCCIV_CSC (1<<14) /* /CTS Status Change */ +#define SCCIV_TIN (1<<15) /* Timer Interrupt */ +#define SCCIV_XDU (1<<16) /* Transmit Data Underrun Interrupt */ +#define SCCIV_ALLS (1<<18) /* All Sent Interrupt */ +#define SCCIV_SRCID (1<<28) +#define SCCIV_SCC (1<<25) /* 1: SCC generated. 
0: DMAC generated */ +#define SCCIV_ERR (1<<16) /* ERROR Indication Interrupt */ +#define SCCIV_FI (1<<17) /* Frame Indication Interrupt */ +#define SCCIV_HI (1<<18) /* Host Initiated Interrupt */ +#define SCCIV_IGNORE (1<<20) /* internal use */ + +/* SSC Interrupt Vector */ +#define SSCIV_INSW (1<<0) /* interrupt status word */ +#define SSCIV_TX (1<<16) /* transmit interrupt */ +#define SSCIV_RX (1<<17) /* receive interrupt */ +#define SSCIV_ERR (1<<18) /* error interrupt */ +#define SSCIV_DE (1<<23) /* data/error indication */ +#define SSCIV_RT (1<<24) /* rx/tx indicator */ +#define SSCIV_SRCID (1<<28) +#define SSCIV_SRCIDVAL 0x0c + +/* LBI Interrupt Vector */ +#define LBIIV_SRCID (1<<28) +#define LBIIV_SRCIDVAL 0x0d + +/* GPP Interrupt Vector */ +#define GPPIV_GPDATA (1<<0) +#define GPPIV_SRCID (1<<28) +#define GPPIV_SRCIDVAL 0x0f +#endif /* __KERNEL__ */ + +/* Structure definitions */ +struct devcfg_t { + int coding; /* channel coding */ +#define CFG_CHCODE_NONE 0 +#define CFG_CHCODE_NRZ 1 /* non-return-to-zero */ +#define CFG_CHCODE_NRZI 2 /* non-return-to-zero inverted */ +#define CFG_CHCODE_FM0 3 /* FM-0 */ +#define CFG_CHCODE_FM1 4 /* FM-1 */ +#define CFG_CHCODE_MANCH 5 /* Manchester */ +#define CFG_CHCODE_MIN 1 +#define CFG_CHCODE_MAX 5 + int clockmode; +#define CFG_CM_NONE 0 +#define CFG_CM_DF9IC 1 /* TXCLK, RXCLK external source */ +#define CFG_CM_G3RUH 2 /* TXCLK generated by BRG intern */ +#define CFG_CM_TCM3105 3 /* TXCLK from BRG, RXCLK from DPLL */ +#define CFG_CM_HS 4 /* high-Speed mode */ +#define CFG_CM_MIN 1 +#define CFG_CM_MAX 4 + int duplex; /* duplex mode */ +#define CFG_DUPLEX_HALF 0 +#define CFG_DUPLEX_FULL 1 +#define CFG_DUPLEX_FULLPTT 2 /* full duplex with PTT always asserted */ +#define CFG_DUPLEX_MIN 0 +#define CFG_DUPLEX_MAX 2 + int dpll; /* DPLL configuration */ +#define CFG_DPLL_PS (1<<0) /* 180 deg phase shift enable */ + int brate_m; /* BRG "M" */ +#define CFG_BRATEM_MAX 15 + int brate_n; /* BRG "N" */ +#define CFG_BRATEN_MAX 63 + 
int clkout;				/* enable clock out */
+#define CFG_TXTXCLK	(1<<0)		/* tx-clock on TXCLK */
+#define CFG_TXRTS	(1<<1)		/* tx-clock on RTS in highspeed mode */
+	int datainv;			/* NRZ data inversion */
+	int txddrive;			/* TXD driver select */
+#define CFG_TXDDRIVE_NONE	0
+#define CFG_TXDDRIVE_TP		1	/* totem pole */
+#define CFG_TXDDRIVE_OD		2	/* open drain */
+#define CFG_TXDDRIVE_MIN	1
+#define CFG_TXDDRIVE_MAX	2
+	int cdinv;			/* CD line invert */
+	int testloop;			/* TXD<->RXD testloop */
+	int txdelmode;			/* TX-delay mode */
+#define CFG_TXDEL_NONE	0
+#define CFG_TXDEL_SOFT	1		/* TX-delay software generated */
+#define CFG_TXDEL_HARD	2		/* TX-delay generated by modem (CTS) */
+#define CFG_TXDEL_MIN	1
+#define CFG_TXDEL_MAX	2
+	int txdelval;			/* txdelay value */
+	int txtailval;			/* txtail value */
+	int sharedflg;			/* use shared flags */
+	int crcmode;			/* CRC check */
+#define CFG_CRCMODE_CRC32	(1<<0)	/* 1: CRC-32  0: CRC-16 */
+#define CFG_CRCMODE_RESET_0000	(1<<1)	/* 1: reset value 0x0000  0: 0xFFFF */
+#define CFG_CRCMODE_RXCD	(1<<2)	/* 1: disable RX CRC check  0: enable */
+#define CFG_CRCMODE_RXCRCFWD	(1<<3)	/* 1: forward crc bytes  0: dont */
+#define CFG_CRCMODE_TXNOCRC	(1<<4)	/* 0: generate and append CRC  1: dont */
+	int preamb_rpt;			/* number of preamble repeats 0=off */
+	unsigned char preamble;		/* preamble value */
+	int hdlcext;			/* HDLC extensions */
+#define CFG_HDLCEXT_ONEINS	(1<<0)	/* one insertion after seven zeros */
+#define CFG_HDLCEXT_ONEFILL	(1<<1)	/* 1: interframe fill by ones  0: flags */
+	int slottime;			/* DCD Slottime */
+	int persist;			/* DCD Persist Value (0..255) */
+};
+
+/*
+ * Per-chip (board-level) configuration.  One DSCC-4 chip serves four
+ * channels, so these settings are shared by all devices on the card.
+ */
+struct chipcfg_t {
+	unsigned long lbimode;		/* local bus mode 1:1 reg */
+	int oscpwr;			/* oscillator power */
+	int rxbufcnt;			/* number of RX descriptors and buffers */
+	int txbufcnt;			/* number of TX descriptors */
+	int iqlen;			/* irq-queuelen */
+	int prichan;			/* priority channel -1=none */
+	int mfifo_rx_t;			/* RX main-FIFO DMA init threshold dwords */
+};
+
+#ifdef __KERNEL__
+/* #include */
+
+/*
+ * Per-channel runtime state ("device control").  One of these exists for
+ * each of the four channels of a card; dctlp in the prototypes below.
+ */
+struct devctl_t {
+	struct net_device dev;		/* link to netdev struct */
+	struct ax25_dev ax25dev;
+	struct chipctl_t *chip;		/* back-pointer to owning chip */
+	struct devcfg_t cfg;		/* per-channel configuration */
+	int channel;			/* channel number */
+	char name[10];			/* interface name */
+	unsigned long * volatile iq_rx;	/* interrupt queues */
+	unsigned long * volatile iq_tx;
+	unsigned long * volatile iq_rx_next;	/* next entry to be processed */
+	unsigned long * volatile iq_tx_next;
+	struct rx_desc_t * volatile dq_rx;	/* RX descriptor queue */
+	struct rx_desc_t * volatile dq_rx_next;
+	struct tx_desc_t * volatile dq_tx;	/* TX descriptor queue */
+	struct tx_desc_t * volatile dq_tx_cleanup;	/* first to be cleaned up after transmission */
+	struct tx_desc_t * volatile dq_tx_last;	/* last one to be transmitted */
+	volatile unsigned long rx_mailbox;	/* communication with isr */
+	volatile unsigned long tx_mailbox;	/* communication with isr */
+	volatile unsigned long probe_mailbox;	/* communication with isr */
+	struct net_device_stats stats;	/* statistics */
+	volatile int txstate;		/* TX state machine, TX_* values below */
+	unsigned long last_tx;		/* jiffy-timestamp of last TX frame queued (for tbusy) */
+	unsigned long ccr0, ccr1, ccr2;	/* SCC control register 0:2 save */
+	int dmac_rx;			/* DMA controller RX state */
+#define DMAC_RX_RESET	0		/* RESET state */
+#define DMAC_RX_INIT	1		/* initialized */
+	volatile long tx_bitrate;	/* effective TX bitrate, probed if extern */
+	struct timeval tv;		/* for baudrate probe */
+	spinlock_t dev_lock;		/* spinlock for device */
+	int start;			/* State of the driver */
+	int tbusy;			/* State of the driver */
+	int dcd;			/* State of the driver */
+};
+
+/*
+ * Per-chip runtime state ("chip control"), shared by all channels on
+ * the card; cctlp in the prototypes below.
+ */
+struct chipctl_t {
+	struct pci_dev *pcidev;
+	struct chipcfg_t cfg;
+	struct devctl_t *device[dev_per_card];	/* the channels of this chip */
+	unsigned long * volatile iq_per;	/* peripheral interrupt queue */
+	unsigned long * volatile iq_cfg;	/* configuration interrupt queue */
+	unsigned long * volatile iq_per_next;	/* next entry to be processed */
+	unsigned long * volatile iq_cfg_next;
+	void *io_base;			/* mapped register base */
+	void *lbi_base;			/* mapped local-bus base */
+	unsigned int irq;
+	int initialized;		/* chip initialized? */
+	int usecnt;			/* number of open channels on chip */
+	volatile unsigned long mailbox;	/* communication with isr */
+#define MAILBOX_NONE	0
+#define MAILBOX_OK	1
+#define MAILBOX_FAILURE	2
+	spinlock_t chip_lock;		/* spinlock for chip */
+};
+
+/*
+ * TX DMA descriptor.  NOTE(review): the packed members up to "result"
+ * appear to mirror the hardware descriptor layout (nextptr/dataptr hold
+ * bus addresses); skb/next/prev look driver-private — confirm against
+ * the PEB 20534 data sheet.
+ */
+struct tx_desc_t {
+	volatile long flags __attribute__ ((packed));
+#define NO	(1<<16)			/* number of bytes */
+#define HI	(1<<29)			/* host initiated interrupt */
+#define HOLD	(1<<30)			/* last descriptor in chain */
+#define FE	(1<<31)			/* frame end; NOTE(review): 1<<31 overflows
+					   a 32-bit signed int — (1UL<<31) would be
+					   cleaner, verify before changing users */
+	volatile void * volatile nextptr __attribute__ ((packed));	/* note busaddr */
+	volatile void * volatile dataptr __attribute__ ((packed));
+	volatile unsigned long result __attribute__ ((packed));
+#define C	(1<<30)			/* descriptor complete flag */
+	struct sk_buff *skb;		/* pointer to complete sk_buff */
+	struct tx_desc_t *next;		/* note virtaddr */
+	struct tx_desc_t *prev;
+};
+
+/* RX DMA descriptor; same hardware/driver split as tx_desc_t above. */
+struct rx_desc_t {
+	volatile long flags __attribute__ ((packed));
+#define RA	(1<<9)			/* receive abort */
+#define BNO	(1<<16)			/* byte number of received data */
+	volatile void * volatile nextptr __attribute__ ((packed));	/* note busaddr */
+	volatile void * volatile dataptr __attribute__ ((packed));
+	volatile unsigned long result __attribute__ ((packed));
+	volatile struct rx_desc_t * volatile feptr __attribute__ ((packed));
+	struct sk_buff *skb;		/* pointer to complete sk_buff */
+	struct rx_desc_t *next;		/* note virtaddr */
+	struct rx_desc_t *prev;		/* previous */
+};
+
+#define SKB_HEADROOM	(AX25_MAX_HEADER_LEN + (4-(AX25_MAX_HEADER_LEN % 4)))	/* keep dword alignment */
+#endif /* __KERNEL__ */
+
+/* ioctl()s */
+#define SIOCPCISCCGCCFG		SIOCDEVPRIVATE		/* get chip configuration */
+#define SIOCPCISCCSCCFG		(SIOCDEVPRIVATE+1)	/* set chip configuration */
+#define SIOCPCISCCGDCFG		(SIOCDEVPRIVATE+2)	/* get device configuration */
+#define SIOCPCISCCSDCFG		(SIOCDEVPRIVATE+3)	/* set device configuration */
+#define SIOCPCISCCSLED		(SIOCDEVPRIVATE+4)
+#define SIOCPCISCCGDSTAT	(SIOCDEVPRIVATE+5)
+#define SIOCPCISCCDCAL		(SIOCDEVPRIVATE+6)
+#define SIOCPCISCCLBI		(SIOCDEVPRIVATE+7)	/* local-bus transfer, see struct lbi_xfer */
+#define SIOCPCISCCKICKTX	(SIOCDEVPRIVATE+8)
+
+/* values for devctl_t.txstate (see pciscc_set_txstate) */
+#define TX_MIN		0
+#define TX_RESET	0
+#define TX_IDLE		1
+#define TX_DELAY	2
+#define TX_XMIT		3
+#define TX_TAIL		4
+#define TX_PROBE	5
+#define TX_CAL		6
+#define TX_WAIT		7
+#define TX_MAX		7
+
+/* modem/line status bits — presumably reported via SIOCPCISCCGDSTAT;
+   verify against the ioctl handler in pciscc4.c */
+#define STATUS_DPLA	(1<<4)
+#define STATUS_RLI	(1<<5)
+#define STATUS_CD	(1<<6)
+#define STATUS_CTS	(1<<7)
+#define STATUS_RTS	(1<<8)
+
+/* user/kernel argument block for the SIOCPCISCCLBI ioctl */
+struct lbi_xfer {
+	unsigned int mode;
+#define LBI_READ	0
+#define LBI_WRITE	1
+	unsigned short addr;
+	unsigned short data;
+};
+
+#ifdef __KERNEL__
+/* In pciscc4.c */
+#ifdef PCISCC_DEBUG
+static void pciscc_dmac_regdump(struct chipctl_t *cctlp);
+static void pciscc_queuedump(struct devctl_t *dctlp);
+#endif
+static int pciscc_chip_open(struct chipctl_t *cctlp);
+static int pciscc_chip_close(struct chipctl_t *cctlp);
+static int pciscc_channel_open(struct devctl_t *dctlp);
+static void pciscc_channel_close(struct devctl_t *dctlp);
+static void pciscc_isr(int irq, void *dev_id, struct pt_regs *regs);
+static __inline__ void pciscc_isr_receiver(struct devctl_t *dctlp);
+static __inline__ int pciscc_isr_txcleanup(struct devctl_t *dctlp);
+static void pciscc_bh_txto(void *arg);
+static void pciscc_bh_txreset(void *arg);
+static __inline__ void pciscc_clear_timer(struct devctl_t *dctlp);
+/*static long pciscc_probe_txrate(struct devctl_t *dctlp);*/
+static __inline__ void pciscc_set_txstate(struct devctl_t *dctlp, int state);
+static int pciscc_dev_close(struct net_device *dev);
+static int pciscc_dev_open(struct net_device *dev);
+static int pciscc_change_mtu(struct net_device *dev, int new_mtu);
+static struct net_device_stats *
+	pciscc_get_stats(struct net_device *dev);
+static int pciscc_dev_init(struct net_device *devctl);
+static int pciscc_dev_set_mac_address(struct net_device *dev, void *addr);
+static int pciscc_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void pciscc_kick_tx(struct devctl_t *dctlp);
+static int pciscc_xmit(struct sk_buff *skb, struct net_device *dev);
+static __inline__ void pciscc_rx_skb(struct sk_buff *skb, struct devctl_t *dctlp);
+void cleanup_module(void);
+int init_module(void);
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_PCISCC.H */