author    Linus Torvalds <torvalds@athlon.transmeta.com>  2002-02-05 00:18:49 -0800
committer Linus Torvalds <torvalds@athlon.transmeta.com>  2002-02-05 00:18:49 -0800
commit    1ea864f1c53bc771294e61cf9be43b1d22e78f4c (patch)
tree      de928ec3d1e22abf7b0963e59092152daa5a2ecb
parent    2f886464aa00cd9eb9cf46c8c155a24a752bb317 (diff)
v2.5.2.6 -> v2.5.3 (tag: v2.5.3)

- Doug Ledford: i810 audio driver update
- Evgeniy Polyakov: update various SCSI drivers to new locking
- David Howells: syscall latency improvement, try 2
- Francois Romieu: dscc4 driver update
- Patrick Mochel: driver model fixes
- Andrew Morton: clean up a few details in ext3 inode initialization
- Pete Wyckoff: make x86 machine check print out right address..
- Hans Reiser: reiserfs update
- Richard Gooch: devfs update
- Greg KH: USB updates
- Dave Jones: PNPBIOS
- Nathan Scott: extended attributes
- Corey Minyard: clean up zlib duplication (triplication..)
-rw-r--r--  Documentation/filesystems/Locking | 6
-rw-r--r--  Documentation/filesystems/devfs/ChangeLog | 8
-rw-r--r--  Documentation/filesystems/devfs/README | 6
-rw-r--r--  MAINTAINERS | 16
-rw-r--r--  Makefile | 4
-rw-r--r--  arch/alpha/kernel/check_asm.c | 4
-rw-r--r--  arch/alpha/kernel/entry.S | 10
-rw-r--r--  arch/alpha/kernel/process.c | 4
-rw-r--r--  arch/arm/kernel/entry-common.S | 16
-rw-r--r--  arch/arm/tools/getconstants.c | 6
-rw-r--r--  arch/cris/kernel/entryoffsets.c | 6
-rw-r--r--  arch/i386/defconfig | 3
-rw-r--r--  arch/i386/kernel/bluesmoke.c | 2
-rw-r--r--  arch/i386/kernel/entry.S | 135
-rw-r--r--  arch/i386/kernel/head.S | 10
-rw-r--r--  arch/i386/kernel/process.c | 8
-rw-r--r--  arch/i386/kernel/ptrace.c | 43
-rw-r--r--  arch/i386/kernel/signal.c | 2
-rw-r--r--  arch/i386/kernel/traps.c | 16
-rw-r--r--  arch/i386/kernel/vm86.c | 6
-rw-r--r--  arch/ia64/kernel/entry.S | 16
-rw-r--r--  arch/ia64/tools/print_offsets.c | 6
-rw-r--r--  arch/m68k/kernel/m68k_defs.c | 4
-rw-r--r--  arch/mips/kernel/entry.S | 8
-rw-r--r--  arch/mips/kernel/scall_o32.S | 12
-rw-r--r--  arch/mips/kernel/smp.c | 2
-rw-r--r--  arch/mips/tools/offset.c | 6
-rw-r--r--  arch/mips64/kernel/entry.S | 10
-rw-r--r--  arch/mips64/kernel/scall_64.S | 4
-rw-r--r--  arch/mips64/kernel/scall_o32.S | 20
-rw-r--r--  arch/mips64/tools/offset.c | 6
-rw-r--r--  arch/parisc/kernel/entry.S | 14
-rw-r--r--  arch/parisc/tools/offset.c | 6
-rw-r--r--  arch/ppc/kernel/entry.S | 10
-rw-r--r--  arch/ppc/kernel/mk_defs.c | 6
-rw-r--r--  arch/ppc/kernel/smp.c | 2
-rw-r--r--  arch/s390/kernel/entry.S | 20
-rw-r--r--  arch/s390x/kernel/entry.S | 18
-rw-r--r--  arch/sh/kernel/entry.S | 24
-rw-r--r--  arch/sparc/kernel/rtrap.S | 12
-rw-r--r--  arch/sparc/kernel/smp.c | 2
-rw-r--r--  arch/sparc64/kernel/process.c | 2
-rw-r--r--  arch/sparc64/kernel/rtrap.S | 12
-rw-r--r--  drivers/base/core.c | 2
-rw-r--r--  drivers/base/interface.c | 2
-rw-r--r--  drivers/net/ppp_deflate.c | 123
-rw-r--r--  drivers/net/wan/dscc4.c | 953
-rw-r--r--  drivers/net/zlib.c | 5371
-rw-r--r--  drivers/net/zlib.h | 1010
-rw-r--r--  drivers/pnp/Config.help | 16
-rw-r--r--  drivers/pnp/Config.in | 4
-rw-r--r--  drivers/pnp/Makefile | 15
-rw-r--r--  drivers/pnp/isapnp.c | 1
-rw-r--r--  drivers/pnp/pnpbios_core.c | 1276
-rw-r--r--  drivers/pnp/pnpbios_proc.c | 151
-rw-r--r--  drivers/scsi/3w-xxxx.c | 14
-rw-r--r--  drivers/scsi/53c7,8xx.c | 10
-rw-r--r--  drivers/scsi/53c700.c | 15
-rw-r--r--  drivers/scsi/AM53C974.c | 7
-rw-r--r--  drivers/scsi/BusLogic.h | 4
-rw-r--r--  drivers/scsi/NCR5380.c | 34
-rw-r--r--  drivers/scsi/NCR53C9x.c | 12
-rw-r--r--  drivers/scsi/NCR53c406a.c | 9
-rw-r--r--  drivers/scsi/a2091.c | 4
-rw-r--r--  drivers/scsi/a3000.c | 7
-rw-r--r--  drivers/scsi/advansys.c | 6
-rw-r--r--  drivers/scsi/aha152x.c | 4
-rw-r--r--  drivers/scsi/aha1542.c | 12
-rw-r--r--  drivers/scsi/aha1740.c | 4
-rw-r--r--  drivers/scsi/atp870u.c | 4
-rw-r--r--  drivers/scsi/blz1230.c | 2
-rw-r--r--  drivers/scsi/blz2060.c | 2
-rw-r--r--  drivers/scsi/cpqfcTSinit.c | 20
-rw-r--r--  drivers/scsi/cpqfcTSworker.c | 8
-rw-r--r--  drivers/scsi/cyberstorm.c | 2
-rw-r--r--  drivers/scsi/cyberstormII.c | 2
-rw-r--r--  drivers/scsi/dec_esp.c | 6
-rw-r--r--  drivers/scsi/dpt_i2o.c | 20
-rw-r--r--  drivers/scsi/dtc.c | 2
-rw-r--r--  drivers/scsi/eata.c | 8
-rw-r--r--  drivers/scsi/eata_dma.c | 9
-rw-r--r--  drivers/scsi/eata_pio.c | 33
-rw-r--r--  drivers/scsi/esp.c | 2
-rw-r--r--  drivers/scsi/fastlane.c | 2
-rw-r--r--  drivers/scsi/fd_mcs.c | 16
-rw-r--r--  drivers/scsi/fdomain.c | 12
-rw-r--r--  drivers/scsi/gdth.c | 20
-rw-r--r--  drivers/scsi/gvp11.c | 4
-rw-r--r--  drivers/scsi/ibmmca.c | 64
-rw-r--r--  drivers/scsi/imm.c | 4
-rw-r--r--  drivers/scsi/in2000.h | 4
-rw-r--r--  drivers/scsi/ini9100u.c | 56
-rw-r--r--  drivers/scsi/inia100.c | 24
-rw-r--r--  drivers/scsi/ips.c | 30
-rw-r--r--  drivers/scsi/jazz_esp.c | 2
-rw-r--r--  drivers/scsi/mac53c94.c | 7
-rw-r--r--  drivers/scsi/mac_esp.c | 6
-rw-r--r--  drivers/scsi/mca_53c9x.c | 2
-rw-r--r--  drivers/scsi/megaraid.c | 12
-rw-r--r--  drivers/scsi/mesh.c | 13
-rw-r--r--  drivers/scsi/oktagon_esp.c | 2
-rw-r--r--  drivers/scsi/pas16.c | 2
-rw-r--r--  drivers/scsi/pci2000.c | 2
-rw-r--r--  drivers/scsi/pci2220i.c | 12
-rw-r--r--  drivers/scsi/ppa.c | 12
-rw-r--r--  drivers/scsi/psi240i.c | 9
-rw-r--r--  drivers/scsi/qla1280.c | 10
-rw-r--r--  drivers/scsi/qlogicfas.c | 4
-rw-r--r--  drivers/scsi/seagate.c | 9
-rw-r--r--  drivers/scsi/sgiwd93.c | 7
-rw-r--r--  drivers/scsi/sim710.c | 4
-rw-r--r--  drivers/scsi/sun3x_esp.c | 2
-rw-r--r--  drivers/scsi/sym53c416.c | 31
-rw-r--r--  drivers/scsi/sym53c8xx_comm.h | 4
-rw-r--r--  drivers/scsi/t128.c | 2
-rw-r--r--  drivers/scsi/tmscsim.c | 40
-rw-r--r--  drivers/scsi/u14-34f.c | 8
-rw-r--r--  drivers/scsi/ultrastor.c | 27
-rw-r--r--  drivers/scsi/wd7000.c | 12
-rw-r--r--  drivers/sound/i810_audio.c | 743
-rw-r--r--  drivers/usb/hcd.c | 4
-rw-r--r--  drivers/usb/ov511.c | 16
-rw-r--r--  drivers/usb/usb.c | 57
-rw-r--r--  fs/Config.in | 12
-rw-r--r--  fs/Makefile | 3
-rw-r--r--  fs/block_dev.c | 3
-rw-r--r--  fs/buffer.c | 46
-rw-r--r--  fs/cramfs/Makefile | 2
-rw-r--r--  fs/devfs/base.c | 28
-rw-r--r--  fs/driverfs/inode.c | 19
-rw-r--r--  fs/ext3/ialloc.c | 1
-rw-r--r--  fs/hfs/file_cap.c | 3
-rw-r--r--  fs/hfs/file_hdr.c | 3
-rw-r--r--  fs/hpfs/dir.c | 5
-rw-r--r--  fs/isofs/Makefile | 2
-rw-r--r--  fs/jffs2/Makefile | 2
-rw-r--r--  fs/jffs2/compr_zlib.c | 58
-rw-r--r--  fs/jffs2/zlib.c | 5371
-rw-r--r--  fs/jffs2/zlib.h | 1010
-rw-r--r--  fs/lockd/svc.c | 2
-rw-r--r--  fs/nfsd/export.c | 2
-rw-r--r--  fs/proc/generic.c | 17
-rw-r--r--  fs/read_write.c | 15
-rw-r--r--  fs/reiserfs/Makefile | 2
-rw-r--r--  fs/reiserfs/bitmap.c | 65
-rw-r--r--  fs/reiserfs/buffer2.c | 285
-rw-r--r--  fs/reiserfs/dir.c | 86
-rw-r--r--  fs/reiserfs/do_balan.c | 7
-rw-r--r--  fs/reiserfs/file.c | 26
-rw-r--r--  fs/reiserfs/fix_node.c | 28
-rw-r--r--  fs/reiserfs/inode.c | 166
-rw-r--r--  fs/reiserfs/ioctl.c | 6
-rw-r--r--  fs/reiserfs/journal.c | 371
-rw-r--r--  fs/reiserfs/lbalance.c | 4
-rw-r--r--  fs/reiserfs/namei.c | 155
-rw-r--r--  fs/reiserfs/objectid.c | 19
-rw-r--r--  fs/reiserfs/prints.c | 37
-rw-r--r--  fs/reiserfs/procfs.c | 105
-rw-r--r--  fs/reiserfs/resize.c | 4
-rw-r--r--  fs/reiserfs/stree.c | 138
-rw-r--r--  fs/reiserfs/super.c | 495
-rw-r--r--  fs/reiserfs/tail_conversion.c | 2
-rw-r--r--  fs/reiserfs/version.c | 7
-rw-r--r--  fs/ufs/file.c | 3
-rw-r--r--  fs/xattr.c | 341
-rw-r--r--  include/asm-i386/desc.h | 26
-rw-r--r--  include/asm-i386/signal.h | 4
-rw-r--r--  include/asm-i386/unistd.h | 12
-rw-r--r--  include/linux/ext3_fs_i.h | 1
-rw-r--r--  include/linux/fs.h | 5
-rw-r--r--  include/linux/init_task.h | 10
-rw-r--r--  include/linux/limits.h | 3
-rw-r--r--  include/linux/pnpbios.h | 211
-rw-r--r--  include/linux/reiserfs_fs.h | 696
-rw-r--r--  include/linux/reiserfs_fs_i.h | 83
-rw-r--r--  include/linux/reiserfs_fs_sb.h | 239
-rw-r--r--  include/linux/sched.h | 22
-rw-r--r--  include/linux/usb.h | 13
-rw-r--r--  include/linux/xattr.h | 15
-rw-r--r--  include/linux/zlib.h | 654
-rw-r--r--  include/linux/zlib_fs.h | 707
-rw-r--r--  kernel/fork.c | 4
-rw-r--r--  kernel/ksyms.c | 1
-rw-r--r--  kernel/sched.c | 16
-rw-r--r--  kernel/signal.c | 8
-rw-r--r--  lib/Config.in | 32
-rw-r--r--  lib/Makefile | 7
-rw-r--r--  lib/zlib_deflate/Makefile | 18
-rw-r--r--  lib/zlib_deflate/deflate.c | 1250
-rw-r--r--  lib/zlib_deflate/deflate_syms.c | 21
-rw-r--r--  lib/zlib_deflate/deftree.c | 1096
-rw-r--r--  lib/zlib_deflate/defutil.h | 335
-rw-r--r--  lib/zlib_inflate/Makefile | 26
-rw-r--r--  lib/zlib_inflate/infblock.c | 355
-rw-r--r--  lib/zlib_inflate/infblock.h | 44
-rw-r--r--  lib/zlib_inflate/infcodes.c | 204
-rw-r--r--  lib/zlib_inflate/infcodes.h | 33
-rw-r--r--  lib/zlib_inflate/inffast.c | 161
-rw-r--r--  lib/zlib_inflate/inffast.h | 17
-rw-r--r--  lib/zlib_inflate/inffixed.h | 151
-rw-r--r--  lib/zlib_inflate/inflate.c | 382
-rw-r--r--  lib/zlib_inflate/inflate_syms.c | 21
-rw-r--r--  lib/zlib_inflate/inftrees.c | 391
-rw-r--r--  lib/zlib_inflate/inftrees.h | 63
-rw-r--r--  lib/zlib_inflate/infutil.c | 87
-rw-r--r--  lib/zlib_inflate/infutil.h | 197
-rw-r--r--  linux/zconf.h | 90
-rw-r--r--  linux/zutil.h | 126
-rw-r--r--  net/sunrpc/sched.c | 4
-rw-r--r--  net/sunrpc/svc.c | 2
-rw-r--r--  scripts/Menuconfig | 35
211 files changed, 11345 insertions, 16505 deletions
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 8e7276b35868..e40aecd5c0a9 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -219,7 +219,7 @@ prototypes:
locking rules:
All except ->poll() may block.
BKL
-llseek: yes
+llseek: yes (see below)
read: no
write: no
readdir: yes (see below)
@@ -235,6 +235,10 @@ lock: yes
readv: no
writev: no
+->llseek() locking has moved from llseek to the individual llseek
+implementations. If your fs is not using generic_file_llseek, you
+need to acquire and release the BKL in your ->llseek().
+
->open() locking is in-transit: big lock partially moved into the methods.
The only exception is ->open() in the instances of file_operations that never
end up in ->i_fop/->proc_fops, i.e. ones that belong to character devices
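
The Locking text added above means that a filesystem which does not use generic_file_llseek must now take and release the BKL inside its own ->llseek() method. A minimal sketch of such a method in the 2.5.3-era style follows; "examplefs" and the seek arithmetic are purely illustrative, not code from this patch.

#include <linux/fs.h>
#include <linux/smp_lock.h>	/* lock_kernel()/unlock_kernel() */

/* Hypothetical ->llseek() for an imaginary "examplefs": the only point
 * being made is that the method now takes the BKL itself, since the VFS
 * no longer does it around llseek. */
static loff_t examplefs_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t retval = -EINVAL;

	lock_kernel();
	switch (origin) {
	case 2:		/* SEEK_END */
		offset += file->f_dentry->d_inode->i_size;
		break;
	case 1:		/* SEEK_CUR */
		offset += file->f_pos;
		break;
	}
	if (offset >= 0) {
		file->f_pos = offset;
		retval = offset;
	}
	unlock_kernel();
	return retval;
}

Filesystems that already use generic_file_llseek need no change, since that helper handles the locking for the common case.
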
diff --git a/Documentation/filesystems/devfs/ChangeLog b/Documentation/filesystems/devfs/ChangeLog
index 8a65ff6090f9..1066a0bbfc6a 100644
--- a/Documentation/filesystems/devfs/ChangeLog
+++ b/Documentation/filesystems/devfs/ChangeLog
@@ -1877,3 +1877,11 @@ Changes for patch v207
- Tag VFS deletable in <devfs_mk_symlink> if handle ignored
- Updated README from master HTML file
+===============================================================================
+Changes for patch v208
+
+- Added KERN_* to remaining messages
+
+- Cleaned up declaration of <stat_read>
+
+- Updated README from master HTML file
diff --git a/Documentation/filesystems/devfs/README b/Documentation/filesystems/devfs/README
index 9f0e99e96740..909d52d378bd 100644
--- a/Documentation/filesystems/devfs/README
+++ b/Documentation/filesystems/devfs/README
@@ -3,7 +3,7 @@ Devfs (Device File System) FAQ
Linux Devfs (Device File System) FAQ
Richard Gooch
-20-JAN-2002
+24-JAN-2002
Document languages:
@@ -1925,10 +1925,6 @@ http://www.atnf.csiro.au/~rgooch/linux/docs/devfs.html
A Korean translation by viatoris@nownuri.net is available at
-http://home.nownuri.net/~viatoris/devfs/devfs.html
-
-A newer version is under construcation at
-
http://viatoris.new21.org/devfs/devfs.html
diff --git a/MAINTAINERS b/MAINTAINERS
index 1c4472ec57ef..e42d0589d53d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -154,8 +154,6 @@ S: Maintained
AD1816 SOUND DRIVER
P: Thorsten Knabe
-M: Thorsten Knabe <tek@rbg.informatik.tu-darmstadt.de>
-M: Thorsten Knabe <tek01@hrzpub.tu-darmstadt.de>
W: http://www.student.informatik.tu-darmstadt.de/~tek/projects/linux.html
W: http://www.tu-darmstadt.de/~tek01/projects/linux.html
S: Maintained
@@ -216,7 +214,6 @@ S: Maintained
ARPD SUPPORT
P: Jonathan Layes
-M: layes@loran.com
L: linux-net@vger.kernel.org
S: Maintained
@@ -235,7 +232,6 @@ S: Maintained
BERKSHIRE PRODUCTS PC WATCHDOG DRIVER
P: Kenji Hollis
-M: kenji@bitgate.com
W: http://ftp.bitgate.com/pcwd/
S: Maintained
@@ -433,13 +429,11 @@ S: Maintained
DIGI INTL. EPCA DRIVER
P: Chad Schwartz
M: support@dgii.com
-M: chads@dgii.com
L: digilnux@dgii.com
S: Maintained
DIGI RIGHTSWITCH NETWORK DRIVER
P: Rick Richardson
-M: rick@remotepoint.com
L: linux-net@vger.kernel.org
W: http://www.dgii.com/linux/
S: Maintained
@@ -485,7 +479,7 @@ S: Maintained
DRM DRIVERS
P: Rik Faith
-M: faith@valinux.com
+M: faith@redhat.com
L: dri-devel@lists.sourceforge.net
S: Supported
@@ -497,7 +491,6 @@ S: Maintained
EATA-DMA SCSI DRIVER
P: Michael Neuffer
-M: mike@i-Connect.Net
L: linux-eata@i-connect.net, linux-scsi@vger.kernel.org
S: Maintained
@@ -927,7 +920,6 @@ S: Maintained
LOGICAL VOLUME MANAGER
P: Heinz Mauelshagen
-M: mge@sistina.de
L: linux-LVM@sistina.com
W: http://www.sistina.com/lvm
S: Maintained
@@ -1134,7 +1126,7 @@ S: Maintained
OLYMPIC NETWORK DRIVER
P: Peter De Shrijver
-M: p2@ace.ulyssis.sutdent.kuleuven.ac.be
+M: p2@ace.ulyssis.student.ac.be
P: Mike Phillips
M: mikep@linuxtr.net
L: linux-net@vger.kernel.org
@@ -1293,7 +1285,6 @@ S: Maintained
RISCOM8 DRIVER
P: Dmitry Gorodchanin
-M: pgmdsg@ibi.com
L: linux-kernel@vger.kernel.org
S: Maintained
@@ -1660,13 +1651,11 @@ W: http://www.kroah.com/linux-usb/
USB SERIAL BELKIN F5U103 DRIVER
P: William Greathouse
M: wgreathouse@smva.com
-M: wgreathouse@myfavoritei.com
L: linux-usb-users@lists.sourceforge.net
L: linux-usb-devel@lists.sourceforge.net
S: Maintained
USB SERIAL CYBERJACK PINPAD/E-COM DRIVER
-M: linux-usb@sii.li
L: linux-usb-users@lists.sourceforge.net
L: linux-usb-devel@lists.sourceforge.net
S: Supported
@@ -1792,7 +1781,6 @@ S: Maintained
ZF MACHZ WATCHDOG
P: Fernando Fuganti
-M: fuganti@conectiva.com.br
M: fuganti@netbank.com.br
W: http://cvs.conectiva.com.br/drivers/ZFL-watchdog/
S: Maintained
diff --git a/Makefile b/Makefile
index 825c3e6551a1..7c729498936f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 5
SUBLEVEL = 3
-EXTRAVERSION =-pre6
+EXTRAVERSION =
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
@@ -170,7 +170,7 @@ DRIVERS-$(CONFIG_ZORRO) += drivers/zorro/driver.o
DRIVERS-$(CONFIG_FC4) += drivers/fc4/fc4.a
DRIVERS-$(CONFIG_ALL_PPC) += drivers/macintosh/macintosh.o
DRIVERS-$(CONFIG_MAC) += drivers/macintosh/macintosh.o
-DRIVERS-$(CONFIG_ISAPNP) += drivers/pnp/pnp.o
+DRIVERS-$(CONFIG_PNP) += drivers/pnp/pnp.o
DRIVERS-$(CONFIG_SGI_IP22) += drivers/sgi/sgi.a
DRIVERS-$(CONFIG_VT) += drivers/video/video.o
DRIVERS-$(CONFIG_PARIDE) += drivers/block/paride/paride.a
diff --git a/arch/alpha/kernel/check_asm.c b/arch/alpha/kernel/check_asm.c
index 68203d926446..9685fa6cdf12 100644
--- a/arch/alpha/kernel/check_asm.c
+++ b/arch/alpha/kernel/check_asm.c
@@ -12,13 +12,13 @@ int main()
printf("#define TASK_FLAGS %ld\n",
(long)offsetof(struct task_struct, flags));
printf("#define TASK_SIGPENDING %ld\n",
- (long)offsetof(struct task_struct, sigpending));
+#error (long)offsetof(struct task_struct, sigpending));
printf("#define TASK_ADDR_LIMIT %ld\n",
(long)offsetof(struct task_struct, addr_limit));
printf("#define TASK_EXEC_DOMAIN %ld\n",
(long)offsetof(struct task_struct, exec_domain));
printf("#define TASK_NEED_RESCHED %ld\n",
- (long)offsetof(struct task_struct, need_resched));
+#error (long)offsetof(struct task_struct, work.need_resched));
printf("#define TASK_SIZE %ld\n", sizeof(struct task_struct));
printf("#define STACK_SIZE %ld\n", sizeof(union task_union));
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index a54bcf7dc651..04533d30037b 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -30,11 +30,11 @@
*/
#define TASK_STATE 0
#define TASK_FLAGS 8
-#define TASK_SIGPENDING 16
+#error #define TASK_SIGPENDING 16
#define TASK_ADDR_LIMIT 24
#define TASK_EXEC_DOMAIN 32
-#define TASK_NEED_RESCHED 40
-#define TASK_PTRACE 48
+#error #define TASK_NEED_RESCHED 40
+#error #define TASK_PTRACE 48
#define TASK_PROCESSOR 100
/*
@@ -580,11 +580,11 @@ ret_from_sys_call:
and $0,8,$0
beq $0,restore_all
ret_from_reschedule:
- ldq $2,TASK_NEED_RESCHED($8)
+#error ldq $2,TASK_NEED_RESCHED($8)
lda $4,init_task_union
bne $2,reschedule
xor $4,$8,$4
- ldl $5,TASK_SIGPENDING($8)
+#error ldl $5,TASK_SIGPENDING($8)
beq $4,restore_all
bne $5,signal_return
restore_all:
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 708220d6da99..68fc72ecdd0a 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -82,9 +82,9 @@ cpu_idle(void)
/* Although we are an idle CPU, we do not want to
get into the scheduler unnecessarily. */
- long oldval = xchg(&current->need_resched, -1UL);
+ long oldval = xchg(&current->work.need_resched, -1UL);
if (!oldval)
- while (current->need_resched < 0);
+ while (current->work.need_resched < 0);
schedule();
check_pgt_cache();
}
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 2947d32a8a3d..a10389665262 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -34,8 +34,8 @@ ENTRY(__do_softirq)
* stack.
*/
ret_fast_syscall:
- ldr r1, [tsk, #TSK_NEED_RESCHED]
- ldr r2, [tsk, #TSK_SIGPENDING]
+#error ldr r1, [tsk, #TSK_NEED_RESCHED]
+#error ldr r2, [tsk, #TSK_SIGPENDING]
teq r1, #0 @ need_resched || sigpending
teqeq r2, #0
bne slow
@@ -54,8 +54,8 @@ reschedule:
bl SYMBOL_NAME(schedule)
ENTRY(ret_to_user)
ret_slow_syscall:
- ldr r1, [tsk, #TSK_NEED_RESCHED]
- ldr r2, [tsk, #TSK_SIGPENDING]
+#error ldr r1, [tsk, #TSK_NEED_RESCHED]
+#error ldr r2, [tsk, #TSK_SIGPENDING]
1: teq r1, #0 @ need_resched => schedule()
bne reschedule
teq r2, #0 @ sigpending => do_signal()
@@ -66,7 +66,7 @@ __do_signal:
mov r0, #0 @ NULL 'oldset'
mov r1, sp @ 'regs'
mov r2, why @ 'syscall'
- b SYMBOL_NAME(do_signal) @ note the bl above sets lr
+#error b SYMBOL_NAME(do_signal) @ note the bl above sets lr
/*
* This is how we return from a fork. __switch_to will be calling us
@@ -82,7 +82,7 @@ ENTRY(ret_from_fork)
beq ret_slow_syscall
mov r1, sp
mov r0, #1 @ trace exit [IP = 1]
- bl SYMBOL_NAME(syscall_trace)
+#error bl SYMBOL_NAME(syscall_trace)
b ret_slow_syscall
@@ -160,7 +160,7 @@ ENTRY(vector_swi)
__sys_trace:
add r1, sp, #S_OFF
mov r0, #0 @ trace entry [IP = 0]
- bl SYMBOL_NAME(syscall_trace)
+#error bl SYMBOL_NAME(syscall_trace)
adrsvc al, lr, __sys_trace_return @ return address
add r1, sp, #S_R0 + S_OFF @ pointer to regs
@@ -173,7 +173,7 @@ __sys_trace_return:
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
mov r1, sp
mov r0, #1 @ trace exit [IP = 1]
- bl SYMBOL_NAME(syscall_trace)
+#error bl SYMBOL_NAME(syscall_trace)
b ret_slow_syscall
.align 5
diff --git a/arch/arm/tools/getconstants.c b/arch/arm/tools/getconstants.c
index 6b9eab4c5e9b..b75ba79e384d 100644
--- a/arch/arm/tools/getconstants.c
+++ b/arch/arm/tools/getconstants.c
@@ -37,10 +37,10 @@
void func(void)
{
-DEFN("TSK_SIGPENDING", OFF_TSK(sigpending));
+#error DEFN("TSK_SIGPENDING", OFF_TSK(sigpending));
DEFN("TSK_ADDR_LIMIT", OFF_TSK(addr_limit));
-DEFN("TSK_NEED_RESCHED", OFF_TSK(need_resched));
-DEFN("TSK_PTRACE", OFF_TSK(ptrace));
+#error DEFN("TSK_NEED_RESCHED", OFF_TSK(need_resched));
+#error DEFN("TSK_PTRACE", OFF_TSK(ptrace));
DEFN("TSK_USED_MATH", OFF_TSK(used_math));
DEFN("TSS_SAVE", OFF_TSK(thread.save));
diff --git a/arch/cris/kernel/entryoffsets.c b/arch/cris/kernel/entryoffsets.c
index 01b804699a52..4084a63c86e5 100644
--- a/arch/cris/kernel/entryoffsets.c
+++ b/arch/cris/kernel/entryoffsets.c
@@ -33,9 +33,9 @@ void NAME ## _fun (void) \
VAL (NAME, offsetof (TYPE, MEMBER))
/* task_struct offsets. */
-OF (LTASK_SIGPENDING, struct task_struct, sigpending)
-OF (LTASK_NEEDRESCHED, struct task_struct, need_resched)
-OF (LTASK_PTRACE, struct task_struct, ptrace)
+#error OF (LTASK_SIGPENDING, struct task_struct, sigpending)
+#error OF (LTASK_NEEDRESCHED, struct task_struct, need_resched)
+#error OF (LTASK_PTRACE, struct task_struct, ptrace)
OF (LTASK_PID, struct task_struct, pid)
/* pt_regs offsets. */
diff --git a/arch/i386/defconfig b/arch/i386/defconfig
index d045155c8bd3..48a29fe45a46 100644
--- a/arch/i386/defconfig
+++ b/arch/i386/defconfig
@@ -681,7 +681,6 @@ CONFIG_LOCKD=y
# CONFIG_NCPFS_NLS is not set
# CONFIG_NCPFS_EXTRAS is not set
# CONFIG_ZISOFS_FS is not set
-# CONFIG_ZLIB_FS_INFLATE is not set
#
# Partition Types
@@ -846,3 +845,5 @@ CONFIG_USB_STORAGE=y
# Library routines
#
# CONFIG_CRC32 is not set
+# CONFIG_ZLIB_INFLATE is not set
+# CONFIG_ZLIB_DEFLATE is not set
diff --git a/arch/i386/kernel/bluesmoke.c b/arch/i386/kernel/bluesmoke.c
index 0aa4b209cf74..63db0ab54101 100644
--- a/arch/i386/kernel/bluesmoke.c
+++ b/arch/i386/kernel/bluesmoke.c
@@ -47,7 +47,7 @@ static void intel_machine_check(struct pt_regs * regs, long error_code)
{
rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
printk(" at %08x%08x",
- high, low);
+ ahigh, alow);
}
printk("\n");
/* Clear it */
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index ead91fcb66ef..2544f3ddf9b4 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -72,10 +72,13 @@ VM_MASK = 0x00020000
*/
state = 0
flags = 4
-sigpending = 8
+work = 8
+need_resched = work+0
+syscall_trace = work+1
+sigpending = work+2
+notify_resume = work+3
addr_limit = 12
exec_domain = 16
-need_resched = 20
tsk_ptrace = 24
cpu = 32
@@ -151,7 +154,7 @@ ENTRY(lcall7)
call *%edx
addl $4, %esp
popl %eax
- jmp ret_from_sys_call
+ jmp resume_userspace
ENTRY(lcall27)
pushfl # We get a different stack layout with call gates,
@@ -172,7 +175,7 @@ ENTRY(lcall27)
call *%edx
addl $4, %esp
popl %eax
- jmp ret_from_sys_call
+ jmp resume_userspace
ENTRY(ret_from_fork)
@@ -180,9 +183,7 @@ ENTRY(ret_from_fork)
call SYMBOL_NAME(schedule_tail)
addl $4, %esp
GET_CURRENT(%ebx)
- testb $0x02,tsk_ptrace(%ebx) # PT_TRACESYS
- jne tracesys_exit
- jmp ret_from_sys_call
+ jmp syscall_exit
/*
* Return to user mode is not as complex as all this looks,
@@ -191,73 +192,105 @@ ENTRY(ret_from_fork)
* less clear than it otherwise should be.
*/
+ # userspace resumption stub bypassing syscall exit tracing
+ ALIGN
+ENTRY(ret_from_intr)
+ GET_CURRENT(%ebx)
+ret_from_exception:
+ movl EFLAGS(%esp),%eax # mix EFLAGS and CS
+ movb CS(%esp),%al
+ testl $(VM_MASK | 3),%eax
+ jz restore_all # returning to kernel-space or vm86-space
+ENTRY(resume_userspace)
+ cli # make sure need_resched and sigpending don't change
+ # between sampling and the iret
+ movl work(%ebx),%ecx
+ andl $0xffff00ff,%ecx # current->work (ignoring syscall_trace)
+ jne work_pending
+ jmp restore_all
+
+ # system call handler stub
+ ALIGN
ENTRY(system_call)
pushl %eax # save orig_eax
SAVE_ALL
GET_CURRENT(%ebx)
- testb $0x02,tsk_ptrace(%ebx) # PT_TRACESYS
- jne tracesys
cmpl $(NR_syscalls),%eax
- jae badsys
+ jae syscall_badsys
+ testb $0xff,syscall_trace(%ebx) # system call tracing in operation
+ jnz syscall_trace_entry
+syscall_traced:
call *SYMBOL_NAME(sys_call_table)(,%eax,4)
- movl %eax,EAX(%esp) # save the return value
-ENTRY(ret_from_sys_call)
- cli # need_resched and signals atomic test
- cmpl $0,need_resched(%ebx)
- jne reschedule
- cmpl $0,sigpending(%ebx)
- jne signal_return
+ movl %eax,EAX(%esp) # store the return value
+syscall_exit:
+ cli # make sure need_resched and sigpending don't change
+ # between sampling and the iret
+ movl work(%ebx),%ecx
+ testl %ecx,%ecx # current->work
+ jne syscall_exit_work
restore_all:
RESTORE_ALL
+ # perform work that needs to be done immediately before resumption
ALIGN
-signal_return:
- sti # we can get here from an interrupt handler
+work_pending:
+ testb %cl,%cl # current->work.need_resched
+ jz work_notifysig
+work_resched:
+ call SYMBOL_NAME(schedule)
+ cli # make sure need_resched and sigpending don't change
+ # between sampling and the iret
+ movl work(%ebx),%ecx
+ andl $0xffff00ff,%ecx # ignore the syscall trace counter
+ jz restore_all
+ testb %cl,%cl # current->work.need_resched
+ jnz work_resched
+
+work_notifysig: # deal with pending signals and notify-resume requests
testl $(VM_MASK),EFLAGS(%esp)
movl %esp,%eax
- jne v86_signal_return
+ jne work_notifysig_v86 # returning to kernel-space or vm86-space
xorl %edx,%edx
- call SYMBOL_NAME(do_signal)
+ call SYMBOL_NAME(do_notify_resume)
jmp restore_all
ALIGN
-v86_signal_return:
+work_notifysig_v86:
+ pushl %ecx
call SYMBOL_NAME(save_v86_state)
+ popl %ecx
movl %eax,%esp
xorl %edx,%edx
- call SYMBOL_NAME(do_signal)
+ call SYMBOL_NAME(do_notify_resume)
jmp restore_all
+ # perform syscall exit tracing
ALIGN
-tracesys:
+syscall_trace_entry:
movl $-ENOSYS,EAX(%esp)
- call SYMBOL_NAME(syscall_trace)
+ movl %esp,%eax
+ xorl %edx,%edx
+ call SYMBOL_NAME(do_syscall_trace)
movl ORIG_EAX(%esp),%eax
cmpl $(NR_syscalls),%eax
- jae tracesys_exit
- call *SYMBOL_NAME(sys_call_table)(,%eax,4)
- movl %eax,EAX(%esp) # save the return value
-tracesys_exit:
- call SYMBOL_NAME(syscall_trace)
- jmp ret_from_sys_call
-badsys:
- movl $-ENOSYS,EAX(%esp)
- jmp ret_from_sys_call
+ jnae syscall_traced
+ jmp syscall_exit
+ # perform syscall exit tracing
ALIGN
-ENTRY(ret_from_intr)
- GET_CURRENT(%ebx)
-ret_from_exception:
- movl EFLAGS(%esp),%eax # mix EFLAGS and CS
- movb CS(%esp),%al
- testl $(VM_MASK | 3),%eax # return to VM86 mode or non-supervisor?
- jne ret_from_sys_call
- jmp restore_all
+syscall_exit_work:
+ testb %ch,%ch # current->work.syscall_trace
+ jz work_pending
+ sti # could let do_syscall_trace() call schedule() instead
+ movl %esp,%eax
+ movl $1,%edx
+ call SYMBOL_NAME(do_syscall_trace)
+ jmp resume_userspace
ALIGN
-reschedule:
- call SYMBOL_NAME(schedule) # test
- jmp ret_from_sys_call
+syscall_badsys:
+ movl $-ENOSYS,EAX(%esp)
+ jmp resume_userspace
ENTRY(divide_error)
pushl $0 # no error code
@@ -622,6 +655,18 @@ ENTRY(sys_call_table)
.long SYMBOL_NAME(sys_ni_syscall) /* Reserved for Security */
.long SYMBOL_NAME(sys_gettid)
.long SYMBOL_NAME(sys_readahead) /* 225 */
+ .long SYMBOL_NAME(sys_setxattr)
+ .long SYMBOL_NAME(sys_lsetxattr)
+ .long SYMBOL_NAME(sys_fsetxattr)
+ .long SYMBOL_NAME(sys_getxattr)
+ .long SYMBOL_NAME(sys_lgetxattr) /* 230 */
+ .long SYMBOL_NAME(sys_fgetxattr)
+ .long SYMBOL_NAME(sys_listxattr)
+ .long SYMBOL_NAME(sys_llistxattr)
+ .long SYMBOL_NAME(sys_flistxattr)
+ .long SYMBOL_NAME(sys_removexattr) /* 235 */
+ .long SYMBOL_NAME(sys_lremovexattr)
+ .long SYMBOL_NAME(sys_fremovexattr)
.rept NR_syscalls-(.-sys_call_table)/4
.long SYMBOL_NAME(sys_ni_syscall)
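
The twelve new entries above (syscalls 226-237) are the extended-attribute interface from Nathan Scott's patch, backed by fs/xattr.c and include/linux/xattr.h in the diffstat. As a rough illustration of how this family is used from userspace, the sketch below relies on the <sys/xattr.h> wrappers that C libraries grew later; it shows the shape of the API, not anything shipped in this commit.

#include <stdio.h>
#include <sys/xattr.h>	/* userspace wrappers added by later C libraries */

int main(void)
{
	const char *path = "testfile";	/* any existing file on an xattr-capable fs */
	char value[64];
	ssize_t len;

	/* Attach an attribute in the "user" namespace... */
	if (setxattr(path, "user.comment", "hello", 5, 0) != 0) {
		perror("setxattr");
		return 1;
	}

	/* ...read it back... */
	len = getxattr(path, "user.comment", value, sizeof(value) - 1);
	if (len < 0) {
		perror("getxattr");
		return 1;
	}
	value[len] = '\0';
	printf("user.comment = %s\n", value);

	/* ...and remove it again. */
	return removexattr(path, "user.comment") ? 1 : 0;
}
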
diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S
index 2705c8058451..b52e143f38de 100644
--- a/arch/i386/kernel/head.S
+++ b/arch/i386/kernel/head.S
@@ -445,6 +445,16 @@ ENTRY(gdt_table)
.quad 0x00409a0000000000 /* 0x48 APM CS code */
.quad 0x00009a0000000000 /* 0x50 APM CS 16 code (16 bit) */
.quad 0x0040920000000000 /* 0x58 APM DS data */
+ /* Segments used for calling PnP BIOS */
+ .quad 0x00c09a0000000000 /* 0x60 32-bit code */
+ .quad 0x00809a0000000000 /* 0x68 16-bit code */
+ .quad 0x0080920000000000 /* 0x70 16-bit data */
+ .quad 0x0080920000000000 /* 0x78 16-bit data */
+ .quad 0x0080920000000000 /* 0x80 16-bit data */
+ .quad 0x0000000000000000 /* 0x88 not used */
+ .quad 0x0000000000000000 /* 0x90 not used */
+ .quad 0x0000000000000000 /* 0x98 not used */
+ /* Per CPU segments */
.fill NR_CPUS*4,8,0 /* space for TSS's and LDT's */
/*
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 558c9d6030cf..50bd8e2ab646 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -89,7 +89,7 @@ static void default_idle(void)
/*
* On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->need_resched flag instead of waiting for the
+ * to poll the ->work.need_resched flag instead of waiting for the
* cross-CPU IPI to arrive. Use this option with caution.
*/
static void poll_idle (void)
@@ -102,15 +102,15 @@ static void poll_idle (void)
* Deal with another CPU just having chosen a thread to
* run here:
*/
- oldval = xchg(&current->need_resched, -1);
+ oldval = xchg(&current->work.need_resched, -1);
if (!oldval)
asm volatile(
"2:"
- "cmpl $-1, %0;"
+ "cmpb $-1, %0;"
"rep; nop;"
"je 2b;"
- : :"m" (current->need_resched));
+ : :"m" (current->work.need_resched));
}
/*
diff --git a/arch/i386/kernel/ptrace.c b/arch/i386/kernel/ptrace.c
index 0fe86897fb0d..96466d39a76c 100644
--- a/arch/i386/kernel/ptrace.c
+++ b/arch/i386/kernel/ptrace.c
@@ -277,10 +277,18 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
ret = -EIO;
if ((unsigned long) data > _NSIG)
break;
- if (request == PTRACE_SYSCALL)
- child->ptrace |= PT_TRACESYS;
- else
- child->ptrace &= ~PT_TRACESYS;
+ if (request == PTRACE_SYSCALL) {
+ if (!(child->ptrace & PT_SYSCALLTRACE)) {
+ child->ptrace |= PT_SYSCALLTRACE;
+ child->work.syscall_trace++;
+ }
+ }
+ else {
+ if (child->ptrace & PT_SYSCALLTRACE) {
+ child->ptrace &= ~PT_SYSCALLTRACE;
+ child->work.syscall_trace--;
+ }
+ }
child->exit_code = data;
/* make sure the single step bit is not set. */
tmp = get_stack_long(child, EFL_OFFSET) & ~TRAP_FLAG;
@@ -315,7 +323,10 @@ asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
ret = -EIO;
if ((unsigned long) data > _NSIG)
break;
- child->ptrace &= ~PT_TRACESYS;
+ if (child->ptrace & PT_SYSCALLTRACE) {
+ child->ptrace &= ~PT_SYSCALLTRACE;
+ child->work.syscall_trace--;
+ }
if ((child->ptrace & PT_DTRACE) == 0) {
/* Spurious delayed TF traps may occur */
child->ptrace |= PT_DTRACE;
@@ -439,10 +450,14 @@ out:
return ret;
}
-asmlinkage void syscall_trace(void)
+/* notification of system call entry/exit
+ * - triggered by current->work.syscall_trace
+ */
+__attribute__((regparm(3)))
+void do_syscall_trace(struct pt_regs *regs, int entryexit)
{
- if ((current->ptrace & (PT_PTRACED|PT_TRACESYS)) !=
- (PT_PTRACED|PT_TRACESYS))
+ if ((current->ptrace & (PT_PTRACED|PT_SYSCALLTRACE)) !=
+ (PT_PTRACED|PT_SYSCALLTRACE))
return;
/* the 0x80 provides a way for the tracing parent to distinguish
between a syscall stop and SIGTRAP delivery */
@@ -461,3 +476,15 @@ asmlinkage void syscall_trace(void)
current->exit_code = 0;
}
}
+
+/* notification of userspace execution resumption
+ * - triggered by current->work.notify_resume
+ */
+__attribute__((regparm(3)))
+void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
+ struct task_work work_pending)
+{
+ /* deal with pending signal delivery */
+ if (work_pending.sigpending)
+ do_signal(regs,oldset);
+}
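
The new entry.S offsets (work = 8, with need_resched, syscall_trace, sigpending and notify_resume at bytes 0-3 within it) together with the do_notify_resume() prototype above suggest that the latency patch collapses the old per-task int flags into a byte-per-flag structure in include/linux/sched.h, whose hunk is not reproduced on this page. A speculative reconstruction, useful only for reading the assembly:

#include <linux/types.h>	/* __u8 */

/* Inferred from the i386 entry.S offsets and the
 * do_notify_resume(regs, oldset, work_pending) prototype in this patch;
 * the authoritative definition is in the include/linux/sched.h change. */
struct task_work {
	__u8	need_resched;	/* work+0: tested in %cl by the work_pending path    */
	__u8	syscall_trace;	/* work+1: count of PT_SYSCALLTRACE attachments      */
	__u8	sigpending;	/* work+2: signal delivery requested                 */
	__u8	notify_resume;	/* work+3: other callbacks before returning to user  */
};

Packing the flags into one word is what lets syscall_exit test all of current->work with a single movl/testl pair instead of separate compares for need_resched and sigpending.
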
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index dd99d9dfe0df..7cfa09402361 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -28,8 +28,6 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-int FASTCALL(do_signal(struct pt_regs *regs, sigset_t *oldset));
-
int copy_siginfo_to_user(siginfo_t *to, siginfo_t *from)
{
if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index f0d730fd9d5b..130c45eabb17 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -271,6 +271,22 @@ static void inline do_trap(int trapnr, int signr, char *str, int vm86,
{
if (vm86 && regs->eflags & VM_MASK)
goto vm86_trap;
+
+#ifdef CONFIG_PNPBIOS
+ if (regs->xcs == 0x60 || regs->xcs == 0x68)
+ {
+ extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
+ extern u32 pnp_bios_is_utter_crap;
+ pnp_bios_is_utter_crap = 1;
+ printk(KERN_CRIT "PNPBIOS fault.. attempting recovery.\n");
+ __asm__ volatile(
+ "movl %0, %%esp\n\t"
+ "jmp %1\n\t"
+ : "=a" (pnp_bios_fault_esp), "=b" (pnp_bios_fault_eip));
+ panic("do_trap: can't hit this");
+ }
+#endif
+
if (!(regs->xcs & 3))
goto kernel_trap;
diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
index 56b1e3a89e09..8fde50bffcc8 100644
--- a/arch/i386/kernel/vm86.c
+++ b/arch/i386/kernel/vm86.c
@@ -212,7 +212,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
info->regs.__null_ds = 0;
info->regs.__null_es = 0;
-/* we are clearing fs,gs later just before "jmp ret_from_sys_call",
+/* we are clearing fs,gs later just before "jmp resume_userspace",
* because starting with Linux 2.1.x they aren't no longer saved/restored
*/
@@ -255,7 +255,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
__asm__ __volatile__(
"xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs\n\t"
"movl %0,%%esp\n\t"
- "jmp ret_from_sys_call"
+ "jmp resume_userspace"
: /* no outputs */
:"r" (&info->regs), "b" (tsk) : "ax");
/* we never return here */
@@ -268,7 +268,7 @@ static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
regs32 = save_v86_state(regs16);
regs32->eax = retval;
__asm__ __volatile__("movl %0,%%esp\n\t"
- "jmp ret_from_sys_call"
+ "jmp resume_userspace"
: : "r" (regs32), "b" (current));
}
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 07099adb1ea0..4b88d01bb8fc 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -433,7 +433,7 @@ GLOBAL_ENTRY(invoke_syscall_trace)
.body
mov loc2=b6
;;
- br.call.sptk.many rp=syscall_trace
+#error br.call.sptk.many rp=syscall_trace
.ret3: mov rp=loc0
mov ar.pfs=loc1
mov b6=loc2
@@ -454,7 +454,7 @@ END(invoke_syscall_trace)
GLOBAL_ENTRY(ia64_trace_syscall)
PT_REGS_UNWIND_INFO(0)
- br.call.sptk.many rp=invoke_syscall_trace // give parent a chance to catch syscall args
+#error br.call.sptk.many rp=invoke_syscall_trace // give parent a chance to catch syscall args
.ret6: br.call.sptk.many rp=b6 // do the syscall
strace_check_retval:
cmp.lt p6,p0=r8,r0 // syscall failed?
@@ -467,7 +467,7 @@ strace_save_retval:
.mem.offset 0,0; st8.spill [r2]=r8 // store return value in slot for r8
.mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10
ia64_strace_leave_kernel:
- br.call.sptk.many rp=invoke_syscall_trace // give parent a chance to catch return value
+#error br.call.sptk.many rp=invoke_syscall_trace // give parent a chance to catch return value
.rety: br.cond.sptk ia64_leave_kernel
strace_error:
@@ -530,14 +530,14 @@ GLOBAL_ENTRY(ia64_leave_kernel)
#ifdef CONFIG_PERFMON
(pUser) ld8 r19=[r19] // load current->thread.pfm_must_block
#endif
-(pUser) ld8 r17=[r17] // load current->need_resched
-(pUser) ld4 r18=[r18] // load current->sigpending
+#error (pUser) ld8 r17=[r17] // load current->need_resched
+#error (pUser) ld4 r18=[r18] // load current->sigpending
;;
#ifdef CONFIG_PERFMON
(pUser) cmp.ne.unc p9,p0=r19,r0 // current->thread.pfm_must_block != 0?
#endif
-(pUser) cmp.ne.unc p7,p0=r17,r0 // current->need_resched != 0?
-(pUser) cmp.ne.unc p8,p0=r18,r0 // current->sigpending != 0?
+#error (pUser) cmp.ne.unc p7,p0=r17,r0 // current->need_resched != 0?
+#errror (pUser) cmp.ne.unc p8,p0=r18,r0 // current->sigpending != 0?
;;
adds r2=PT(R8)+16,r12
adds r3=PT(R9)+16,r12
@@ -816,7 +816,7 @@ ENTRY(handle_signal_delivery)
.spillpsp ar.unat, 16 // (note that offset is relative to psp+0x10!)
st8 [sp]=r9,-16 // allocate space for ar.unat and save it
.body
- br.call.sptk.many rp=ia64_do_signal
+#error br.call.sptk.many rp=ia64_do_signal
.ret15: .restore sp
adds sp=16,sp // pop scratch stack space
;;
diff --git a/arch/ia64/tools/print_offsets.c b/arch/ia64/tools/print_offsets.c
index 29f733241595..d1b9727b6242 100644
--- a/arch/ia64/tools/print_offsets.c
+++ b/arch/ia64/tools/print_offsets.c
@@ -51,9 +51,9 @@ tab[] =
{ "SIGFRAME_SIZE", sizeof (struct sigframe) },
{ "UNW_FRAME_INFO_SIZE", sizeof (struct unw_frame_info) },
{ "", 0 }, /* spacer */
- { "IA64_TASK_PTRACE_OFFSET", offsetof (struct task_struct, ptrace) },
- { "IA64_TASK_SIGPENDING_OFFSET", offsetof (struct task_struct, sigpending) },
- { "IA64_TASK_NEED_RESCHED_OFFSET", offsetof (struct task_struct, need_resched) },
+#error { "IA64_TASK_PTRACE_OFFSET", offsetof (struct task_struct, ptrace) },
+#error { "IA64_TASK_SIGPENDING_OFFSET", offsetof (struct task_struct, sigpending) },
+#error { "IA64_TASK_NEED_RESCHED_OFFSET", offsetof (struct task_struct, need_resched) },
{ "IA64_TASK_PROCESSOR_OFFSET", offsetof (struct task_struct, processor) },
{ "IA64_TASK_THREAD_OFFSET", offsetof (struct task_struct, thread) },
{ "IA64_TASK_THREAD_KSP_OFFSET", offsetof (struct task_struct, thread.ksp) },
diff --git a/arch/m68k/kernel/m68k_defs.c b/arch/m68k/kernel/m68k_defs.c
index 817583a7d4c8..42873ab0e674 100644
--- a/arch/m68k/kernel/m68k_defs.c
+++ b/arch/m68k/kernel/m68k_defs.c
@@ -25,8 +25,8 @@ int main(void)
DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
- DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, sigpending));
- DEFINE(TASK_NEEDRESCHED, offsetof(struct task_struct, need_resched));
+#error DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, work.sigpending));
+#error DEFINE(TASK_NEEDRESCHED, offsetof(struct task_struct, work.need_resched));
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
DEFINE(TASK_MM, offsetof(struct task_struct, mm));
DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index b0675218abe2..3d816c8bc385 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -36,7 +36,7 @@
EXPORT(ret_from_fork)
move a0, v0 # prev
jal schedule_tail
- lw t0, TASK_PTRACE($28) # syscall tracing enabled?
+#error lw t0, TASK_PTRACE($28) # syscall tracing enabled?
andi t0, PT_TRACESYS
bnez t0, tracesys_exit
j ret_from_sys_call
@@ -62,8 +62,8 @@ EXPORT(ret_from_sys_call)
mtc0 t0, CP0_STATUS
nop; nop; nop
- lw v0, TASK_NEED_RESCHED($28)
- lw v1, TASK_SIGPENDING($28)
+#error lw v0, TASK_NEED_RESCHED($28)
+#error lw v1, TASK_SIGPENDING($28)
bnez v0, reschedule
bnez v1, signal_return
restore_all: .set noat
@@ -80,7 +80,7 @@ signal_return:
move a0, zero
move a1, sp
- jal do_signal
+#error jal do_signal
b restore_all
/*
diff --git a/arch/mips/kernel/scall_o32.S b/arch/mips/kernel/scall_o32.S
index 3bff5792be1c..7df9bd8bbbdb 100644
--- a/arch/mips/kernel/scall_o32.S
+++ b/arch/mips/kernel/scall_o32.S
@@ -49,7 +49,7 @@ NESTED(handle_sys, PT_SIZE, sp)
stack_done:
sw a3, PT_R26(sp) # save for syscall restart
- lw t0, TASK_PTRACE($28) # syscall tracing enabled?
+#error lw t0, TASK_PTRACE($28) # syscall tracing enabled?
andi t0, PT_TRACESYS
bnez t0, trace_a_syscall
@@ -70,9 +70,9 @@ EXPORT(o32_ret_from_sys_call)
xori t0, t0, 1
mtc0 t0, CP0_STATUS
- lw t2, TASK_NEED_RESCHED($28)
+#error lw t2, TASK_NEED_RESCHED($28)
bnez t2, o32_reschedule
- lw v0, TASK_SIGPENDING($28)
+#error lw v0, TASK_SIGPENDING($28)
bnez v0, signal_return
restore_all:
RESTORE_SOME
@@ -88,7 +88,7 @@ signal_return:
move a0, zero
move a1, sp
- jal do_signal
+#error jal do_signal
b restore_all
o32_reschedule:
@@ -101,7 +101,7 @@ o32_reschedule:
trace_a_syscall:
SAVE_STATIC
sw t2, PT_R1(sp)
- jal syscall_trace
+#error jal syscall_trace
lw t2, PT_R1(sp)
lw a0, PT_R4(sp) # Restore argument registers
@@ -119,7 +119,7 @@ trace_a_syscall:
sw v0, PT_R0(sp) # set flag for syscall restarting
1: sw v0, PT_R2(sp) # result
- jal syscall_trace
+#error jal syscall_trace
j ret_from_sys_call
/* ------------------------------------------------------------------------ */
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 48085d3155d7..b94a75eb2a6c 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -179,7 +179,7 @@ void __init smp_commence(void)
static void reschedule_this_cpu(void *dummy)
{
- current->need_resched = 1;
+ current->work.need_resched = 1;
}
void FASTCALL(smp_send_reschedule(int cpu))
diff --git a/arch/mips/tools/offset.c b/arch/mips/tools/offset.c
index 7df4a6c1dcc0..110554358e2d 100644
--- a/arch/mips/tools/offset.c
+++ b/arch/mips/tools/offset.c
@@ -79,9 +79,9 @@ void output_task_defines(void)
text("/* MIPS task_struct offsets. */");
offset("#define TASK_STATE ", struct task_struct, state);
offset("#define TASK_FLAGS ", struct task_struct, flags);
- offset("#define TASK_SIGPENDING ", struct task_struct, sigpending);
- offset("#define TASK_NEED_RESCHED ", struct task_struct, need_resched);
- offset("#define TASK_PTRACE ", struct task_struct, ptrace);
+#error offset("#define TASK_SIGPENDING ", struct task_struct, work.sigpending);
+#error offset("#define TASK_NEED_RESCHED ", struct task_struct, work.need_resched);
+#error offset("#define TASK_PTRACE ", struct task_struct, ptrace);
offset("#define TASK_COUNTER ", struct task_struct, counter);
offset("#define TASK_NICE ", struct task_struct, nice);
offset("#define TASK_MM ", struct task_struct, mm);
diff --git a/arch/mips64/kernel/entry.S b/arch/mips64/kernel/entry.S
index a6ca5a4f2a42..14ace8b60301 100644
--- a/arch/mips64/kernel/entry.S
+++ b/arch/mips64/kernel/entry.S
@@ -17,7 +17,7 @@
#include <asm/stackframe.h>
/* This duplicates the definition from <linux/sched.h> */
-#define PT_TRACESYS 0x00000002 /* tracing system calls */
+#error #define PT_TRACESYS 0x00000002 /* tracing system calls */
#define KU_USER 0x10
@@ -26,8 +26,8 @@
FEXPORT(ret_from_fork)
move a0, v0 # prev
jal schedule_tail
- lw t0, TASK_PTRACE($28) # syscall tracing enabled?
- andi t0, PT_TRACESYS
+#error lw t0, TASK_PTRACE($28) # syscall tracing enabled?
+#error andi t0, PT_TRACESYS
bnez t0, tracesys_exit
j ret_from_sys_call
@@ -50,8 +50,8 @@ FEXPORT(ret_from_sys_call)
xori t0, t0, 1
mtc0 t0, CP0_STATUS
- ld v0, TASK_NEED_RESCHED($28)
- lw v1, TASK_SIGPENDING($28)
+#error ld v0, TASK_NEED_RESCHED($28)
+#error lw v1, TASK_SIGPENDING($28)
bnez v0, reschedule
bnez v1, signal_return
diff --git a/arch/mips64/kernel/scall_64.S b/arch/mips64/kernel/scall_64.S
index 8b19f20e0fa1..70b70cd7caa7 100644
--- a/arch/mips64/kernel/scall_64.S
+++ b/arch/mips64/kernel/scall_64.S
@@ -69,9 +69,9 @@ ret_from_sys_call:
ori t0, t0, 1
mtc0 t0, CP0_STATUS
- ld t2, TASK_NEED_RESCHED($28)
+#error ld t2, TASK_NEED_RESCHED($28)
bnez t2, reschedule
- lw v0, TASK_SIGPENDING($28)
+#error lw v0, TASK_SIGPENDING($28)
bnez v0, signal_return
restore_all:
diff --git a/arch/mips64/kernel/scall_o32.S b/arch/mips64/kernel/scall_o32.S
index 600d2f3d91aa..00a45caa4548 100644
--- a/arch/mips64/kernel/scall_o32.S
+++ b/arch/mips64/kernel/scall_o32.S
@@ -22,7 +22,7 @@
#include <asm/sysmips.h>
/* This duplicates the definition from <linux/sched.h> */
-#define PT_TRACESYS 0x00000002 /* tracing system calls */
+#error #define PT_TRACESYS 0x00000002 /* tracing system calls */
/* This duplicates the definition from <asm/signal.h> */
#define SIGILL 4 /* Illegal instruction (ANSI). */
@@ -54,8 +54,8 @@ NESTED(handle_sys, PT_SIZE, sp)
bgez t0, stackargs
stack_done:
- ld t0, TASK_PTRACE($28) # syscall tracing enabled?
- andi t0, PT_TRACESYS
+#error ld t0, TASK_PTRACE($28) # syscall tracing enabled?
+#error andi t0, PT_TRACESYS
bnez t0, trace_a_syscall
jalr t2 # Do The Real Thing (TM)
@@ -75,9 +75,9 @@ FEXPORT(o32_ret_from_sys_call)
xori t0, t0, 1
mtc0 t0, CP0_STATUS
- ld t2, TASK_NEED_RESCHED($28)
+#error ld t2, TASK_NEED_RESCHED($28)
bnez t2, o32_reschedule
- lw v0, TASK_SIGPENDING($28)
+#error lw v0, TASK_SIGPENDING($28)
bnez v0, signal_return
restore_all: RESTORE_SOME
@@ -92,7 +92,7 @@ signal_return: mfc0 t0, CP0_STATUS # need_resched and signals atomic test
move a0, zero
move a1, sp
SAVE_STATIC
- jal do_signal
+#error jal do_signal
o32_reschedule:
SAVE_STATIC
@@ -130,7 +130,7 @@ trace_a_syscall:
sd v0, PT_R0(sp) # set flag for syscall restarting
1: sd v0, PT_R2(sp) # result
- jal syscall_trace
+#error jal syscall_trace
j o32_ret_from_sys_call
/* ------------------------------------------------------------------------ */
@@ -213,13 +213,13 @@ illegal_syscall:
1: sd v0, PT_R2(sp) # result
/* Success, so skip usual error handling garbage. */
- ld t0, TASK_PTRACE($28) # syscall tracing enabled?
- andi t0, PT_TRACESYS
+#error ld t0, TASK_PTRACE($28) # syscall tracing enabled?
+#error andi t0, PT_TRACESYS
bnez t0, 1f
b o32_ret_from_sys_call
1: SAVE_STATIC
- jal syscall_trace
+#error jal syscall_trace
li a3, 0 # success
j ret_from_sys_call
diff --git a/arch/mips64/tools/offset.c b/arch/mips64/tools/offset.c
index 37324533c935..3b31d13c00a3 100644
--- a/arch/mips64/tools/offset.c
+++ b/arch/mips64/tools/offset.c
@@ -76,9 +76,9 @@ void output_task_defines(void)
text("/* MIPS task_struct offsets. */");
offset("#define TASK_STATE ", struct task_struct, state);
offset("#define TASK_FLAGS ", struct task_struct, flags);
- offset("#define TASK_SIGPENDING ", struct task_struct, sigpending);
- offset("#define TASK_NEED_RESCHED ", struct task_struct, need_resched);
- offset("#define TASK_PTRACE ", struct task_struct, ptrace);
+#error offset("#define TASK_SIGPENDING ", struct task_struct, sigpending);
+#error offset("#define TASK_NEED_RESCHED ", struct task_struct, need_resched);
+#error offset("#define TASK_PTRACE ", struct task_struct, ptrace);
offset("#define TASK_COUNTER ", struct task_struct, counter);
offset("#define TASK_NICE ", struct task_struct, nice);
offset("#define TASK_MM ", struct task_struct, mm);
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 0839e0bbb4f2..86bfbddb45a4 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -650,14 +650,14 @@ intr_check_resched:
copy %r30,%r1
/* FIXME! depi below has hardcoded dependency on kernel stack size */
depi 0,31,14,%r1 /* get task pointer */
- LDREG TASK_NEED_RESCHED(%r1),%r19 /* sched.h: long need_resched */
+#error LDREG TASK_NEED_RESCHED(%r1),%r19 /* sched.h: long need_resched */
comib,<>,n 0,%r19,intr_do_resched /* forward */
intr_check_sig:
/* As above */
copy %r30,%r1
depi 0,31,14,%r1 /* get task pointer */
- ldw TASK_SIGPENDING(%r1),%r19 /* sched.h: int sigpending */
+#error ldw TASK_SIGPENDING(%r1),%r19 /* sched.h: int sigpending */
comib,<>,n 0,%r19,intr_do_signal /* forward */
intr_restore:
@@ -719,7 +719,7 @@ intr_do_signal:
copy %r0, %r24 /* unsigned long in_syscall */
copy %r16, %r25 /* struct pt_regs *regs */
ssm PSW_SM_I, %r0
- bl do_signal,%r2
+#error bl do_signal,%r2
copy %r0, %r26 /* sigset_t *oldset = NULL */
b intr_restore
@@ -1699,20 +1699,20 @@ syscall_check_resched:
/* check for reschedule */
- LDREG TASK_NEED_RESCHED-TASK_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
+#error LDREG TASK_NEED_RESCHED-TASK_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
comib,<>,n 0,%r19,syscall_do_resched /* forward */
syscall_check_sig:
ldo -TASK_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
/* check for pending signals */
- ldw TASK_SIGPENDING(%r1),%r19
+#error ldw TASK_SIGPENDING(%r1),%r19
comib,<>,n 0,%r19,syscall_do_signal /* forward */
syscall_restore:
/* disable interrupts while dicking with the kernel stack, */
/* or life can become unpleasant */
rsm PSW_SM_I, %r20
- LDREG TASK_PTRACE(%r1), %r19 /* Are we being ptraced? */
+#error LDREG TASK_PTRACE(%r1), %r19 /* Are we being ptraced? */
bb,<,n %r19,31,syscall_restore_rfi
LDREG TASK_PT_GR20(%r1),%r19
mtctl %r19, %cr27
@@ -1840,7 +1840,7 @@ syscall_do_signal:
ldi 1, %r24 /* unsigned long in_syscall */
- bl do_signal,%r2
+#error bl do_signal,%r2
copy %r0, %r26 /* sigset_t *oldset = NULL */
ldo -TASK_SZ_ALGN-FRAME_SIZE(%r30), %r1 /* reload task ptr */
diff --git a/arch/parisc/tools/offset.c b/arch/parisc/tools/offset.c
index 167087f7e807..05dfec675936 100644
--- a/arch/parisc/tools/offset.c
+++ b/arch/parisc/tools/offset.c
@@ -241,11 +241,11 @@ void output_task_defines(void)
text("/* PARISC task_struct offsets. */");
offset("#define TASK_STATE ", struct task_struct, state);
offset("#define TASK_FLAGS ", struct task_struct, flags);
- offset("#define TASK_SIGPENDING ", struct task_struct, sigpending);
+#error offset("#define TASK_SIGPENDING ", struct task_struct, sigpending);
offset("#define TASK_SEGMENT ", struct task_struct, addr_limit);
- offset("#define TASK_NEED_RESCHED ", struct task_struct, need_resched);
+#error offset("#define TASK_NEED_RESCHED ", struct task_struct, need_resched);
offset("#define TASK_COUNTER ", struct task_struct, counter);
- offset("#define TASK_PTRACE ", struct task_struct, ptrace);
+#error offset("#define TASK_PTRACE ", struct task_struct, ptrace);
offset("#define TASK_NICE ", struct task_struct, nice);
offset("#define TASK_MM ", struct task_struct, mm);
offset("#define TASK_PROCESSOR ", struct task_struct, processor);
diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S
index 5ab5d6df61a7..cb4ff46032f5 100644
--- a/arch/ppc/kernel/entry.S
+++ b/arch/ppc/kernel/entry.S
@@ -261,9 +261,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
.globl ret_from_fork
ret_from_fork:
bl schedule_tail
- lwz r0,TASK_PTRACE(r2)
+#error lwz r0,TASK_PTRACE(r2)
andi. r0,r0,PT_TRACESYS
- bnel- syscall_trace
+#error bnel- syscall_trace
b ret_from_except
.globl ret_from_intercept
@@ -279,16 +279,16 @@ ret_from_except:
lwz r3,_MSR(r1) /* Returning to user mode? */
andi. r3,r3,MSR_PR
beq+ do_signal_ret /* if so, check need_resched and signals */
- lwz r3,NEED_RESCHED(r2)
+#error lwz r3,NEED_RESCHED(r2)
cmpi 0,r3,0 /* check need_resched flag */
beq+ 7f
bl schedule
-7: lwz r5,SIGPENDING(r2) /* Check for pending unblocked signals */
+#error 7: lwz r5,SIGPENDING(r2) /* Check for pending unblocked signals */
cmpwi 0,r5,0
beq+ do_signal_ret
li r3,0
addi r4,r1,STACK_FRAME_OVERHEAD
- bl do_signal
+#error bl do_signal
.globl do_signal_ret
do_signal_ret:
.globl ret_to_user_hook
diff --git a/arch/ppc/kernel/mk_defs.c b/arch/ppc/kernel/mk_defs.c
index 1d0da0341a8d..2ff838e89c04 100644
--- a/arch/ppc/kernel/mk_defs.c
+++ b/arch/ppc/kernel/mk_defs.c
@@ -39,7 +39,7 @@ main(void)
DEFINE(NEXT_TASK, offsetof(struct task_struct, next_task));
DEFINE(COUNTER, offsetof(struct task_struct, counter));
DEFINE(PROCESSOR, offsetof(struct task_struct, processor));
- DEFINE(SIGPENDING, offsetof(struct task_struct, sigpending));
+#error DEFINE(SIGPENDING, offsetof(struct task_struct, sigpending));
DEFINE(THREAD, offsetof(struct task_struct, thread));
DEFINE(MM, offsetof(struct task_struct, mm));
DEFINE(ACTIVE_MM, offsetof(struct task_struct, active_mm));
@@ -50,8 +50,8 @@ main(void)
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
DEFINE(PT_TRACESYS, PT_TRACESYS);
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
- DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
- DEFINE(NEED_RESCHED, offsetof(struct task_struct, need_resched));
+#error DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
+#error DEFINE(NEED_RESCHED, offsetof(struct task_struct, need_resched));
DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
#ifdef CONFIG_ALTIVEC
diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c
index cf202435f1b1..e5e3247924d3 100644
--- a/arch/ppc/kernel/smp.c
+++ b/arch/ppc/kernel/smp.c
@@ -106,7 +106,7 @@ void smp_message_recv(int msg, struct pt_regs *regs)
smp_call_function_interrupt();
break;
case PPC_MSG_RESCHEDULE:
- current->need_resched = 1;
+ current->work.need_resched = 1;
break;
case PPC_MSG_INVALIDATE_TLB:
_tlbia();
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index bc4535cf737a..ea9541286059 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -76,9 +76,9 @@ _TSS_FLAGS = (_TSS_IEEE+4)
*/
state = 0
flags = 4
-sigpending = 8
-need_resched = 24
-tsk_ptrace = 28
+#error sigpending = 8
+#error need_resched = 24
+#error tsk_ptrace = 28
processor = 56
/*
@@ -218,7 +218,7 @@ pgm_system_call:
stosm 24(%r15),0x03 # reenable interrupts
sll %r8,2
l %r8,sys_call_table-entry_base(8,%r13) # get address of system call
- tm tsk_ptrace+3(%r9),0x02 # PT_TRACESYS
+#error tm tsk_ptrace+3(%r9),0x02 # PT_TRACESYS
bnz BASED(sysc_tracesys)
basr %r14,%r8 # call sys_xxxx
st %r2,SP_R2(%r15) # store return value (change R2 on stack)
@@ -231,9 +231,9 @@ sysc_return:
#
# check, if reschedule is needed
#
- icm %r0,15,need_resched(%r9) # get need_resched from task_struct
+#error icm %r0,15,need_resched(%r9) # get need_resched from task_struct
bnz BASED(sysc_reschedule)
- icm %r0,15,sigpending(%r9) # get sigpending from task_struct
+#error icm %r0,15,sigpending(%r9) # get sigpending from task_struct
bnz BASED(sysc_signal_return)
sysc_leave:
tm SP_PGM_OLD_ILC(%r15),0xff
@@ -744,9 +744,9 @@ io_return_bh:
#
# check, if reschedule is needed
#
- icm %r0,15,need_resched(%r9) # get need_resched from task_struct
+#error icm %r0,15,need_resched(%r9) # get need_resched from task_struct
bnz BASED(io_reschedule)
- icm %r0,15,sigpending(%r9) # get sigpending from task_struct
+#error icm %r0,15,sigpending(%r9) # get sigpending from task_struct
bnz BASED(io_signal_return)
io_leave:
stnsm 24(%r15),0xfc # disable I/O and ext. interrupts
@@ -874,7 +874,7 @@ restart_go:
*/
.Ls390_mcck: .long s390_do_machine_check
.Ldo_IRQ: .long do_IRQ
-.Ldo_signal: .long do_signal
+#error .Ldo_signal: .long do_signal
.Ldo_softirq: .long do_softirq
.Lentry_base: .long entry_base
.Lext_hash: .long ext_int_hash
@@ -891,7 +891,7 @@ restart_go:
.Lsigreturn: .long sys_sigreturn
.Lsigsuspend: .long sys_sigsuspend
.Lsigaltstack: .long sys_sigaltstack
-.Ltrace: .long syscall_trace
+#error .Ltrace: .long syscall_trace
.Lvfork: .long sys_vfork
#ifdef CONFIG_SMP
diff --git a/arch/s390x/kernel/entry.S b/arch/s390x/kernel/entry.S
index 567bff52719f..c4f22df5a322 100644
--- a/arch/s390x/kernel/entry.S
+++ b/arch/s390x/kernel/entry.S
@@ -76,9 +76,9 @@ _TSS_FLAGS = (_TSS_IEEE+8)
*/
state = 0
flags = 8
-sigpending = 16
-need_resched = 32
-tsk_ptrace = 40
+#error sigpending = 16
+#error need_resched = 32
+#error tsk_ptrace = 40
processor = 92
/*
@@ -220,10 +220,10 @@ sysc_return:
#
# check, if reschedule is needed
#
- lg %r0,need_resched(%r9) # get need_resched from task_struct
+#error lg %r0,need_resched(%r9) # get need_resched from task_struct
ltgr %r0,%r0
jnz sysc_reschedule
- icm %r0,15,sigpending(%r9) # get sigpending from task_struct
+#error icm %r0,15,sigpending(%r9) # get sigpending from task_struct
jnz sysc_signal_return
sysc_leave:
tm SP_PGM_OLD_ILC(%r15),0xff
@@ -246,7 +246,7 @@ sysc_signal_return:
sysc_tracesys:
lghi %r2,-ENOSYS
stg %r2,SP_R2(%r15) # give sysc_trace an -ENOSYS retval
- brasl %r14,syscall_trace
+#error brasl %r14,syscall_trace
lg %r2,SP_R2(%r15)
cghi %r2,-ENOSYS
je sysc_tracesys_dn1
@@ -263,7 +263,7 @@ sysc_tracesys_dn1:
basr %r14,%r8 # call sys_xxx
stg %r2,SP_R2(%r15) # store return value
larl %r14,sysc_return
- jg syscall_trace # return point is sysc_return
+#error jg syscall_trace # return point is sysc_return
#
# call schedule with sysc_return as return-address
@@ -734,10 +734,10 @@ io_return_bh:
#
# check, if reschedule is needed
#
- lg %r0,need_resched(%r9) # get need_resched from task_struct
+#error lg %r0,need_resched(%r9) # get need_resched from task_struct
ltgr %r0,%r0
jnz io_reschedule
- icm %r0,15,sigpending(%r9) # get sigpending from task_struct
+#error icm %r0,15,sigpending(%r9) # get sigpending from task_struct
jnz io_signal_return
io_leave:
stnsm 48(%r15),0xfc # disable I/O and ext. interrupts
diff --git a/arch/sh/kernel/entry.S b/arch/sh/kernel/entry.S
index cb382df16983..620beadcfb6e 100644
--- a/arch/sh/kernel/entry.S
+++ b/arch/sh/kernel/entry.S
@@ -61,11 +61,11 @@
* These are offsets into the task-struct.
*/
flags = 4
-sigpending = 8
-need_resched = 20
-tsk_ptrace = 24
+#error sigpending = 8
+#error need_resched = 20
+#error tsk_ptrace = 24
-PT_TRACESYS = 0x00000002
+#error PT_TRACESYS = 0x00000002
ENOSYS = 38
EINVAL = 22
@@ -300,8 +300,8 @@ ENTRY(ret_from_fork)
! If we're being traced, return via syscall_ret_trace, otherwise
! return directly to ret_from_syscall
stc k_current, r0
- mov.l @(tsk_ptrace,r0), r0 ! Is current PTRACE_SYSCALL'd?
- mov #PT_TRACESYS, r1
+#error mov.l @(tsk_ptrace,r0), r0 ! Is current PTRACE_SYSCALL'd?
+#error mov #PT_TRACESYS, r1
tst r1, r0
bt ret_from_syscall
bra syscall_ret_trace
@@ -371,8 +371,8 @@ system_call:
STI()
!
stc k_current, r11
- mov.l @(tsk_ptrace,r11), r10 ! Is current PTRACE_SYSCALL'd?
- mov #PT_TRACESYS, r11
+#error mov.l @(tsk_ptrace,r11), r10 ! Is current PTRACE_SYSCALL'd?
+#error mov #PT_TRACESYS, r11
tst r11, r10
bt 5f
! Yes it is traced.
@@ -497,7 +497,7 @@ fixup_syscall_argerr:
.align 2
__TRA: .long TRA
__syscall_trace:
- .long SYMBOL_NAME(syscall_trace)
+#error .long SYMBOL_NAME(syscall_trace)
__n_sys:.long NR_syscalls
__sct: .long SYMBOL_NAME(sys_call_table)
__syscall_ret_trace:
@@ -545,10 +545,10 @@ ENTRY(ret_from_syscall)
ldc r0, sr
!
stc k_current, r1
- mov.l @(need_resched,r1), r0
+#error mov.l @(need_resched,r1), r0
tst r0, r0
bf reschedule
- mov.l @(sigpending,r1), r0
+#error mov.l @(sigpending,r1), r0
tst r0, r0
bt restore_all
signal_return:
@@ -560,7 +560,7 @@ signal_return:
lds r0, pr
.align 2
__do_signal:
- .long SYMBOL_NAME(do_signal)
+#error .long SYMBOL_NAME(do_signal)
__irq_stat:
.long SYMBOL_NAME(irq_stat)
diff --git a/arch/sparc/kernel/rtrap.S b/arch/sparc/kernel/rtrap.S
index fc1b8d9d4494..566233e82fb8 100644
--- a/arch/sparc/kernel/rtrap.S
+++ b/arch/sparc/kernel/rtrap.S
@@ -58,15 +58,15 @@ C_LABEL(ret_trap_lockless_ipi):
nop
1:
- ld [%curptr + AOFF_task_need_resched], %g2
+#error ld [%curptr + AOFF_task_need_resched], %g2
orcc %g2, %g0, %g0
be signal_p
- ld [%curptr + AOFF_task_sigpending], %g2
+#error ld [%curptr + AOFF_task_sigpending], %g2
call C_LABEL(schedule)
nop
- ld [%curptr + AOFF_task_sigpending], %g2
+#error ld [%curptr + AOFF_task_sigpending], %g2
signal_p:
cmp %g2, 0
bz,a ret_trap_continue
@@ -98,7 +98,7 @@ ret_trap_continue:
add %sp, REGWIN_SZ, %o0
b signal_p
- ld [%curptr + AOFF_task_sigpending], %g2
+#error ld [%curptr + AOFF_task_sigpending], %g2
ret_trap_nobufwins:
/* Load up the user's out registers so we can pull
@@ -168,7 +168,7 @@ ret_trap_unaligned_pc:
nop
b signal_p
- ld [%curptr + AOFF_task_sigpending], %g2
+#error ld [%curptr + AOFF_task_sigpending], %g2
ret_trap_kernel:
/* Will the rett land us in the invalid window? */
@@ -218,7 +218,7 @@ ret_trap_user_stack_is_bolixed:
add %sp, REGWIN_SZ, %o0
b signal_p
- ld [%curptr + AOFF_task_sigpending], %g2
+#error ld [%curptr + AOFF_task_sigpending], %g2
.globl C_LABEL(sun4c_rett_stackchk)
C_LABEL(sun4c_rett_stackchk):
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
index 1fb26314d184..3e329fa532ef 100644
--- a/arch/sparc/kernel/smp.c
+++ b/arch/sparc/kernel/smp.c
@@ -247,7 +247,7 @@ void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
/* Reschedule call back. */
void smp_reschedule_irq(void)
{
- current->need_resched = 1;
+ current->work.need_resched = 1;
}
/* Stopping processors. */
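The one-line smp.c change above (current->need_resched becoming current->work.need_resched) reflects the syscall-latency rework from the changelog: the per-task flags tested on the return-to-user path appear to have been grouped into a single sub-structure so entry code can check them together. A rough C sketch of that kind of grouping — field names are illustrative, inferred only from the hunks in this patch, not taken from the real header:

/* Hedged sketch of grouping the return-path flags; layout is illustrative. */
struct task_work_sketch {
	unsigned long need_resched;
	unsigned long syscall_trace;
	unsigned long sigpending;
};

struct task_struct_sketch {
	/* ... */
	struct task_work_sketch work;	/* tested on return to user space */
	/* ... */
};

static inline int return_path_has_work(struct task_struct_sketch *t)
{
	return t->work.need_resched || t->work.sigpending;
}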
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 7151f73a6c72..dd27616020e0 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -54,7 +54,7 @@ int cpu_idle(void)
/* endless idle loop with no priority at all */
for (;;) {
- /* If current->need_resched is zero we should really
+ /* If current->work.need_resched is zero we should really
* setup for a system wakup event and execute a shutdown
* instruction.
*
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index 5625e6cc5f3a..0df100042088 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -42,14 +42,14 @@ __handle_user_windows:
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
/* Redo sched+sig checks */
- ldx [%g6 + AOFF_task_need_resched], %l0
+#error ldx [%g6 + AOFF_task_need_resched], %l0
brz,pt %l0, 1f
nop
call schedule
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
-1: lduw [%g6 + AOFF_task_sigpending], %l0
+#error 1:	lduw			[%g6 + AOFF_task_sigpending], %l0
brz,pt %l0, __handle_user_windows_continue
nop
clr %o0
@@ -82,14 +82,14 @@ __handle_perfctrs:
call fault_in_user_windows
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
-1: ldx [%g6 + AOFF_task_need_resched], %l0
+#error 1: ldx [%g6 + AOFF_task_need_resched], %l0
brz,pt %l0, 1f
nop
call schedule
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
-1: lduw [%g6 + AOFF_task_sigpending], %l0
+#error 1: lduw [%g6 + AOFF_task_sigpending], %l0
brz,pt %l0, __handle_perfctrs_continue
sethi %hi(TSTATE_PEF), %o0
clr %o0
@@ -173,9 +173,9 @@ __handle_softirq_continue:
*/
to_user: wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
__handle_preemption_continue:
- ldx [%g6 + AOFF_task_need_resched], %l0
+#error ldx [%g6 + AOFF_task_need_resched], %l0
brnz,pn %l0, __handle_preemption
- lduw [%g6 + AOFF_task_sigpending], %l0
+#error lduw [%g6 + AOFF_task_sigpending], %l0
brnz,pn %l0, __handle_signal
nop
__handle_signal_continue:
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 7d378e981008..e2500331eabd 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -208,5 +208,7 @@ int __init device_driver_init(void)
}
EXPORT_SYMBOL(device_register);
+EXPORT_SYMBOL(put_device);
EXPORT_SYMBOL(iobus_register);
+EXPORT_SYMBOL(put_iobus);
EXPORT_SYMBOL(device_driver_init);
diff --git a/drivers/base/interface.c b/drivers/base/interface.c
index 28813db26e25..b7c5c94db710 100644
--- a/drivers/base/interface.c
+++ b/drivers/base/interface.c
@@ -113,7 +113,7 @@ device_write_power(struct device * dev, const char * buf, size_t count, loff_t o
if (!dev->driver)
goto done;
- num_args = sscanf(buf,"%s %s %u",str_command,str_stage,&state);
+ num_args = sscanf(buf,"%10s %10s %u",str_command,str_stage,&state);
error = -EINVAL;
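The interface.c change above adds field-width limits to sscanf so the parsed command and stage strings cannot overflow their destination buffers: "%10s" stores at most ten characters plus the terminating NUL. A small standalone illustration of the difference (buffer names and sizes here are ours, not the driver's):

#include <stdio.h>

int main(void)
{
	char cmd[11], stage[11];	/* 10 chars + NUL, matching "%10s" */
	unsigned int state;
	const char *buf = "suspend notify 3";

	/* Unbounded "%s" could write past cmd/stage on long input;
	 * "%10s" caps each conversion at the buffer size. */
	if (sscanf(buf, "%10s %10s %u", cmd, stage, &state) == 3)
		printf("%s %s %u\n", cmd, stage, state);
	return 0;
}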
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index 0092f7b6c830..d1b8b3b66260 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -39,7 +39,7 @@
#include <linux/ppp_defs.h>
#include <linux/ppp-comp.h>
-#include "zlib.c"
+#include <linux/zlib.h>
/*
* State for a Deflate (de)compressor.
@@ -56,10 +56,6 @@ struct ppp_deflate_state {
#define DEFLATE_OVHD 2 /* Deflate overhead/packet */
-static void *zalloc __P((void *, unsigned int items, unsigned int size));
-static void *zalloc_init __P((void *, unsigned int items,
- unsigned int size));
-static void zfree __P((void *, void *ptr));
static void *z_comp_alloc __P((unsigned char *options, int opt_len));
static void *z_decomp_alloc __P((unsigned char *options, int opt_len));
static void z_comp_free __P((void *state));
@@ -80,72 +76,6 @@ static void z_comp_reset __P((void *state));
static void z_decomp_reset __P((void *state));
static void z_comp_stats __P((void *state, struct compstat *stats));
-struct chunk_header {
- int valloced; /* allocated with valloc, not kmalloc */
- int guard; /* check for overwritten header */
-};
-
-#define GUARD_MAGIC 0x77a8011a
-#define MIN_VMALLOC 2048 /* use kmalloc for blocks < this */
-
-/*
- * Space allocation and freeing routines for use by zlib routines.
- */
-void
-zfree(arg, ptr)
- void *arg;
- void *ptr;
-{
- struct chunk_header *hdr = ((struct chunk_header *)ptr) - 1;
-
- if (hdr->guard != GUARD_MAGIC) {
- printk(KERN_WARNING "zfree: header corrupted (%x %x) at %p\n",
- hdr->valloced, hdr->guard, hdr);
- return;
- }
- if (hdr->valloced)
- vfree(hdr);
- else
- kfree(hdr);
-}
-
-void *
-zalloc(arg, items, size)
- void *arg;
- unsigned int items, size;
-{
- struct chunk_header *hdr;
- unsigned nbytes;
-
- nbytes = items * size + sizeof(*hdr);
- hdr = kmalloc(nbytes, GFP_ATOMIC);
- if (hdr == 0)
- return 0;
- hdr->valloced = 0;
- hdr->guard = GUARD_MAGIC;
- return (void *) (hdr + 1);
-}
-
-void *
-zalloc_init(arg, items, size)
- void *arg;
- unsigned int items, size;
-{
- struct chunk_header *hdr;
- unsigned nbytes;
-
- nbytes = items * size + sizeof(*hdr);
- if (nbytes >= MIN_VMALLOC)
- hdr = vmalloc(nbytes);
- else
- hdr = kmalloc(nbytes, GFP_KERNEL);
- if (hdr == 0)
- return 0;
- hdr->valloced = nbytes >= MIN_VMALLOC;
- hdr->guard = GUARD_MAGIC;
- return (void *) (hdr + 1);
-}
-
static void
z_comp_free(arg)
void *arg;
@@ -153,7 +83,9 @@ z_comp_free(arg)
struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
if (state) {
- deflateEnd(&state->strm);
+ zlib_deflateEnd(&state->strm);
+ if (state->strm.workspace)
+ kfree(state->strm.workspace);
kfree(state);
MOD_DEC_USE_COUNT;
}
@@ -180,22 +112,24 @@ z_comp_alloc(options, opt_len)
if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
return NULL;
- state = (struct ppp_deflate_state *) kmalloc(sizeof(*state), GFP_KERNEL);
+ state = (struct ppp_deflate_state *) kmalloc(sizeof(*state),
+ GFP_KERNEL);
if (state == NULL)
return NULL;
MOD_INC_USE_COUNT;
memset (state, 0, sizeof (struct ppp_deflate_state));
- state->strm.next_in = NULL;
- state->strm.zalloc = zalloc_init;
- state->strm.zfree = zfree;
- state->w_size = w_size;
+ state->strm.next_in = NULL;
+ state->w_size = w_size;
+ state->strm.workspace = kmalloc(zlib_deflate_workspacesize(),
+ GFP_KERNEL);
+ if (state->strm.workspace == NULL)
+ goto out_free;
- if (deflateInit2(&state->strm, Z_DEFAULT_COMPRESSION,
+ if (zlib_deflateInit2(&state->strm, Z_DEFAULT_COMPRESSION,
DEFLATE_METHOD_VAL, -w_size, 8, Z_DEFAULT_STRATEGY)
!= Z_OK)
goto out_free;
- state->strm.zalloc = zalloc;
return (void *) state;
out_free:
@@ -224,7 +158,7 @@ z_comp_init(arg, options, opt_len, unit, hdrlen, debug)
state->unit = unit;
state->debug = debug;
- deflateReset(&state->strm);
+ zlib_deflateReset(&state->strm);
return 1;
}
@@ -236,7 +170,7 @@ z_comp_reset(arg)
struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
state->seqno = 0;
- deflateReset(&state->strm);
+ zlib_deflateReset(&state->strm);
}
int
@@ -286,7 +220,7 @@ z_compress(arg, rptr, obuf, isize, osize)
state->strm.avail_in = (isize - off);
for (;;) {
- r = deflate(&state->strm, Z_PACKET_FLUSH);
+ r = zlib_deflate(&state->strm, Z_PACKET_FLUSH);
if (r != Z_OK) {
if (state->debug)
printk(KERN_ERR
@@ -337,7 +271,9 @@ z_decomp_free(arg)
struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
if (state) {
- inflateEnd(&state->strm);
+ zlib_inflateEnd(&state->strm);
+ if (state->strm.workspace)
+ kfree(state->strm.workspace);
kfree(state);
MOD_DEC_USE_COUNT;
}
@@ -370,14 +306,15 @@ z_decomp_alloc(options, opt_len)
MOD_INC_USE_COUNT;
memset (state, 0, sizeof (struct ppp_deflate_state));
- state->w_size = w_size;
- state->strm.next_out = NULL;
- state->strm.zalloc = zalloc_init;
- state->strm.zfree = zfree;
+ state->w_size = w_size;
+ state->strm.next_out = NULL;
+ state->strm.workspace = kmalloc(zlib_inflate_workspacesize(),
+ GFP_KERNEL);
+ if (state->strm.workspace == NULL)
+ goto out_free;
- if (inflateInit2(&state->strm, -w_size) != Z_OK)
+ if (zlib_inflateInit2(&state->strm, -w_size) != Z_OK)
goto out_free;
- state->strm.zalloc = zalloc;
return (void *) state;
out_free:
@@ -407,7 +344,7 @@ z_decomp_init(arg, options, opt_len, unit, hdrlen, mru, debug)
state->debug = debug;
state->mru = mru;
- inflateReset(&state->strm);
+ zlib_inflateReset(&state->strm);
return 1;
}
@@ -419,7 +356,7 @@ z_decomp_reset(arg)
struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
state->seqno = 0;
- inflateReset(&state->strm);
+ zlib_inflateReset(&state->strm);
}
/*
@@ -492,7 +429,7 @@ z_decompress(arg, ibuf, isize, obuf, osize)
* Call inflate, supplying more input or output as needed.
*/
for (;;) {
- r = inflate(&state->strm, Z_PACKET_FLUSH);
+ r = zlib_inflate(&state->strm, Z_PACKET_FLUSH);
if (r != Z_OK) {
if (state->debug)
printk(KERN_DEBUG "z_decompress%d: inflate returned %d (%s)\n",
@@ -575,7 +512,7 @@ z_incomp(arg, ibuf, icnt)
++state->strm.avail_in;
}
- r = inflateIncomp(&state->strm);
+ r = zlib_inflateIncomp(&state->strm);
if (r != Z_OK) {
/* gak! */
if (state->debug) {
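The ppp_deflate conversion above follows the "clean up zlib duplication" theme of this release: instead of handing zlib private zalloc/zfree callbacks, the driver now pre-allocates a workspace sized by zlib_deflate_workspacesize()/zlib_inflate_workspacesize() and calls the zlib_* entry points. A condensed sketch of the allocate/teardown pattern as the converted driver uses it — error handling and window-size validation are simplified relative to the real code:

#include <linux/zlib.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/ppp-comp.h>

/* Sketch of the workspace-based pattern adopted by the hunks above. */
static z_stream *sketch_comp_alloc(int w_size)
{
	z_stream *strm = kmalloc(sizeof(*strm), GFP_KERNEL);

	if (!strm)
		return NULL;
	memset(strm, 0, sizeof(*strm));

	/* Caller-supplied workspace replaces the old zalloc/zfree hooks. */
	strm->workspace = kmalloc(zlib_deflate_workspacesize(), GFP_KERNEL);
	if (!strm->workspace)
		goto err_free;

	if (zlib_deflateInit2(strm, Z_DEFAULT_COMPRESSION, DEFLATE_METHOD_VAL,
			      -w_size, 8, Z_DEFAULT_STRATEGY) != Z_OK)
		goto err_free_ws;
	return strm;

err_free_ws:
	kfree(strm->workspace);
err_free:
	kfree(strm);
	return NULL;
}

static void sketch_comp_free(z_stream *strm)
{
	zlib_deflateEnd(strm);
	kfree(strm->workspace);
	kfree(strm);
}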
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index e78dfa4ba235..cc48edb4c809 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -72,12 +72,19 @@
* the documentation/chipset releases. An on-line errata would be welcome.
*
* TODO:
- * - some trivial error lurk,
- * - the stats are fscked,
+ * - syncppp oopses. X25 untested.
* - use polling at high irq/s,
* - performance analysis,
* - endianness.
*
+ * 2001/12/10 Daniela Squassoni <daniela@cyclades.com>
+ * - Contribution to support the new generic HDLC layer.
+ *
+ * 2002/01 Ueimor
+ * - old style interface removal
+ * - dscc4_release_ring fix (related to DMA mapping)
+ * - hard_start_xmit fix (hint: TxSizeMax)
+ * - misc crapectomy.
*/
#include <linux/version.h>
@@ -107,17 +114,34 @@
#include <linux/hdlc.h>
/* Version */
-static const char version[] = "$Id: dscc4.c,v 1.130 2001/02/25 15:27:34 romieu Exp $\n";
+static const char version[] = "$Id: dscc4.c,v 1.157 2002/01/28 01:54:19 romieu Exp $\n";
static int debug;
+static int quartz;
+
+#define DRV_NAME "dscc4"
+#undef DSCC4_POLLING
+#define DEBUG
/* Module parameters */
+
MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>");
-MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller");
+MODULE_DESCRIPTION("Siemens PEB20534 PCI Controler");
MODULE_LICENSE("GPL");
MODULE_PARM(debug,"i");
+MODULE_PARM_DESC(debug,"Enable/disable extra messages");
+MODULE_PARM(quartz,"i");
+MODULE_PARM_DESC(quartz,"If present, on-board quartz frequency (Hz)");
+
+EXPORT_NO_SYMBOLS;
/* Structures */
+
+struct thingie {
+ int define;
+ u32 bits;
+};
+
struct TxFD {
u32 state;
u32 next;
@@ -135,18 +159,19 @@ struct RxFD {
};
#define DEBUG
-#define DEBUG_PARANOID
+#define DEBUG_PARANOIA
#define TX_RING_SIZE 32
#define RX_RING_SIZE 32
#define IRQ_RING_SIZE 64 /* Keep it A multiple of 32 */
#define TX_TIMEOUT (HZ/10)
+#define DSCC4_HZ_MAX 33000000
#define BRR_DIVIDER_MAX 64*0x00008000
#define dev_per_card 4
#define SOURCE_ID(flags) ((flags >> 28 ) & 0x03)
#define TO_SIZE(state) ((state >> 16) & 0x1fff)
#define TO_STATE(len) cpu_to_le32((len & TxSizeMax) << 16)
-#define RX_MAX(len) ((((len) >> 5) + 1) << 5)
+#define RX_MAX(len) ((((len) >> 5) + 1)<< 5)
#define SCC_REG_START(id) SCC_START+(id)*SCC_OFFSET
#undef DEBUG
@@ -157,7 +182,7 @@ struct dscc4_pci_priv {
spinlock_t lock;
struct pci_dev *pdev;
- struct net_device *root;
+ struct dscc4_dev_priv *root;
dma_addr_t iqcfg_dma;
u32 xtal_hz;
};
@@ -186,19 +211,21 @@ struct dscc4_dev_priv {
dma_addr_t iqtx_dma;
dma_addr_t iqrx_dma;
- struct net_device_stats stats;
struct timer_list timer;
struct dscc4_pci_priv *pci_priv;
spinlock_t lock;
int dev_id;
- u32 flags;
+ volatile u32 flags;
u32 timer_help;
u32 hi_expected;
- struct hdlc_device_struct hdlc;
- int usecount;
+ hdlc_device hdlc;
+ sync_serial_settings settings;
+ unsigned short encoding;
+ unsigned short parity;
+ u32 pad __attribute__ ((aligned (4)));
};
/* GLOBAL registers definitions */
@@ -232,6 +259,9 @@ struct dscc4_dev_priv {
#define ISR 0x58
/* Bit masks */
+#define EncodingMask 0x00700000
+#define CrcMask 0x00000003
+
#define IntRxScc0 0x10000000
#define IntTxScc0 0x01000000
@@ -242,10 +272,9 @@ struct dscc4_dev_priv {
#define Rdt 0x00200000
#define Idr 0x00100000
#define Idt 0x00080000
-#define TxSccRes 0x01000000
-#define RxSccRes 0x00010000
-#define TxSizeMax 0x1ffc
-#define RxSizeMax 0x1ffc
+#define TxSccRes 0x01000000
+#define RxSccRes 0x00010000
+#define TxSizeMax 0x1fff
#define Ccr0ClockMask 0x0000003f
#define Ccr1LoopMask 0x00000200
@@ -258,6 +287,7 @@ struct dscc4_dev_priv {
#define FrameVfr 0x80
#define FrameRdo 0x40
#define FrameCrc 0x20
+#define FrameRab 0x10
#define FrameAborted 0x00000200
#define FrameEnd 0x80000000
#define DataComplete 0x40000000
@@ -293,63 +323,103 @@ struct dscc4_dev_priv {
#define RdoSet 0x00000004
/* Functions prototypes */
-static __inline__ void dscc4_rx_irq(struct dscc4_pci_priv *, struct net_device *);
-static __inline__ void dscc4_tx_irq(struct dscc4_pci_priv *, struct net_device *);
+static inline void dscc4_rx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
+static inline void dscc4_tx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
static int dscc4_found1(struct pci_dev *, unsigned long ioaddr);
static int dscc4_init_one(struct pci_dev *, const struct pci_device_id *ent);
static int dscc4_open(struct net_device *);
static int dscc4_start_xmit(struct sk_buff *, struct net_device *);
static int dscc4_close(struct net_device *);
static int dscc4_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static int dscc4_change_mtu(struct net_device *dev, int mtu);
static int dscc4_init_ring(struct net_device *);
static void dscc4_release_ring(struct dscc4_dev_priv *);
static void dscc4_timer(unsigned long);
static void dscc4_tx_timeout(struct net_device *);
static void dscc4_irq(int irq, void *dev_id, struct pt_regs *ptregs);
-static struct net_device_stats *dscc4_get_stats(struct net_device *);
-static int dscc4_attach_hdlc_device(struct net_device *);
-static void dscc4_unattach_hdlc_device(struct net_device *);
-static int dscc4_hdlc_open(struct hdlc_device_struct *);
-static void dscc4_hdlc_close(struct hdlc_device_struct *);
-static int dscc4_hdlc_ioctl(struct hdlc_device_struct *, struct ifreq *, int);
-static int dscc4_hdlc_xmit(hdlc_device *, struct sk_buff *);
-#ifdef EXPERIMENTAL_POLLING
+static int dscc4_hdlc_attach(hdlc_device *, unsigned short, unsigned short);
+static int dscc4_set_iface(struct net_device *);
+static inline int dscc4_set_quartz(struct dscc4_dev_priv *, int);
+#ifdef DSCC4_POLLING
static int dscc4_tx_poll(struct dscc4_dev_priv *, struct net_device *);
#endif
-void inline reset_TxFD(struct TxFD *tx_fd) {
- /* FIXME: test with the last arg (size specification) = 0 */
- tx_fd->state = FrameEnd | Hold | 0x00100000;
- tx_fd->complete = 0x00000000;
+static inline void dscc4_patch_register(u32 ioaddr, u32 mask, u32 value)
+{
+ u32 state;
+
+ state = readl(ioaddr);
+ state &= ~mask;
+ state |= value;
+ writel(state, ioaddr);
}
-void inline dscc4_release_ring_skbuff(struct sk_buff **p, int n)
+int state_check(u32 state, struct dscc4_dev_priv *dpriv,
+ struct net_device *dev, const char *msg)
{
- for(; n > 0; n--) {
- if (*p)
- dev_kfree_skb(*p);
- p++;
+#ifdef DEBUG_PARANOIA
+ if (SOURCE_ID(state) != dpriv->dev_id) {
+ printk(KERN_DEBUG "%s (%s): Source Id=%d, state=%08x\n",
+ dev->name, msg, SOURCE_ID(state), state );
+ return -1;
}
+ if (state & 0x0df80c00) {
+ printk(KERN_DEBUG "%s (%s): state=%08x (UFO alert)\n",
+ dev->name, msg, state);
+ return -1;
+ }
+ return 0;
+#else
+ return 1;
+#endif
+}
+
+void inline reset_TxFD(struct TxFD *tx_fd) {
+ /* FIXME: test with the last arg (size specification) = 0 */
+ tx_fd->state = FrameEnd | Hold | 0x00100000;
+ tx_fd->complete = 0x00000000;
}
static void dscc4_release_ring(struct dscc4_dev_priv *dpriv)
{
struct pci_dev *pdev = dpriv->pci_priv->pdev;
+ struct TxFD *tx_fd = dpriv->tx_fd;
+ struct RxFD *rx_fd = dpriv->rx_fd;
+ struct sk_buff **skbuff;
+ int i;
+
+ pci_free_consistent(pdev, TX_RING_SIZE*sizeof(struct TxFD), tx_fd,
+ dpriv->tx_fd_dma);
+ pci_free_consistent(pdev, RX_RING_SIZE*sizeof(struct RxFD), rx_fd,
+ dpriv->rx_fd_dma);
+
+ skbuff = dpriv->tx_skbuff;
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (*skbuff) {
+ pci_unmap_single(pdev, tx_fd->data, (*skbuff)->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb(*skbuff);
+ }
+ skbuff++;
+ tx_fd++;
+ }
- pci_free_consistent(pdev, TX_RING_SIZE*sizeof(struct TxFD),
- dpriv->tx_fd, dpriv->tx_fd_dma);
- pci_free_consistent(pdev, RX_RING_SIZE*sizeof(struct RxFD),
- dpriv->rx_fd, dpriv->rx_fd_dma);
- dscc4_release_ring_skbuff(dpriv->tx_skbuff, TX_RING_SIZE);
- dscc4_release_ring_skbuff(dpriv->rx_skbuff, RX_RING_SIZE);
+ skbuff = dpriv->rx_skbuff;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (*skbuff) {
+ pci_unmap_single(pdev, rx_fd->data, (*skbuff)->len,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(*skbuff);
+ }
+ skbuff++;
+ rx_fd++;
+ }
}
void inline try_get_rx_skb(struct dscc4_dev_priv *priv, int cur, struct net_device *dev)
{
struct sk_buff *skb;
- skb = dev_alloc_skb(RX_MAX(HDLC_MAX_MRU+2));
+ skb = dev_alloc_skb(RX_MAX(HDLC_MAX_MRU));
priv->rx_skbuff[cur] = skb;
if (!skb) {
priv->rx_fd[cur--].data = (u32) NULL;
@@ -376,6 +446,7 @@ static int dscc4_wait_ack_cec(u32 ioaddr, struct net_device *dev, char *msg)
printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
return -1;
}
+ rmb();
}
printk(KERN_DEBUG "%s: %s ack (%d try)\n", dev->name, msg, i);
return 0;
@@ -406,22 +477,26 @@ static int dscc4_do_action(struct net_device *dev, char *msg)
return -1;
}
-static __inline__ int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
+static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
{
- int cur;
+ int cur, ret = 0;
s16 i;
cur = dpriv->iqtx_current%IRQ_RING_SIZE;
for (i = 0; i >= 0; i++) {
if (!(dpriv->flags & (NeedIDR | NeedIDT)) ||
(dpriv->iqtx[cur] & Xpr))
- return 0;
+ break;
+ smp_rmb();
}
- printk(KERN_ERR "%s: %s timeout\n", "dscc4", "XPR");
- return -1;
+ if (i < 0) {
+ printk(KERN_ERR "%s: %s timeout\n", "dscc4", "XPR");
+ ret = -1;
+ }
+ return ret;
}
-static __inline__ void dscc4_rx_skb(struct dscc4_dev_priv *dpriv, int cur,
+static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv, int cur,
struct RxFD *rx_fd, struct net_device *dev)
{
struct pci_dev *pdev = dpriv->pci_priv->pdev;
@@ -433,24 +508,23 @@ static __inline__ void dscc4_rx_skb(struct dscc4_dev_priv *dpriv, int cur,
pci_dma_sync_single(pdev, rx_fd->data, pkt_len + 1, PCI_DMA_FROMDEVICE);
if((skb->data[pkt_len] & FrameOk) == FrameOk) {
pci_unmap_single(pdev, rx_fd->data, skb->len, PCI_DMA_FROMDEVICE);
- dpriv->stats.rx_packets++;
- dpriv->stats.rx_bytes += pkt_len;
+ dev_to_hdlc(dev)->stats.rx_packets++;
+ dev_to_hdlc(dev)->stats.rx_bytes += pkt_len;
skb->tail += pkt_len;
skb->len = pkt_len;
if (netif_running(hdlc_to_dev(&dpriv->hdlc)))
- hdlc_netif_rx(&dpriv->hdlc, skb);
- else
- netif_rx(skb);
+ skb->protocol = htons(ETH_P_HDLC);
+ netif_rx(skb);
try_get_rx_skb(dpriv, cur, dev);
} else {
if(skb->data[pkt_len] & FrameRdo)
- dpriv->stats.rx_fifo_errors++;
+ dev_to_hdlc(dev)->stats.rx_fifo_errors++;
else if(!(skb->data[pkt_len] | ~FrameCrc))
- dpriv->stats.rx_crc_errors++;
- else if(!(skb->data[pkt_len] | ~FrameVfr))
- dpriv->stats.rx_length_errors++;
+ dev_to_hdlc(dev)->stats.rx_crc_errors++;
+ else if(!(skb->data[pkt_len] | ~(FrameVfr | FrameRab)))
+ dev_to_hdlc(dev)->stats.rx_length_errors++;
else
- dpriv->stats.rx_errors++;
+ dev_to_hdlc(dev)->stats.rx_errors++;
}
rx_fd->state1 |= Hold;
rx_fd->state2 = 0x00000000;
@@ -468,9 +542,9 @@ static int __init dscc4_init_one (struct pci_dev *pdev,
{
struct dscc4_pci_priv *priv;
struct dscc4_dev_priv *dpriv;
- int i;
static int cards_found = 0;
unsigned long ioaddr;
+ int i;
printk(KERN_DEBUG "%s", version);
@@ -478,26 +552,29 @@ static int __init dscc4_init_one (struct pci_dev *pdev,
goto err_out;
if (!request_mem_region(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0), "registers")) {
- printk (KERN_ERR "dscc4: can't reserve MMIO region (regs)\n");
+ printk(KERN_ERR "%s: can't reserve MMIO region (regs)\n",
+ DRV_NAME);
goto err_out;
}
if (!request_mem_region(pci_resource_start(pdev, 1),
pci_resource_len(pdev, 1), "LBI interface")) {
- printk (KERN_ERR "dscc4: can't reserve MMIO region (lbi)\n");
+ printk(KERN_ERR "%s: can't reserve MMIO region (lbi)\n",
+ DRV_NAME);
goto err_out_free_mmio_region0;
}
ioaddr = (unsigned long)ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (!ioaddr) {
- printk(KERN_ERR "dscc4: cannot remap MMIO region %lx @ %lx\n",
- pci_resource_len(pdev, 0), pci_resource_start(pdev, 0));
+ printk(KERN_ERR "%s: cannot remap MMIO region %lx @ %lx\n",
+ DRV_NAME, pci_resource_len(pdev, 0),
+ pci_resource_start(pdev, 0));
goto err_out_free_mmio_region;
}
- printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#lx (regs), %#lx (lbi), IRQ %d.\n",
+ printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#lx (regs), %#lx (lbi), IRQ %d\n",
pci_resource_start(pdev, 0),
pci_resource_start(pdev, 1), pdev->irq);
- /* High PCI latency useless. Cf app. note. */
+ /* No need for High PCI latency. Cf app. note. */
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x10);
pci_set_master(pdev);
@@ -506,11 +583,10 @@ static int __init dscc4_init_one (struct pci_dev *pdev,
priv = (struct dscc4_pci_priv *)pci_get_drvdata(pdev);
- if (request_irq(pdev->irq, &dscc4_irq, SA_SHIRQ, "dscc4", priv->root)) {
- printk(KERN_WARNING "dscc4: IRQ %d is busy\n", pdev->irq);
+ if (request_irq(pdev->irq, &dscc4_irq, SA_SHIRQ, DRV_NAME, priv->root)){
+ printk(KERN_WARNING "%s: IRQ %d busy\n", DRV_NAME, pdev->irq);
goto err_out_iounmap;
}
- priv->pdev = pdev;
/* power up/little endian/dma core controlled via hold bit */
writel(0x00000000, ioaddr + GMODE);
@@ -537,7 +613,7 @@ static int __init dscc4_init_one (struct pci_dev *pdev,
* IQRX/TXi needs to be set soon. Learned it the hard way...
*/
for(i = 0; i < dev_per_card; i++) {
- dpriv = (struct dscc4_dev_priv *)(priv->root + i)->priv;
+ dpriv = priv->root + i;
dpriv->iqtx = (u32 *) pci_alloc_consistent(pdev,
IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma);
if (!dpriv->iqtx)
@@ -545,7 +621,7 @@ static int __init dscc4_init_one (struct pci_dev *pdev,
writel(dpriv->iqtx_dma, ioaddr + IQTX0 + i*4);
}
for(i = 0; i < dev_per_card; i++) {
- dpriv = (struct dscc4_dev_priv *)(priv->root + i)->priv;
+ dpriv = priv->root + i;
dpriv->iqrx = (u32 *) pci_alloc_consistent(pdev,
IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma);
if (!dpriv->iqrx)
@@ -553,10 +629,7 @@ static int __init dscc4_init_one (struct pci_dev *pdev,
writel(dpriv->iqrx_dma, ioaddr + IQRX0 + i*4);
}
- /*
- * Cf application hint. Beware of hard-lock condition on
- * threshold .
- */
+ /* Cf application hint. Beware of hard-lock condition on threshold. */
writel(0x42104000, ioaddr + FIFOCR1);
//writel(0x9ce69800, ioaddr + FIFOCR2);
writel(0xdef6d800, ioaddr + FIFOCR2);
@@ -572,14 +645,14 @@ static int __init dscc4_init_one (struct pci_dev *pdev,
err_out_free_iqrx:
while (--i >= 0) {
- dpriv = (struct dscc4_dev_priv *)(priv->root + i)->priv;
+ dpriv = priv->root + i;
pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
dpriv->iqrx, dpriv->iqrx_dma);
}
i = dev_per_card;
err_out_free_iqtx:
while (--i >= 0) {
- dpriv = (struct dscc4_dev_priv *)(priv->root + i)->priv;
+ dpriv = priv->root + i;
pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
dpriv->iqtx, dpriv->iqtx_dma);
}
@@ -599,88 +672,102 @@ err_out:
return -ENODEV;
};
+/*
+ * Let's hope the default values are decent enough to protect my
+ * feet from the user's gun - Ueimor
+ */
+static void dscc4_init_registers(u32 base_addr, int dev_id)
+{
+ u32 ioaddr = base_addr + SCC_REG_START(dev_id);
+
+ writel(0x80001000, ioaddr + CCR0);
+
+ writel(LengthCheck | (HDLC_MAX_MRU >> 5), ioaddr + RLCR);
+
+ /* no address recognition/crc-CCITT/cts enabled */
+ writel(0x021c8000, ioaddr + CCR1);
+
+ /* crc not forwarded */
+ writel(0x00050008 & ~RxActivate, ioaddr + CCR2);
+ // crc forwarded
+ //writel(0x00250008 & ~RxActivate, ioaddr + CCR2);
+
+ /* Don't mask RDO. Ever. */
+#ifdef DSCC4_POLLING
+ writel(0xfffeef7f, ioaddr + IMR); /* Interrupt mask */
+#else
+ //writel(0xfffaef7f, ioaddr + IMR); /* Interrupt mask */
+ writel(0xfffaef7e, ioaddr + IMR); /* Interrupt mask */
+#endif
+}
+
static int dscc4_found1(struct pci_dev *pdev, unsigned long ioaddr)
{
struct dscc4_pci_priv *ppriv;
- struct dscc4_dev_priv *dpriv;
- struct net_device *dev;
+ struct dscc4_dev_priv *root;
int i = 0;
- dpriv = (struct dscc4_dev_priv *)
- kmalloc(dev_per_card*sizeof(struct dscc4_dev_priv), GFP_KERNEL);
- if (!dpriv) {
- printk(KERN_ERR "dscc4: can't allocate data\n");
+ root = (struct dscc4_dev_priv *)
+ kmalloc(dev_per_card*sizeof(*root), GFP_KERNEL);
+ if (!root) {
+ printk(KERN_ERR "%s: can't allocate data\n", DRV_NAME);
goto err_out;
}
- memset(dpriv, 0, dev_per_card*sizeof(struct dscc4_dev_priv));
-
- dev = (struct net_device *)
- kmalloc(dev_per_card*sizeof(struct net_device), GFP_KERNEL);
- if (!dev) {
- printk(KERN_ERR "dscc4: can't allocate net_device\n");
- goto err_dealloc_priv;
- }
- memset(dev, 0, dev_per_card*sizeof(struct net_device));
+ memset(root, 0, dev_per_card*sizeof(*root));
- ppriv = (struct dscc4_pci_priv *)
- kmalloc(sizeof(struct dscc4_pci_priv), GFP_KERNEL);
+ ppriv = (struct dscc4_pci_priv *) kmalloc(sizeof(*ppriv), GFP_KERNEL);
if (!ppriv) {
- printk(KERN_ERR "dscc4: can't allocate pci private data.\n");
- goto err_dealloc_dev;
+ printk(KERN_ERR "%s: can't allocate private data\n", DRV_NAME);
+ goto err_free_dev;
}
memset(ppriv, 0, sizeof(struct dscc4_pci_priv));
for (i = 0; i < dev_per_card; i++) {
- struct dscc4_dev_priv *p;
- struct net_device *d;
+ struct dscc4_dev_priv *dpriv = root + i;
+ hdlc_device *hdlc = &dpriv->hdlc;
+ struct net_device *d = hdlc_to_dev(hdlc);
- d = dev + i;
d->base_addr = ioaddr;
d->init = NULL;
d->irq = pdev->irq;
- /* The card adds the crc */
- d->type = ARPHRD_RAWHDLC;
d->open = dscc4_open;
d->stop = dscc4_close;
- d->hard_start_xmit = dscc4_start_xmit;
d->set_multicast_list = NULL;
d->do_ioctl = dscc4_ioctl;
- d->get_stats = dscc4_get_stats;
- d->change_mtu = dscc4_change_mtu;
- d->mtu = HDLC_MAX_MTU;
- d->flags = IFF_MULTICAST|IFF_POINTOPOINT|IFF_NOARP;
d->tx_timeout = dscc4_tx_timeout;
d->watchdog_timeo = TX_TIMEOUT;
- p = dpriv + i;
- p->dev_id = i;
- p->pci_priv = ppriv;
- spin_lock_init(&p->lock);
- d->priv = p;
+ dpriv->dev_id = i;
+ dpriv->pci_priv = ppriv;
+ spin_lock_init(&dpriv->lock);
+ d->priv = dpriv;
- if (dev_alloc_name(d, "scc%d")<0) {
- printk(KERN_ERR "dev_alloc_name failed for scc.\n");
- goto err_dealloc_dev;
- }
- if (register_netdev(d)) {
- printk(KERN_ERR "%s: register_netdev != 0.\n", d->name);
- goto err_dealloc_dev;
+ hdlc->xmit = dscc4_start_xmit;
+ hdlc->attach = dscc4_hdlc_attach;
+
+ if (register_hdlc_device(hdlc)) {
+ printk(KERN_ERR "%s: unable to register\n", DRV_NAME);
+ goto err_unregister;
}
- dscc4_attach_hdlc_device(d);
+ hdlc->proto = IF_PROTO_HDLC;
SET_MODULE_OWNER(d);
+ dscc4_init_registers(ioaddr, i);
+ dpriv->parity = PARITY_CRC16_PR0_CCITT;
+ dpriv->encoding = ENCODING_NRZ;
}
- ppriv->root = dev;
- ppriv->pdev = pdev;
+ if (dscc4_set_quartz(root, quartz) < 0)
+ goto err_unregister;
+ ppriv->root = root;
spin_lock_init(&ppriv->lock);
pci_set_drvdata(pdev, ppriv);
return 0;
-err_dealloc_dev:
+err_unregister:
while (--i >= 0)
- unregister_netdev(dev + i);
- kfree(dev);
-err_dealloc_priv:
- kfree(dpriv);
+ unregister_hdlc_device(&root[i].hdlc);
+ kfree(ppriv);
+err_free_dev:
+ kfree(root);
err_out:
return -1;
};
@@ -701,7 +788,7 @@ static void dscc4_timer(unsigned long data)
printk(KERN_DEBUG "%s: pending events\n", dev->name);
dev->trans_start = jiffies;
spin_lock_irqsave(&ppriv->lock, flags);
- dscc4_tx_irq(ppriv, dev);
+ dscc4_tx_irq(ppriv, dpriv);
spin_unlock_irqrestore(&ppriv->lock, flags);
} else {
struct TxFD *tx_fd;
@@ -711,7 +798,7 @@ static void dscc4_timer(unsigned long data)
printk(KERN_DEBUG "%s: missing events\n", dev->name);
i = dpriv->tx_dirty%TX_RING_SIZE;
j = dpriv->tx_current - dpriv->tx_dirty;
- dpriv->stats.tx_dropped += j;
+ dev_to_hdlc(dev)->stats.tx_dropped += j;
while(j--) {
skb = dpriv->tx_skbuff[i];
tx_fd = dpriv->tx_fd + i;
@@ -740,13 +827,34 @@ static void dscc4_timer(unsigned long data)
static void dscc4_tx_timeout(struct net_device *dev)
{
/* FIXME: something is missing there */
-};
+}
+
+static int dscc4_loopback_check(struct dscc4_dev_priv *dpriv)
+{
+ sync_serial_settings *settings = &dpriv->settings;
+
+ if (settings->loopback && (settings->clock_type != CLOCK_INT)) {
+ struct net_device *dev = hdlc_to_dev(&dpriv->hdlc);
+
+ printk(KERN_INFO "%s: loopback requires clock\n", dev->name);
+ return -1;
+ }
+ return 0;
+}
static int dscc4_open(struct net_device *dev)
{
- struct dscc4_dev_priv *dpriv = (struct dscc4_dev_priv *)dev->priv;
+ struct dscc4_dev_priv *dpriv = dev->priv;
+ hdlc_device *hdlc = &dpriv->hdlc;
struct dscc4_pci_priv *ppriv;
- u32 ioaddr = 0;
+ u32 ioaddr;
+ int ret = -EAGAIN;
+
+ if ((dscc4_loopback_check(dpriv) < 0) || !dev->hard_start_xmit)
+ goto err;
+
+ if ((ret = hdlc_open(hdlc)))
+ goto err;
MOD_INC_USE_COUNT;
@@ -757,24 +865,6 @@ static int dscc4_open(struct net_device *dev)
ioaddr = dev->base_addr + SCC_REG_START(dpriv->dev_id);
- /* FIXME: VIS */
- writel(readl(ioaddr + CCR0) | 0x80001000, ioaddr + CCR0);
-
- writel(LengthCheck | (HDLC_MAX_MRU >> 5), ioaddr + RLCR);
-
- /* no address recognition/crc-CCITT/cts enabled */
- writel(readl(ioaddr + CCR1) | 0x021c8000, ioaddr + CCR1);
-
- /* Ccr2.Rac = 0 */
- writel(0x00050008 & ~RxActivate, ioaddr + CCR2);
-
-#ifdef EXPERIMENTAL_POLLING
- writel(0xfffeef7f, ioaddr + IMR); /* Interrupt mask */
-#else
- /* Don't mask RDO. Ever. */
- //writel(0xfffaef7f, ioaddr + IMR); /* Interrupt mask */
- writel(0xfffaef7e, ioaddr + IMR); /* Interrupt mask */
-#endif
/* IDT+IDR during XPR */
dpriv->flags = NeedIDR | NeedIDT;
@@ -789,9 +879,10 @@ static int dscc4_open(struct net_device *dev)
printk(KERN_ERR "%s busy. Try later\n", dev->name);
goto err_free_ring;
}
+
+ /* Posted write is flushed in the busy-waiting loop */
writel(TxSccRes | RxSccRes, ioaddr + CMDR);
- /* ... the following isn't */
if (dscc4_wait_ack_cec(ioaddr, dev, "Cec"))
goto err_free_ring;
@@ -819,16 +910,18 @@ static int dscc4_open(struct net_device *dev)
err_free_ring:
dscc4_release_ring(dpriv);
err_out:
+ hdlc_close(hdlc);
MOD_DEC_USE_COUNT;
- return -EAGAIN;
+err:
+ return ret;
}
-#ifdef EXPERIMENTAL_POLLING
+#ifdef DSCC4_POLLING
static int dscc4_tx_poll(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
/* FIXME: it's gonna be easy (TM), for sure */
}
-#endif /* EXPERIMENTAL_POLLING */
+#endif /* DSCC4_POLLING */
static int dscc4_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
@@ -842,13 +935,14 @@ static int dscc4_start_xmit(struct sk_buff *skb, struct net_device *dev)
next = dpriv->tx_current%TX_RING_SIZE;
dpriv->tx_skbuff[next] = skb;
tx_fd = dpriv->tx_fd + next;
- tx_fd->state = FrameEnd | Hold | TO_STATE(skb->len & TxSizeMax);
+ printk(KERN_DEBUG "%s: %d sent\n", dev->name, skb->len);
+ tx_fd->state = FrameEnd | Hold | TO_STATE(skb->len);
tx_fd->data = pci_map_single(ppriv->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
tx_fd->complete = 0x00000000;
mb(); // FIXME: suppress ?
-#ifdef EXPERIMENTAL_POLLING
+#ifdef DSCC4_POLLING
spin_lock(&dpriv->lock);
while(dscc4_tx_poll(dpriv, dev));
spin_unlock(&dpriv->lock);
@@ -881,6 +975,7 @@ static int dscc4_close(struct net_device *dev)
struct dscc4_dev_priv *dpriv = (struct dscc4_dev_priv *)dev->priv;
u32 ioaddr = dev->base_addr;
int dev_id;
+ hdlc_device *hdlc = dev_to_hdlc(dev);
del_timer_sync(&dpriv->timer);
netif_stop_queue(dev);
@@ -890,26 +985,45 @@ static int dscc4_close(struct net_device *dev)
writel(0x00050000, ioaddr + SCC_REG_START(dev_id) + CCR2);
writel(MTFi|Rdr|Rdt, ioaddr + CH0CFG + dev_id*0x0c); /* Reset Rx/Tx */
writel(0x00000001, ioaddr + GCMDR);
+ readl(ioaddr + GCMDR);
+ /*
+ * FIXME: wait for the command ack before returning the memory
+ * structures to the kernel.
+ */
+ hdlc_close(hdlc);
dscc4_release_ring(dpriv);
MOD_DEC_USE_COUNT;
return 0;
}
+static inline int dscc4_check_clock_ability(int port)
+{
+ int ret = 0;
+
+#ifdef CONFIG_DSCC4_CLOCK_ON_TWO_PORTS_ONLY
+ if (port >= 2)
+ ret = -1;
+#endif
+ return ret;
+}
+
static int dscc4_set_clock(struct net_device *dev, u32 *bps, u32 *state)
{
struct dscc4_dev_priv *dpriv = (struct dscc4_dev_priv *)dev->priv;
u32 brr;
*state &= ~Ccr0ClockMask;
- if (*bps) { /* DCE */
+ if (*bps) { /* Clock generated - required for DCE */
u32 n = 0, m = 0, divider;
int xtal;
xtal = dpriv->pci_priv->xtal_hz;
if (!xtal)
return -1;
+ if (dscc4_check_clock_ability(dpriv->dev_id) < 0)
+ return -1;
divider = xtal / *bps;
if (divider > BRR_DIVIDER_MAX) {
divider >>= 4;
@@ -933,8 +1047,9 @@ static int dscc4_set_clock(struct net_device *dev, u32 *bps, u32 *state)
if (!(*state & 0x00000001)) /* Clock mode 6b */
divider <<= 4;
*bps = xtal / divider;
- } else { /* DTE */
+ } else {
/*
+ * External clock - DTE
* "state" already reflects Clock mode 0a.
* Nothing more to be done
*/
@@ -945,214 +1060,201 @@ static int dscc4_set_clock(struct net_device *dev, u32 *bps, u32 *state)
return 0;
}
-#ifdef LATER_PLEASE
-/*
- * -*- [RFC] Configuring Synchronous Interfaces in Linux -*-
- */
-
-// FIXME: MEDIA already defined in linux/hdlc.h
-#define HDLC_MEDIA_V35 0
-#define HDLC_MEDIA_RS232 1
-#define HDLC_MEDIA_X21 2
-#define HDLC_MEDIA_E1 3
-#define HDLC_MEDIA_HSSI 4
-
-#define HDLC_CODING_NRZ 0
-#define HDLC_CODING_NRZI 1
-#define HDLC_CODING_FM0 2
-#define HDLC_CODING_FM1 3
-#define HDLC_CODING_MANCHESTER 4
-
-#define HDLC_CRC_NONE 0
-#define HDLC_CRC_16 1
-#define HDLC_CRC_32 2
-#define HDLC_CRC_CCITT 3
-
-/* RFC: add the crc reset value ? */
-struct hdlc_physical {
- u8 media;
- u8 coding;
- u32 rate;
- u8 crc;
- u8 crc_siz; /* 2 or 4 bytes */
- u8 shared_flags; /* Discouraged on the DSCC4 */
-};
-
-// FIXME: PROTO already defined in linux/hdlc.h
-#define HDLC_PROTO_RAW 0
-#define HDLC_PROTO_FR 1
-#define HDLC_PROTO_X25 2
-#define HDLC_PROTO_PPP 3
-#define HDLC_PROTO_CHDLC 4
-
-struct hdlc_protocol {
- u8 proto;
-
- union {
- } u;
-};
-
-struct screq {
- u16 media_group;
-
- union {
- struct hdlc_physical hdlc_phy;
- struct hdlc_protocol hdlc_proto;
- } u;
-};
-
-// FIXME: go sub-module
-static struct {
- u16 coding;
- u16 bits;
-} map[] = {
- {HDLC_CODING_NRZ, 0x00},
- {HDLC_CODING_NRZI, 0x20},
- {HDLC_CODING_FM0, 0x40},
- {HDLC_CODING_FM1, 0x50},
- {HDLC_CODING_MANCHESTER, 0x60},
- {65535, 0x00}
-};
-#endif /* LATER_PLEASE */
-
static int dscc4_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct dscc4_dev_priv *dpriv = dev->priv;
- u32 state, ioaddr;
+ struct if_settings *if_s = &ifr->ifr_settings;
+ const size_t size = sizeof(dpriv->settings);
+ int ret = 0;
if (dev->flags & IFF_UP)
return -EBUSY;
- switch (cmd) {
- /* Set built-in quartz frequency */
- case SIOCDEVPRIVATE: {
- u32 hz;
+ if (cmd != SIOCDEVICE)
+ return -EOPNOTSUPP;
- hz = ifr->ifr_ifru.ifru_ivalue;
- if (hz >= 33000000) /* 33 MHz */
- return -EOPNOTSUPP;
- dpriv->pci_priv->xtal_hz = hz;
- return 0;
- }
- /* Set/unset loopback */
- case SIOCDEVPRIVATE+1: {
- u32 flags;
-
- ioaddr = dev->base_addr + CCR1 +
- SCC_REG_START(dpriv->dev_id);
- state = readl(ioaddr);
- flags = ifr->ifr_ifru.ifru_ivalue;
- if (flags & 0x00000001) {
- printk(KERN_DEBUG "%s: loopback\n", dev->name);
- state |= 0x00000100;
- } else {
- printk(KERN_DEBUG "%s: normal\n", dev->name);
- state &= ~0x00000100;
- }
- writel(state, ioaddr);
+ switch(ifr->ifr_settings.type) {
+ case IF_GET_IFACE:
+ if_s->type = IF_IFACE_SYNC_SERIAL;
+ if (if_s->data_length == 0)
return 0;
- }
+ if (if_s->data_length < size)
+ return -ENOMEM;
+ if (copy_to_user(if_s->data, &dpriv->settings, size))
+ return -EFAULT;
+ if_s->data_length = size;
+ break;
+
+ case IF_IFACE_SYNC_SERIAL:
+ if(!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (if_s->data_length != size)
+ return -ENOMEM;
+
+ if (copy_from_user(&dpriv->settings, if_s->data, size))
+ return -EFAULT;
+ ret = dscc4_set_iface(dev);
+ break;
+
+ default:
+ ret = hdlc_ioctl(dev, ifr, cmd);
+ break;
+ }
-#ifdef LATER_PLEASE
- case SIOCDEVPRIVATE+2: {
- {
- struct screq scr;
+ return ret;
+}
- err = copy_from_user(&scr, ifr->ifr_ifru.ifru_data, sizeof(struct screq));
- if (err)
- return err;
- do {
- if (scr.u.hdlc_phy.coding == map[i].coding)
- break;
- } while (map[++i].coding != 65535);
- if (!map[i].coding)
- return -EOPNOTSUPP;
-
- ioaddr = dev->base_addr + CCR0 +
- SCC_REG_START(dpriv->dev_id);
- state = readl(ioaddr) & ~EncodingMask;
- state |= (u32)map[i].bits << 16;
- writel(state, ioaddr);
- printk("state: %08x\n", state); /* DEBUG */
- return 0;
- }
- case SIOCDEVPRIVATE+3: {
- struct screq *scr = (struct screq *)ifr->ifr_ifru.ifru_data;
+static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz)
+{
+ int ret = 0;
- ioaddr = dev->base_addr + CCR0 +
- SCC_REG_START(dpriv->dev_id);
- state = (readl(ioaddr) & EncodingMask) >> 16;
- do {
- if (state == map[i].bits)
- break;
- } while (map[++i].coding);
- return put_user(map[i].coding, (u16 *)scr->u.hdlc_phy.coding);
- }
-#endif /* LATER_PLEASE */
-
- case HDLCSCLOCKRATE:
- {
- u32 state, bps;
-
- bps = ifr->ifr_ifru.ifru_ivalue;
- ioaddr = dev->base_addr + CCR0 +
- SCC_REG_START(dpriv->dev_id);
- state = readl(ioaddr);
- if(dscc4_set_clock(dev, &bps, &state) < 0)
- return -EOPNOTSUPP;
- if (bps) { /* DCE */
- printk(KERN_DEBUG "%s: generated RxClk (DCE)\n",
- dev->name);
- ifr->ifr_ifru.ifru_ivalue = bps;
- } else { /* DTE */
- state = 0x80001000;
- printk(KERN_DEBUG "%s: external RxClk (DTE)\n",
- dev->name);
- }
- writel(state, ioaddr);
- return 0;
- }
- case HDLCGCLOCKRATE: {
- u32 brr;
- int bps;
-
- brr = readl(dev->base_addr + BRR +
- SCC_REG_START(dpriv->dev_id));
- bps = dpriv->pci_priv->xtal_hz >> (brr >> 8);
- bps /= (brr & 0x3f) + 1;
- ifr->ifr_ifru.ifru_ivalue = bps;
- return 0;
+ if ((hz < 0) || (hz > DSCC4_HZ_MAX))
+ ret = -EOPNOTSUPP;
+ else
+ dpriv->pci_priv->xtal_hz = hz;
+
+ return ret;
+}
+
+static int dscc4_match(struct thingie *p, int value)
+{
+ int i;
+
+ for (i = 0; p[i].define != -1; i++) {
+ if (value == p[i].define)
+ break;
+ }
+ if (p[i].define == -1)
+ return -1;
+ else
+ return i;
+}
+
+static int dscc4_clock_setting(struct net_device *dev)
+{
+ struct dscc4_dev_priv *dpriv = dev->priv;
+ sync_serial_settings *settings = &dpriv->settings;
+ u32 bps, state;
+ u32 ioaddr;
+
+ bps = settings->clock_rate;
+ ioaddr = dev->base_addr + CCR0 + SCC_REG_START(dpriv->dev_id);
+ state = readl(ioaddr);
+ if(dscc4_set_clock(dev, &bps, &state) < 0)
+ return -EOPNOTSUPP;
+ if (bps) { /* DCE */
+ printk(KERN_DEBUG "%s: generated RxClk (DCE)\n", dev->name);
+ if (settings->clock_rate != bps) {
+ settings->clock_rate = bps;
+ printk(KERN_DEBUG "%s: clock adjusted from %08d to %08d \n",
+ dev->name, dpriv->settings.clock_rate, bps);
}
+ } else { /* DTE */
+ state = 0x80001000;
+ printk(KERN_DEBUG "%s: external RxClk (DTE)\n", dev->name);
+ }
+ writel(state, ioaddr);
+ return 0;
+}
+
+static int dscc4_encoding_setting(struct net_device *dev)
+{
+ struct dscc4_dev_priv *dpriv = dev->priv;
+ struct thingie encoding[] = {
+ { ENCODING_NRZ, 0x00000000 },
+ { ENCODING_NRZI, 0x00200000 },
+ { ENCODING_FM_MARK, 0x00400000 },
+ { ENCODING_FM_SPACE, 0x00500000 },
+ { ENCODING_MANCHESTER, 0x00600000 },
+ { -1, 0}
+ };
+ int i, ret = 0;
+
+ i = dscc4_match(encoding, dpriv->encoding);
+ if (i >= 0) {
+ u32 ioaddr;
+
+ ioaddr = dev->base_addr + CCR0 + SCC_REG_START(dpriv->dev_id);
+ dscc4_patch_register(ioaddr, EncodingMask, encoding[i].bits);
+ } else
+ ret = -EOPNOTSUPP;
+ return ret;
+}
- default:
- return -EOPNOTSUPP;
+static int dscc4_loopback_setting(struct net_device *dev)
+{
+ struct dscc4_dev_priv *dpriv = dev->priv;
+ sync_serial_settings *settings = &dpriv->settings;
+ u32 ioaddr, state;
+
+ ioaddr = dev->base_addr + CCR1 + SCC_REG_START(dpriv->dev_id);
+ state = readl(ioaddr);
+ if (settings->loopback) {
+ printk(KERN_DEBUG "%s: loopback\n", dev->name);
+ state |= 0x00000100;
+ } else {
+ printk(KERN_DEBUG "%s: normal\n", dev->name);
+ state &= ~0x00000100;
}
+ writel(state, ioaddr);
+ return 0;
}
-static int dscc4_change_mtu(struct net_device *dev, int mtu)
+static int dscc4_crc_setting(struct net_device *dev)
{
- /* FIXME: chainsaw coded... */
- if ((mtu <= 3) || (mtu > 65531))
- return -EINVAL;
- if(dev->flags & IFF_UP)
- return -EBUSY;
- dev->mtu = mtu;
- return(0);
+ struct dscc4_dev_priv *dpriv = dev->priv;
+ struct thingie crc[] = {
+ { PARITY_CRC16_PR0_CCITT, 0x00000010 },
+ { PARITY_CRC16_PR1_CCITT, 0x00000000 },
+ { PARITY_CRC32_PR0_CCITT, 0x00000011 },
+ { PARITY_CRC32_PR1_CCITT, 0x00000001 }
+ };
+ int i, ret = 0;
+
+ i = dscc4_match(crc, dpriv->parity);
+ if (i >= 0) {
+ u32 ioaddr;
+
+ ioaddr = dev->base_addr + CCR1 + SCC_REG_START(dpriv->dev_id);
+ dscc4_patch_register(ioaddr, CrcMask, crc[i].bits);
+ } else
+ ret = -EOPNOTSUPP;
+ return ret;
+}
+
+static int dscc4_set_iface(struct net_device *dev)
+{
+ struct {
+ int (*action)(struct net_device *);
+ } *p, do_setting[] = {
+ { dscc4_encoding_setting },
+ { dscc4_clock_setting },
+ { dscc4_loopback_setting },
+ { dscc4_crc_setting },
+ { NULL }
+ };
+ int ret = 0;
+
+ for (p = do_setting; p->action; p++) {
+ if ((ret = p->action(dev)) < 0)
+ break;
+ }
+ return ret;
}
-static void dscc4_irq(int irq, void *dev_instance, struct pt_regs *ptregs)
+static void dscc4_irq(int irq, void *token, struct pt_regs *ptregs)
{
- struct net_device *dev = dev_instance;
+ struct dscc4_dev_priv *root = token;
struct dscc4_pci_priv *priv;
+ struct net_device *dev;
u32 ioaddr, state;
unsigned long flags;
int i;
- priv = ((struct dscc4_dev_priv *)dev->priv)->pci_priv;
- /*
- * FIXME: shorten the protected area (set some bit telling we're
- * in an interrupt or increment some work-to-do counter etc...)
- */
+ priv = root->pci_priv;
+ dev = hdlc_to_dev(&root->hdlc);
+
spin_lock_irqsave(&priv->lock, flags);
ioaddr = dev->base_addr;
@@ -1179,14 +1281,14 @@ static void dscc4_irq(int irq, void *dev_instance, struct pt_regs *ptregs)
if (state & RxEvt) {
i = dev_per_card - 1;
do {
- dscc4_rx_irq(priv, dev + i);
+ dscc4_rx_irq(priv, root + i);
} while (--i >= 0);
state &= ~RxEvt;
}
if (state & TxEvt) {
i = dev_per_card - 1;
do {
- dscc4_tx_irq(priv, dev + i);
+ dscc4_tx_irq(priv, root + i);
} while (--i >= 0);
state &= ~TxEvt;
}
@@ -1194,10 +1296,9 @@ out:
spin_unlock_irqrestore(&priv->lock, flags);
}
-static __inline__ void dscc4_tx_irq(struct dscc4_pci_priv *ppriv,
- struct net_device *dev)
+static inline void dscc4_tx_irq(struct dscc4_pci_priv *ppriv, struct dscc4_dev_priv *dpriv)
{
- struct dscc4_dev_priv *dpriv = dev->priv;
+ struct net_device *dev = hdlc_to_dev(&dpriv->hdlc);
u32 state;
int cur, loop = 0;
@@ -1218,18 +1319,9 @@ try:
dpriv->iqtx[cur] = 0;
dpriv->iqtx_current++;
-#ifdef DEBUG_PARANOID
- if (SOURCE_ID(state) != dpriv->dev_id) {
- printk(KERN_DEBUG "%s (Tx): Source Id=%d, state=%08x\n",
- dev->name, SOURCE_ID(state), state );
+ if (state_check(state, dpriv, dev, "Tx"))
return;
- }
- if (state & 0x0df80c00) {
- printk(KERN_DEBUG "%s (Tx): state=%08x (UFO alert)\n",
- dev->name, state);
- return;
- }
-#endif
+
// state &= 0x0fffffff; /* Tracking the analyzed bits */
if (state & SccEvt) {
if (state & Alls) {
@@ -1259,12 +1351,12 @@ try:
"%s: DataComplete=0 cur=%d isr=%08x state=%08x\n",
dev->name, cur, isr, state);
writel(isr, ioaddr);
- dpriv->stats.tx_dropped++;
+ dev_to_hdlc(dev)->stats.tx_dropped++;
} else {
tx_fd->complete &= ~DataComplete;
if (tx_fd->state & FrameEnd) {
- dpriv->stats.tx_packets++;
- dpriv->stats.tx_bytes += skb->len;
+ dev_to_hdlc(dev)->stats.tx_packets++;
+ dev_to_hdlc(dev)->stats.tx_bytes += skb->len;
}
}
@@ -1285,7 +1377,7 @@ try:
* Transmit Data Underrun
*/
if (state & Xdu) {
- printk(KERN_ERR "dscc4: XDU. Contact maintainer\n");
+ printk(KERN_ERR "%s: XDU. Ask maintainer\n", DRV_NAME);
dpriv->flags = NeedIDT;
/* Tx reset */
writel(MTFi | Rdt,
@@ -1340,7 +1432,7 @@ try:
}
} else { /* ! SccEvt */
if (state & Hi) {
-#ifdef EXPERIMENTAL_POLLING
+#ifdef DSCC4_POLLING
while(!dscc4_tx_poll(dpriv, dev));
#endif
state &= ~Hi;
@@ -1349,17 +1441,18 @@ try:
* FIXME: it may be avoided. Re-re-re-read the manual.
*/
if (state & Err) {
- printk(KERN_ERR "%s: Tx ERR\n", dev->name);
- dpriv->stats.tx_errors++;
+ printk(KERN_ERR "%s (Tx): ERR\n", dev->name);
+ dev_to_hdlc(dev)->stats.tx_errors++;
state &= ~Err;
}
}
goto try;
}
-static __inline__ void dscc4_rx_irq(struct dscc4_pci_priv *priv, struct net_device *dev)
+static inline void dscc4_rx_irq(struct dscc4_pci_priv *priv,
+ struct dscc4_dev_priv *dpriv)
{
- struct dscc4_dev_priv *dpriv = dev->priv;
+ struct net_device *dev = hdlc_to_dev(&dpriv->hdlc);
u32 state;
int cur;
@@ -1371,18 +1464,9 @@ try:
dpriv->iqrx[cur] = 0;
dpriv->iqrx_current++;
-#ifdef DEBUG_PARANOID
- if (SOURCE_ID(state) != dpriv->dev_id) {
- printk(KERN_DEBUG "%s (Rx): Source Id=%d, state=%08x\n",
- dev->name, SOURCE_ID(state), state);
- goto try;
- }
- if (state & 0x0df80c00) {
- printk(KERN_DEBUG "%s (Rx): state=%08x (UFO alert)\n",
- dev->name, state);
- goto try;
- }
-#endif
+ if (state_check(state, dpriv, dev, "Tx"))
+ return;
+
if (!(state & SccEvt)){
struct RxFD *rx_fd;
@@ -1450,8 +1534,8 @@ try:
#ifdef DEBUG_PARANOIA
for (i = 0; evts[i].irq_name; i++) {
if (state & evts[i].mask) {
- printk(KERN_DEBUG "dscc4(%s): %s\n",
- dev->name, evts[i].irq_name);
+ printk(KERN_DEBUG "%s: %s\n", dev->name,
+ evts[i].irq_name);
if (!(state &= ~evts[i].mask))
goto try;
}
@@ -1492,7 +1576,7 @@ try:
if (!(rx_fd->state2 & DataComplete))
break;
if (rx_fd->state2 & FrameAborted) {
- dpriv->stats.rx_over_errors++;
+ dev_to_hdlc(dev)->stats.rx_over_errors++;
rx_fd->state1 |= Hold;
rx_fd->state2 = 0x00000000;
rx_fd->end = 0xbabeface;
@@ -1503,7 +1587,7 @@ try:
if (debug) {
if (dpriv->flags & RdoSet)
printk(KERN_DEBUG
- "dscc4: no RDO in Rx data\n");
+ "%s: no RDO in Rx data\n", DRV_NAME);
}
#ifdef DSCC4_RDO_EXPERIMENTAL_RECOVERY
/*
@@ -1540,6 +1624,7 @@ try:
goto try;
}
if (state & Flex) {
+ printk(KERN_DEBUG "%s: Flex. Ttttt...\n", DRV_NAME);
if (!(state &= ~Flex))
goto try;
}
@@ -1588,7 +1673,7 @@ static int dscc4_init_ring(struct net_device *dev)
* - Fe=1 (required by No=0 or we got an Err irq and must reset).
* Alas, it fails (and locks solid). Thus the introduction of a dummy
* skb to avoid No=0 (choose one: Ugly [ ] Tasteless [ ] VMS [ ]).
- * TODO: fiddle the tx threshold when time permits.
+ * 2002/01: errata sheet acknowledges the problem [X].
*/
struct sk_buff *skb;
@@ -1609,7 +1694,7 @@ static int dscc4_init_ring(struct net_device *dev)
rx_fd->state1 = HiDesc; /* Hi, no Hold */
rx_fd->state2 = 0x00000000;
rx_fd->end = 0xbabeface;
- rx_fd->state1 |= ((u32)(HDLC_MAX_MRU & RxSizeMax)) << 16;
+ rx_fd->state1 |= (RX_MAX(HDLC_MAX_MRU) << 16);
try_get_rx_skb(dpriv, i, dev);
i++;
rx_fd->next = (u32)(dpriv->rx_fd_dma + i*sizeof(struct RxFD));
@@ -1627,44 +1712,36 @@ err_out:
return -1;
}
-static struct net_device_stats *dscc4_get_stats(struct net_device *dev)
-{
- struct dscc4_dev_priv *priv = (struct dscc4_dev_priv *)dev->priv;
-
- return &priv->stats;
-}
-
static void __exit dscc4_remove_one(struct pci_dev *pdev)
{
struct dscc4_pci_priv *ppriv;
- struct net_device *root;
+ struct dscc4_dev_priv *root;
+ u32 ioaddr;
int i;
ppriv = pci_get_drvdata(pdev);
root = ppriv->root;
+ ioaddr = hdlc_to_dev(&root->hdlc)->base_addr;
free_irq(pdev->irq, root);
pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), ppriv->iqcfg,
ppriv->iqcfg_dma);
- for (i=0; i < dev_per_card; i++) {
- struct dscc4_dev_priv *dpriv;
- struct net_device *dev;
+ for (i = 0; i < dev_per_card; i++) {
+ struct dscc4_dev_priv *dpriv = root + i;
+ hdlc_device *hdlc = &dpriv->hdlc;
- dev = ppriv->root + i;
- dscc4_unattach_hdlc_device(dev);
+ unregister_hdlc_device(hdlc);
- dpriv = (struct dscc4_dev_priv *)dev->priv;
pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
dpriv->iqrx, dpriv->iqrx_dma);
pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
dpriv->iqtx, dpriv->iqtx_dma);
- unregister_netdev(dev);
}
- kfree(root->priv);
- iounmap((void *)root->base_addr);
+ iounmap((void *)ioaddr);
kfree(root);
+ pci_set_drvdata(pdev, NULL);
kfree(ppriv);
release_mem_region(pci_resource_start(pdev, 1),
@@ -1673,72 +1750,28 @@ static void __exit dscc4_remove_one(struct pci_dev *pdev)
pci_resource_len(pdev, 0));
}
-static int dscc4_hdlc_ioctl(struct hdlc_device_struct *hdlc, struct ifreq *ifr, int cmd)
-{
- struct net_device *dev = (struct net_device *)hdlc->netdev.base_addr;
- int result;
-
- /* FIXME: locking ? */
- result = dscc4_ioctl(dev, ifr, cmd);
- return result;
-}
-
-static int dscc4_hdlc_open(struct hdlc_device_struct *hdlc)
-{
- struct net_device *dev = (struct net_device *)(hdlc->netdev.base_addr);
-
- if (netif_running(dev)) {
- printk(KERN_DEBUG "%s: already running\n", dev->name); // DEBUG
- return 0;
- }
- return dscc4_open(dev);
-}
-
-static int dscc4_hdlc_xmit(hdlc_device *hdlc, struct sk_buff *skb)
-{
- struct net_device *dev = (struct net_device *)hdlc->netdev.base_addr;
-
- return dscc4_start_xmit(skb, dev);
-}
-
-static void dscc4_hdlc_close(struct hdlc_device_struct *hdlc)
+static int dscc4_hdlc_attach(hdlc_device *hdlc, unsigned short encoding,
+ unsigned short parity)
{
- struct net_device *dev = (struct net_device *)hdlc->netdev.base_addr;
- struct dscc4_dev_priv *dpriv;
+ struct dscc4_dev_priv *dpriv = hdlc_to_dev(hdlc)->priv;
- dpriv = dev->priv;
- --dpriv->usecount;
-}
-
-/* Operated under dev lock */
-static int dscc4_attach_hdlc_device(struct net_device *dev)
-{
- struct dscc4_dev_priv *dpriv = dev->priv;
- struct hdlc_device_struct *hdlc;
- int result;
-
- hdlc = &dpriv->hdlc;
- /* XXX: Don't look at the next line */
- hdlc->netdev.base_addr = (unsigned long)dev;
- hdlc->set_mode = NULL;
- hdlc->open = dscc4_hdlc_open;
- hdlc->close = dscc4_hdlc_close;
- hdlc->ioctl = dscc4_hdlc_ioctl;
- hdlc->xmit = dscc4_hdlc_xmit;
-
- result = register_hdlc_device(hdlc);
- if (!result)
- dpriv->usecount++;
- return result;
-}
+ if (encoding != ENCODING_NRZ &&
+ encoding != ENCODING_NRZI &&
+ encoding != ENCODING_FM_MARK &&
+ encoding != ENCODING_FM_SPACE &&
+ encoding != ENCODING_MANCHESTER)
+ return -EINVAL;
-/* Operated under dev lock */
-static void dscc4_unattach_hdlc_device(struct net_device *dev)
-{
- struct dscc4_dev_priv *dpriv = dev->priv;
+ if (parity != PARITY_NONE &&
+ parity != PARITY_CRC16_PR0_CCITT &&
+ parity != PARITY_CRC16_PR1_CCITT &&
+ parity != PARITY_CRC32_PR0_CCITT &&
+ parity != PARITY_CRC32_PR1_CCITT)
+ return -EINVAL;
- unregister_hdlc_device(&dpriv->hdlc);
- dpriv->usecount--;
+ dpriv->encoding = encoding;
+ dpriv->parity = parity;
+ return 0;
}
static struct pci_device_id dscc4_pci_tbl[] __devinitdata = {
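Beyond the switch to the generic HDLC layer, the dscc4 rework above replaces the open-coded SIOCDEVPRIVATE cases with table-driven register programming: sentinel-terminated struct thingie arrays looked up by dscc4_match() and applied with dscc4_patch_register(), plus a NULL-terminated list of setting callbacks walked by dscc4_set_iface(). (The crc[] table above appears to be missing the {-1, 0} terminator that dscc4_match() expects, which looks like an oversight.) A standalone miniature of the sentinel-terminated lookup idiom; the define/bits values here are invented for illustration:

#include <stdio.h>

struct thingie { int define; unsigned int bits; };

/* Sentinel-terminated table, mirroring the encoding/crc tables above. */
static const struct thingie demo[] = {
	{ 1, 0x00200000 },
	{ 2, 0x00400000 },
	{ -1, 0 },
};

static int match(const struct thingie *p, int value)
{
	int i;

	for (i = 0; p[i].define != -1; i++)
		if (p[i].define == value)
			return i;
	return -1;	/* unsupported value */
}

int main(void)
{
	int i = match(demo, 2);

	if (i >= 0)
		printf("bits=0x%08x\n", demo[i].bits);
	else
		printf("unsupported\n");
	return 0;
}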
diff --git a/drivers/net/zlib.c b/drivers/net/zlib.c
deleted file mode 100644
index 0a8eb7f71316..000000000000
--- a/drivers/net/zlib.c
+++ /dev/null
@@ -1,5371 +0,0 @@
-/*
- * This file is derived from various .h and .c files from the zlib-1.0.4
- * distribution by Jean-loup Gailly and Mark Adler, with some additions
- * by Paul Mackerras to aid in implementing Deflate compression and
- * decompression for PPP packets. See zlib.h for conditions of
- * distribution and use.
- *
- * Changes that have been made include:
- * - added Z_PACKET_FLUSH (see zlib.h for details)
- * - added inflateIncomp and deflateOutputPending
- * - allow strm->next_out to be NULL, meaning discard the output
- *
- * $Id: zlib.c,v 1.3 1997/12/23 10:47:42 paulus Exp $
- */
-
-/*
- * ==FILEVERSION 971210==
- *
- * This marker is used by the Linux installation script to determine
- * whether an up-to-date version of this file is already installed.
- */
-
-#define NO_DUMMY_DECL
-#define NO_ZCFUNCS
-#define MY_ZCALLOC
-
-#if defined(__FreeBSD__) && (defined(KERNEL) || defined(_KERNEL))
-#define inflate inflate_ppp /* FreeBSD already has an inflate :-( */
-#endif
-
-
-/* +++ zutil.h */
-/* zutil.h -- internal interface and configuration of the compression library
- * Copyright (C) 1995-1996 Jean-loup Gailly.
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-/* From: zutil.h,v 1.16 1996/07/24 13:41:13 me Exp $ */
-
-#ifndef _Z_UTIL_H
-#define _Z_UTIL_H
-
-#include "zlib.h"
-
-#if defined(KERNEL) || defined(_KERNEL)
-/* Assume this is a *BSD or SVR4 kernel */
-#include <sys/types.h>
-#include <sys/time.h>
-#include <sys/systm.h>
-# define HAVE_MEMCPY
-# define memcpy(d, s, n) bcopy((s), (d), (n))
-# define memset(d, v, n) bzero((d), (n))
-# define memcmp bcmp
-
-#else
-#if defined(__KERNEL__)
-/* Assume this is a Linux kernel */
-#include <linux/string.h>
-#define HAVE_MEMCPY
-
-#else /* not kernel */
-
-#if defined(MSDOS)||defined(VMS)||defined(CRAY)||defined(WIN32)||defined(RISCOS)
-# include <stddef.h>
-# include <errno.h>
-#else
- extern int errno;
-#endif
-#ifdef STDC
-# include <string.h>
-# include <stdlib.h>
-#endif
-#endif /* __KERNEL__ */
-#endif /* _KERNEL || KERNEL */
-
-#ifndef local
-# define local static
-#endif
-/* compile with -Dlocal if your debugger can't find static symbols */
-
-typedef unsigned char uch;
-typedef uch FAR uchf;
-typedef unsigned short ush;
-typedef ush FAR ushf;
-typedef unsigned long ulg;
-
-extern const char *z_errmsg[10]; /* indexed by 2-zlib_error */
-/* (size given to avoid silly warnings with Visual C++) */
-
-#define ERR_MSG(err) z_errmsg[Z_NEED_DICT-(err)]
-
-#define ERR_RETURN(strm,err) \
- return (strm->msg = (char*)ERR_MSG(err), (err))
-/* To be used only when the state is known to be valid */
-
- /* common constants */
-
-#ifndef DEF_WBITS
-# define DEF_WBITS MAX_WBITS
-#endif
-/* default windowBits for decompression. MAX_WBITS is for compression only */
-
-#if MAX_MEM_LEVEL >= 8
-# define DEF_MEM_LEVEL 8
-#else
-# define DEF_MEM_LEVEL MAX_MEM_LEVEL
-#endif
-/* default memLevel */
-
-#define STORED_BLOCK 0
-#define STATIC_TREES 1
-#define DYN_TREES 2
-/* The three kinds of block type */
-
-#define MIN_MATCH 3
-#define MAX_MATCH 258
-/* The minimum and maximum match lengths */
-
-#define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */
-
- /* target dependencies */
-
-#ifdef MSDOS
-# define OS_CODE 0x00
-# ifdef __TURBOC__
-# include <alloc.h>
-# else /* MSC or DJGPP */
-# include <malloc.h>
-# endif
-#endif
-
-#ifdef OS2
-# define OS_CODE 0x06
-#endif
-
-#ifdef WIN32 /* Window 95 & Windows NT */
-# define OS_CODE 0x0b
-#endif
-
-#if defined(VAXC) || defined(VMS)
-# define OS_CODE 0x02
-# define FOPEN(name, mode) \
- fopen((name), (mode), "mbc=60", "ctx=stm", "rfm=fix", "mrs=512")
-#endif
-
-#ifdef AMIGA
-# define OS_CODE 0x01
-#endif
-
-#if defined(ATARI) || defined(atarist)
-# define OS_CODE 0x05
-#endif
-
-#ifdef MACOS
-# define OS_CODE 0x07
-#endif
-
-#ifdef __50SERIES /* Prime/PRIMOS */
-# define OS_CODE 0x0F
-#endif
-
-#ifdef TOPS20
-# define OS_CODE 0x0a
-#endif
-
-#if defined(_BEOS_) || defined(RISCOS)
-# define fdopen(fd,mode) NULL /* No fdopen() */
-#endif
-
- /* Common defaults */
-
-#ifndef OS_CODE
-# define OS_CODE 0x03 /* assume Unix */
-#endif
-
-#ifndef FOPEN
-# define FOPEN(name, mode) fopen((name), (mode))
-#endif
-
- /* functions */
-
-#ifdef HAVE_STRERROR
- extern char *strerror OF((int));
-# define zstrerror(errnum) strerror(errnum)
-#else
-# define zstrerror(errnum) ""
-#endif
-
-#if defined(pyr)
-# define NO_MEMCPY
-#endif
-#if (defined(M_I86SM) || defined(M_I86MM)) && !defined(_MSC_VER)
- /* Use our own functions for small and medium model with MSC <= 5.0.
- * You may have to use the same strategy for Borland C (untested).
- */
-# define NO_MEMCPY
-#endif
-#if defined(STDC) && !defined(HAVE_MEMCPY) && !defined(NO_MEMCPY)
-# define HAVE_MEMCPY
-#endif
-#ifdef HAVE_MEMCPY
-# ifdef SMALL_MEDIUM /* MSDOS small or medium model */
-# define zmemcpy _fmemcpy
-# define zmemcmp _fmemcmp
-# define zmemzero(dest, len) _fmemset(dest, 0, len)
-# else
-# define zmemcpy memcpy
-# define zmemcmp memcmp
-# define zmemzero(dest, len) memset(dest, 0, len)
-# endif
-#else
- extern void zmemcpy OF((Bytef* dest, Bytef* source, uInt len));
- extern int zmemcmp OF((Bytef* s1, Bytef* s2, uInt len));
- extern void zmemzero OF((Bytef* dest, uInt len));
-#endif
-
-/* Diagnostic functions */
-#ifdef DEBUG_ZLIB
-# include <stdio.h>
-# ifndef verbose
-# define verbose 0
-# endif
- extern void z_error OF((char *m));
-# define Assert(cond,msg) {if(!(cond)) z_error(msg);}
-# define Trace(x) fprintf x
-# define Tracev(x) {if (verbose) fprintf x ;}
-# define Tracevv(x) {if (verbose>1) fprintf x ;}
-# define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
-# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
-#else
-# define Assert(cond,msg)
-# define Trace(x)
-# define Tracev(x)
-# define Tracevv(x)
-# define Tracec(c,x)
-# define Tracecv(c,x)
-#endif
-
-
-typedef uLong (*check_func) OF((uLong check, const Bytef *buf, uInt len));
-
-voidpf zcalloc OF((voidpf opaque, unsigned items, unsigned size));
-void zcfree OF((voidpf opaque, voidpf ptr));
-
-#define ZALLOC(strm, items, size) \
- (*((strm)->zalloc))((strm)->opaque, (items), (size))
-#define ZFREE(strm, addr) (*((strm)->zfree))((strm)->opaque, (voidpf)(addr))
-#define TRY_FREE(s, p) {if (p) ZFREE(s, p);}
-
-#endif /* _Z_UTIL_H */
-/* --- zutil.h */
-
-/* +++ deflate.h */
-/* deflate.h -- internal compression state
- * Copyright (C) 1995-1996 Jean-loup Gailly
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-/* From: deflate.h,v 1.10 1996/07/02 12:41:00 me Exp $ */
-
-#ifndef _DEFLATE_H
-#define _DEFLATE_H
-
-/* #include "zutil.h" */
-
-/* ===========================================================================
- * Internal compression state.
- */
-
-#define LENGTH_CODES 29
-/* number of length codes, not counting the special END_BLOCK code */
-
-#define LITERALS 256
-/* number of literal bytes 0..255 */
-
-#define L_CODES (LITERALS+1+LENGTH_CODES)
-/* number of Literal or Length codes, including the END_BLOCK code */
-
-#define D_CODES 30
-/* number of distance codes */
-
-#define BL_CODES 19
-/* number of codes used to transfer the bit lengths */
-
-#define HEAP_SIZE (2*L_CODES+1)
-/* maximum heap size */
-
-#define MAX_BITS 15
-/* All codes must not exceed MAX_BITS bits */
-
-#define INIT_STATE 42
-#define BUSY_STATE 113
-#define FINISH_STATE 666
-/* Stream status */
-
-
-/* Data structure describing a single value and its code string. */
-typedef struct ct_data_s {
- union {
- ush freq; /* frequency count */
- ush code; /* bit string */
- } fc;
- union {
- ush dad; /* father node in Huffman tree */
- ush len; /* length of bit string */
- } dl;
-} FAR ct_data;
-
-#define Freq fc.freq
-#define Code fc.code
-#define Dad dl.dad
-#define Len dl.len
-
-typedef struct static_tree_desc_s static_tree_desc;
-
-typedef struct tree_desc_s {
- ct_data *dyn_tree; /* the dynamic tree */
- int max_code; /* largest code with non zero frequency */
- static_tree_desc *stat_desc; /* the corresponding static tree */
-} FAR tree_desc;
-
-typedef ush Pos;
-typedef Pos FAR Posf;
-typedef unsigned IPos;
-
-/* A Pos is an index in the character window. We use short instead of int to
- * save space in the various tables. IPos is used only for parameter passing.
- */
-
-typedef struct deflate_state {
- z_streamp strm; /* pointer back to this zlib stream */
- int status; /* as the name implies */
- Bytef *pending_buf; /* output still pending */
- ulg pending_buf_size; /* size of pending_buf */
- Bytef *pending_out; /* next pending byte to output to the stream */
- int pending; /* nb of bytes in the pending buffer */
- int noheader; /* suppress zlib header and adler32 */
- Byte data_type; /* UNKNOWN, BINARY or ASCII */
- Byte method; /* STORED (for zip only) or DEFLATED */
- int last_flush; /* value of flush param for previous deflate call */
-
- /* used by deflate.c: */
-
- uInt w_size; /* LZ77 window size (32K by default) */
- uInt w_bits; /* log2(w_size) (8..16) */
- uInt w_mask; /* w_size - 1 */
-
- Bytef *window;
- /* Sliding window. Input bytes are read into the second half of the window,
- * and move to the first half later to keep a dictionary of at least wSize
- * bytes. With this organization, matches are limited to a distance of
- * wSize-MAX_MATCH bytes, but this ensures that IO is always
- * performed with a length multiple of the block size. Also, it limits
- * the window size to 64K, which is quite useful on MSDOS.
- * To do: use the user input buffer as sliding window.
- */
-
- ulg window_size;
- /* Actual size of window: 2*wSize, except when the user input buffer
- * is directly used as sliding window.
- */
-
- Posf *prev;
- /* Link to older string with same hash index. To limit the size of this
- * array to 64K, this link is maintained only for the last 32K strings.
- * An index in this array is thus a window index modulo 32K.
- */
-
- Posf *head; /* Heads of the hash chains or NIL. */
-
- uInt ins_h; /* hash index of string to be inserted */
- uInt hash_size; /* number of elements in hash table */
- uInt hash_bits; /* log2(hash_size) */
- uInt hash_mask; /* hash_size-1 */
-
- uInt hash_shift;
- /* Number of bits by which ins_h must be shifted at each input
- * step. It must be such that after MIN_MATCH steps, the oldest
- * byte no longer takes part in the hash key, that is:
- * hash_shift * MIN_MATCH >= hash_bits
- */
-
- long block_start;
- /* Window position at the beginning of the current output block. Gets
- * negative when the window is moved backwards.
- */
-
- uInt match_length; /* length of best match */
- IPos prev_match; /* previous match */
- int match_available; /* set if previous match exists */
- uInt strstart; /* start of string to insert */
- uInt match_start; /* start of matching string */
- uInt lookahead; /* number of valid bytes ahead in window */
-
- uInt prev_length;
- /* Length of the best match at previous step. Matches not greater than this
- * are discarded. This is used in the lazy match evaluation.
- */
-
- uInt max_chain_length;
- /* To speed up deflation, hash chains are never searched beyond this
- * length. A higher limit improves compression ratio but degrades the
- * speed.
- */
-
- uInt max_lazy_match;
- /* Attempt to find a better match only when the current match is strictly
- * smaller than this value. This mechanism is used only for compression
- * levels >= 4.
- */
-# define max_insert_length max_lazy_match
- /* Insert new strings in the hash table only if the match length is not
- * greater than this length. This saves time but degrades compression.
- * max_insert_length is used only for compression levels <= 3.
- */
-
- int level; /* compression level (1..9) */
- int strategy; /* favor or force Huffman coding*/
-
- uInt good_match;
- /* Use a faster search when the previous match is longer than this */
-
- int nice_match; /* Stop searching when current match exceeds this */
-
- /* used by trees.c: */
- /* Didn't use ct_data typedef below to suppress compiler warning */
- struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */
- struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */
- struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */
-
- struct tree_desc_s l_desc; /* desc. for literal tree */
- struct tree_desc_s d_desc; /* desc. for distance tree */
- struct tree_desc_s bl_desc; /* desc. for bit length tree */
-
- ush bl_count[MAX_BITS+1];
- /* number of codes at each bit length for an optimal tree */
-
- int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */
- int heap_len; /* number of elements in the heap */
- int heap_max; /* element of largest frequency */
- /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
- * The same heap array is used to build all trees.
- */
-
- uch depth[2*L_CODES+1];
- /* Depth of each subtree used as tie breaker for trees of equal frequency
- */
-
- uchf *l_buf; /* buffer for literals or lengths */
-
- uInt lit_bufsize;
- /* Size of match buffer for literals/lengths. There are 4 reasons for
- * limiting lit_bufsize to 64K:
- * - frequencies can be kept in 16 bit counters
- * - if compression is not successful for the first block, all input
- * data is still in the window so we can still emit a stored block even
- * when input comes from standard input. (This can also be done for
- * all blocks if lit_bufsize is not greater than 32K.)
- * - if compression is not successful for a file smaller than 64K, we can
- * even emit a stored file instead of a stored block (saving 5 bytes).
- * This is applicable only for zip (not gzip or zlib).
- * - creating new Huffman trees less frequently may not provide fast
- * adaptation to changes in the input data statistics. (Take for
- * example a binary file with poorly compressible code followed by
- * a highly compressible string table.) Smaller buffer sizes give
- * fast adaptation but have of course the overhead of transmitting
- * trees more frequently.
- * - I can't count above 4
- */
-
- uInt last_lit; /* running index in l_buf */
-
- ushf *d_buf;
- /* Buffer for distances. To simplify the code, d_buf and l_buf have
- * the same number of elements. To use different lengths, an extra flag
- * array would be necessary.
- */
-
- ulg opt_len; /* bit length of current block with optimal trees */
- ulg static_len; /* bit length of current block with static trees */
- ulg compressed_len; /* total bit length of compressed file */
- uInt matches; /* number of string matches in current block */
- int last_eob_len; /* bit length of EOB code for last block */
-
-#ifdef DEBUG_ZLIB
- ulg bits_sent; /* bit length of the compressed data */
-#endif
-
- ush bi_buf;
- /* Output buffer. Bits are inserted starting at the bottom (least
- * significant bits).
- */
- int bi_valid;
- /* Number of valid bits in bi_buf. All bits above the last valid bit
- * are always zero.
- */
-
-} FAR deflate_state;
-
-/* Output a byte on the stream.
- * IN assertion: there is enough room in pending_buf.
- */
-#define put_byte(s, c) {s->pending_buf[s->pending++] = (c);}
-
-
-#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
-/* Minimum amount of lookahead, except at the end of the input file.
- * See deflate.c for comments about the MIN_MATCH+1.
- */
-
-#define MAX_DIST(s) ((s)->w_size-MIN_LOOKAHEAD)
-/* In order to simplify the code, particularly on 16 bit machines, match
- * distances are limited to MAX_DIST instead of WSIZE.
- */
-
- /* in trees.c */
-void _tr_init OF((deflate_state *s));
-int _tr_tally OF((deflate_state *s, unsigned dist, unsigned lc));
-ulg _tr_flush_block OF((deflate_state *s, charf *buf, ulg stored_len,
- int eof));
-void _tr_align OF((deflate_state *s));
-void _tr_stored_block OF((deflate_state *s, charf *buf, ulg stored_len,
- int eof));
-void _tr_stored_type_only OF((deflate_state *));
-
-#endif
-/* --- deflate.h */
-
-/* +++ deflate.c */
-/* deflate.c -- compress data using the deflation algorithm
- * Copyright (C) 1995-1996 Jean-loup Gailly.
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/*
- * ALGORITHM
- *
- * The "deflation" process depends on being able to identify portions
- * of the input text which are identical to earlier input (within a
- * sliding window trailing behind the input currently being processed).
- *
- * The most straightforward technique turns out to be the fastest for
- * most input files: try all possible matches and select the longest.
- * The key feature of this algorithm is that insertions into the string
- * dictionary are very simple and thus fast, and deletions are avoided
- * completely. Insertions are performed at each input character, whereas
- * string matches are performed only when the previous match ends. So it
- * is preferable to spend more time in matches to allow very fast string
- * insertions and avoid deletions. The matching algorithm for small
- * strings is inspired from that of Rabin & Karp. A brute force approach
- * is used to find longer strings when a small match has been found.
- * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze
- * (by Leonid Broukhis).
- * A previous version of this file used a more sophisticated algorithm
- * (by Fiala and Greene) which is guaranteed to run in linear amortized
- * time, but has a larger average cost, uses more memory and is patented.
- * However the F&G algorithm may be faster for some highly redundant
- * files if the parameter max_chain_length (described below) is too large.
- *
- * ACKNOWLEDGEMENTS
- *
- * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and
- * I found it in 'freeze' written by Leonid Broukhis.
- * Thanks to many people for bug reports and testing.
- *
- * REFERENCES
- *
- * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification".
- * Available in ftp://ds.internic.net/rfc/rfc1951.txt
- *
- * A description of the Rabin and Karp algorithm is given in the book
- * "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
- *
- * Fiala,E.R., and Greene,D.H.
- *      Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-505
- *
- */
-
-/* From: deflate.c,v 1.15 1996/07/24 13:40:58 me Exp $ */
-
-/* #include "deflate.h" */
-
-char deflate_copyright[] = " deflate 1.0.4 Copyright 1995-1996 Jean-loup Gailly ";
-/*
- If you use the zlib library in a product, an acknowledgment is welcome
- in the documentation of your product. If for some reason you cannot
- include such an acknowledgment, I would appreciate that you keep this
- copyright string in the executable of your product.
- */
-
-/* ===========================================================================
- * Function prototypes.
- */
-typedef enum {
- need_more, /* block not completed, need more input or more output */
- block_done, /* block flush performed */
- finish_started, /* finish started, need only more output at next deflate */
- finish_done /* finish done, accept no more input or output */
-} block_state;
-
-typedef block_state (*compress_func) OF((deflate_state *s, int flush));
-/* Compression function. Returns the block state after the call. */
-
-local void fill_window OF((deflate_state *s));
-local block_state deflate_stored OF((deflate_state *s, int flush));
-local block_state deflate_fast OF((deflate_state *s, int flush));
-local block_state deflate_slow OF((deflate_state *s, int flush));
-local void lm_init OF((deflate_state *s));
-local void putShortMSB OF((deflate_state *s, uInt b));
-local void flush_pending OF((z_streamp strm));
-local int read_buf OF((z_streamp strm, charf *buf, unsigned size));
-#ifdef ASMV
- void match_init OF((void)); /* asm code initialization */
- uInt longest_match OF((deflate_state *s, IPos cur_match));
-#else
-local uInt longest_match OF((deflate_state *s, IPos cur_match));
-#endif
-
-#ifdef DEBUG_ZLIB
-local void check_match OF((deflate_state *s, IPos start, IPos match,
- int length));
-#endif
-
-/* ===========================================================================
- * Local data
- */
-
-#define NIL 0
-/* Tail of hash chains */
-
-#ifndef TOO_FAR
-# define TOO_FAR 4096
-#endif
-/* Matches of length 3 are discarded if their distance exceeds TOO_FAR */
-
-#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
-/* Minimum amount of lookahead, except at the end of the input file.
- * See deflate.c for comments about the MIN_MATCH+1.
- */
-
-/* Values for max_lazy_match, good_match and max_chain_length, depending on
- * the desired pack level (0..9). The values given below have been tuned to
- * exclude worst case performance for pathological files. Better values may be
- * found for specific files.
- */
-typedef struct config_s {
- ush good_length; /* reduce lazy search above this match length */
- ush max_lazy; /* do not perform lazy search above this match length */
- ush nice_length; /* quit search above this match length */
- ush max_chain;
- compress_func func;
-} config;
-
-local config configuration_table[10] = {
-/* good lazy nice chain */
-/* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */
-/* 1 */ {4, 4, 8, 4, deflate_fast}, /* maximum speed, no lazy matches */
-/* 2 */ {4, 5, 16, 8, deflate_fast},
-/* 3 */ {4, 6, 32, 32, deflate_fast},
-
-/* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */
-/* 5 */ {8, 16, 32, 32, deflate_slow},
-/* 6 */ {8, 16, 128, 128, deflate_slow},
-/* 7 */ {8, 32, 128, 256, deflate_slow},
-/* 8 */ {32, 128, 258, 1024, deflate_slow},
-/* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* maximum compression */
-
-/* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4
- * For deflate_fast() (levels <= 3) good is ignored and lazy has a different
- * meaning.
- */
-
-#define EQUAL 0
-/* result of memcmp for equal strings */
-
-#ifndef NO_DUMMY_DECL
-struct static_tree_desc_s {int dummy;}; /* for buggy compilers */
-#endif
-
-/* ===========================================================================
- * Update a hash value with the given input byte
- * IN assertion: all calls to UPDATE_HASH are made with consecutive
- * input characters, so that a running hash key can be computed from the
- * previous key instead of complete recalculation each time.
- */
-#define UPDATE_HASH(s,h,c) (h = (((h)<<s->hash_shift) ^ (c)) & s->hash_mask)
-
-
-/* ===========================================================================
- * Insert string str in the dictionary and set match_head to the previous head
- * of the hash chain (the most recent string with same hash key). Return
- * the previous length of the hash chain.
- * IN assertion: all calls to INSERT_STRING are made with consecutive
- * input characters and the first MIN_MATCH bytes of str are valid
- * (except for the last MIN_MATCH-1 bytes of the input file).
- */
-#define INSERT_STRING(s, str, match_head) \
- (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \
- s->prev[(str) & s->w_mask] = match_head = s->head[s->ins_h], \
- s->head[s->ins_h] = (Pos)(str))
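/*
 * Editor's sketch (not part of the original source): how the rolling hash
 * behind UPDATE_HASH()/INSERT_STRING() advances one byte at a time.  With
 * the default memLevel of 8, hash_bits is 15, so hash_shift = (15+3-1)/3 = 5
 * and after MIN_MATCH (3) further shifts a byte has left the 15-bit key.
 * The names TOY_HASH_*, roll_hash and main() are hypothetical and exist only
 * for this standalone example.
 */
#include <stdio.h>

#define TOY_HASH_BITS  15
#define TOY_HASH_SHIFT ((TOY_HASH_BITS + 3 - 1) / 3)      /* 5 */
#define TOY_HASH_MASK  ((1u << TOY_HASH_BITS) - 1)

static unsigned roll_hash(unsigned h, unsigned char c)
{
    /* same update rule as UPDATE_HASH(): shift, mix in the new byte, mask */
    return ((h << TOY_HASH_SHIFT) ^ c) & TOY_HASH_MASK;
}

int main(void)
{
    const unsigned char buf[] = "abcabc";
    unsigned h = 0;
    unsigned i;

    /* prime the key with the first two bytes (roughly what fill_window()
     * does), then roll it forward one byte per position */
    h = roll_hash(h, buf[0]);
    h = roll_hash(h, buf[1]);
    for (i = 2; i < sizeof(buf) - 1; i++) {
        h = roll_hash(h, buf[i]);
        /* positions 0 and 3 both start "abc", so they produce the same key
         * and would land on the same head[]/prev[] hash chain */
        printf("pos %u: key 0x%04x\n", i - 2, h);
    }
    return 0;
}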
-
-/* ===========================================================================
- * Initialize the hash table (avoiding 64K overflow for 16 bit systems).
- * prev[] will be initialized on the fly.
- */
-#define CLEAR_HASH(s) \
- s->head[s->hash_size-1] = NIL; \
- zmemzero((charf *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head));
-
-/* ========================================================================= */
-int deflateInit_(strm, level, version, stream_size)
- z_streamp strm;
- int level;
- const char *version;
- int stream_size;
-{
- return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL,
- Z_DEFAULT_STRATEGY, version, stream_size);
- /* To do: ignore strm->next_in if we use it as window */
-}
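/*
 * Editor's sketch of the calling sequence that funnels into deflateInit_()
 * via the deflateInit() convenience macro -- ordinary zlib usage, shown
 * userland-style for clarity.  sketch_zalloc, sketch_zfree and
 * compress_buffer are hypothetical names for this example; a kernel caller
 * such as ppp_deflate installs its own allocation hooks instead.
 */
#include <stdlib.h>

static voidpf sketch_zalloc(voidpf opaque, uInt items, uInt size)
{
    (void)opaque;
    return calloc(items, size);
}

static void sketch_zfree(voidpf opaque, voidpf ptr)
{
    (void)opaque;
    free(ptr);
}

static int compress_buffer(Bytef *dst, uLongf *dst_len,
                           const Bytef *src, uLong src_len)
{
    z_stream strm;
    int err;

    strm.zalloc = sketch_zalloc;
    strm.zfree  = sketch_zfree;
    strm.opaque = Z_NULL;

    err = deflateInit(&strm, Z_DEFAULT_COMPRESSION);
    if (err != Z_OK)
        return err;

    strm.next_in   = (Bytef *)src;
    strm.avail_in  = (uInt)src_len;
    strm.next_out  = dst;
    strm.avail_out = (uInt)*dst_len;

    /* single shot: if dst is large enough, one Z_FINISH call compresses it all */
    err = deflate(&strm, Z_FINISH);
    if (err == Z_STREAM_END) {
        *dst_len = strm.total_out;
        err = Z_OK;
    } else if (err == Z_OK) {
        err = Z_BUF_ERROR;          /* output buffer was too small */
    }
    deflateEnd(&strm);
    return err;
}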
-
-/* ========================================================================= */
-int deflateInit2_(strm, level, method, windowBits, memLevel, strategy,
- version, stream_size)
- z_streamp strm;
- int level;
- int method;
- int windowBits;
- int memLevel;
- int strategy;
- const char *version;
- int stream_size;
-{
- deflate_state *s;
- int noheader = 0;
- static char* my_version = ZLIB_VERSION;
-
- ushf *overlay;
- /* We overlay pending_buf and d_buf+l_buf. This works since the average
- * output size for (length,distance) codes is <= 24 bits.
- */
-
- if (version == Z_NULL || version[0] != my_version[0] ||
- stream_size != sizeof(z_stream)) {
- return Z_VERSION_ERROR;
- }
- if (strm == Z_NULL) return Z_STREAM_ERROR;
-
- strm->msg = Z_NULL;
-#ifndef NO_ZCFUNCS
- if (strm->zalloc == Z_NULL) {
- strm->zalloc = zcalloc;
- strm->opaque = (voidpf)0;
- }
- if (strm->zfree == Z_NULL) strm->zfree = zcfree;
-#endif
-
- if (level == Z_DEFAULT_COMPRESSION) level = 6;
-
- if (windowBits < 0) { /* undocumented feature: suppress zlib header */
- noheader = 1;
- windowBits = -windowBits;
- }
- if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED ||
- windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||
- strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
- return Z_STREAM_ERROR;
- }
- s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state));
- if (s == Z_NULL) return Z_MEM_ERROR;
- strm->state = (struct internal_state FAR *)s;
- s->strm = strm;
-
- s->noheader = noheader;
- s->w_bits = windowBits;
- s->w_size = 1 << s->w_bits;
- s->w_mask = s->w_size - 1;
-
- s->hash_bits = memLevel + 7;
- s->hash_size = 1 << s->hash_bits;
- s->hash_mask = s->hash_size - 1;
- s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH);
-
- s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte));
- s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos));
- s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos));
-
- s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
-
- overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2);
- s->pending_buf = (uchf *) overlay;
- s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L);
-
- if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL ||
- s->pending_buf == Z_NULL) {
- strm->msg = (char*)ERR_MSG(Z_MEM_ERROR);
- deflateEnd (strm);
- return Z_MEM_ERROR;
- }
- s->d_buf = overlay + s->lit_bufsize/sizeof(ush);
- s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize;
-
- s->level = level;
- s->strategy = strategy;
- s->method = (Byte)method;
-
- return deflateReset(strm);
-}
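/*
 * Editor's note: a worked sizing example for the allocations above,
 * assuming the values deflateInit() requests through MAX_WBITS and
 * DEF_MEM_LEVEL (windowBits = 15, memLevel = 8):
 *
 *   w_size      = 1 << 15                         = 32768
 *   window      = w_size * 2 bytes                = 65536 bytes
 *   prev        = w_size * sizeof(Pos)            = 32768 * 2 = 65536 bytes
 *   hash_size   = 1 << (8 + 7)                    = 32768
 *   head        = hash_size * sizeof(Pos)         = 65536 bytes
 *   lit_bufsize = 1 << (8 + 6)                    = 16384
 *   overlay     = lit_bufsize * (sizeof(ush) + 2) = 16384 * 4 = 65536 bytes
 *
 * i.e. roughly 256 KB of working storage per deflate stream, plus the
 * deflate_state structure itself.
 */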
-
-/* ========================================================================= */
-int deflateSetDictionary (strm, dictionary, dictLength)
- z_streamp strm;
- const Bytef *dictionary;
- uInt dictLength;
-{
- deflate_state *s;
- uInt length = dictLength;
- uInt n;
- IPos hash_head = 0;
-
- if (strm == Z_NULL || strm->state == Z_NULL || dictionary == Z_NULL)
- return Z_STREAM_ERROR;
-
- s = (deflate_state *) strm->state;
- if (s->status != INIT_STATE) return Z_STREAM_ERROR;
-
- strm->adler = adler32(strm->adler, dictionary, dictLength);
-
- if (length < MIN_MATCH) return Z_OK;
- if (length > MAX_DIST(s)) {
- length = MAX_DIST(s);
-#ifndef USE_DICT_HEAD
- dictionary += dictLength - length; /* use the tail of the dictionary */
-#endif
- }
- zmemcpy((charf *)s->window, dictionary, length);
- s->strstart = length;
- s->block_start = (long)length;
-
- /* Insert all strings in the hash table (except for the last two bytes).
- * s->lookahead stays null, so s->ins_h will be recomputed at the next
- * call of fill_window.
- */
- s->ins_h = s->window[0];
- UPDATE_HASH(s, s->ins_h, s->window[1]);
- for (n = 0; n <= length - MIN_MATCH; n++) {
- INSERT_STRING(s, n, hash_head);
- }
- if (hash_head) hash_head = 0; /* to make compiler happy */
- return Z_OK;
-}
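/*
 * Editor's note: deflateSetDictionary() must be called after deflateInit()
 * and before the first deflate() call -- the INIT_STATE check above enforces
 * this.  The adler32 of the dictionary is left in strm->adler so that
 * deflate() can emit it after the zlib header via the PRESET_DICT path in
 * deflate() below; the receiving side supplies the same dictionary to
 * inflateSetDictionary() when inflate() reports Z_NEED_DICT.  Typical order:
 *
 *     deflateInit(&strm, level);
 *     deflateSetDictionary(&strm, dict, dict_len);   (before any deflate())
 *     ... deflate(&strm, flush) as usual ...
 */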
-
-/* ========================================================================= */
-int deflateReset (strm)
- z_streamp strm;
-{
- deflate_state *s;
-
- if (strm == Z_NULL || strm->state == Z_NULL ||
- strm->zalloc == Z_NULL || strm->zfree == Z_NULL) return Z_STREAM_ERROR;
-
- strm->total_in = strm->total_out = 0;
- strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */
- strm->data_type = Z_UNKNOWN;
-
- s = (deflate_state *)strm->state;
- s->pending = 0;
- s->pending_out = s->pending_buf;
-
- if (s->noheader < 0) {
- s->noheader = 0; /* was set to -1 by deflate(..., Z_FINISH); */
- }
- s->status = s->noheader ? BUSY_STATE : INIT_STATE;
- strm->adler = 1;
- s->last_flush = Z_NO_FLUSH;
-
- _tr_init(s);
- lm_init(s);
-
- return Z_OK;
-}
-
-/* ========================================================================= */
-int deflateParams(strm, level, strategy)
- z_streamp strm;
- int level;
- int strategy;
-{
- deflate_state *s;
- compress_func func;
- int err = Z_OK;
-
- if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
- s = (deflate_state *) strm->state;
-
- if (level == Z_DEFAULT_COMPRESSION) {
- level = 6;
- }
- if (level < 0 || level > 9 || strategy < 0 || strategy > Z_HUFFMAN_ONLY) {
- return Z_STREAM_ERROR;
- }
- func = configuration_table[s->level].func;
-
- if (func != configuration_table[level].func && strm->total_in != 0) {
- /* Flush the last buffer: */
- err = deflate(strm, Z_PARTIAL_FLUSH);
- }
- if (s->level != level) {
- s->level = level;
- s->max_lazy_match = configuration_table[level].max_lazy;
- s->good_match = configuration_table[level].good_length;
- s->nice_match = configuration_table[level].nice_length;
- s->max_chain_length = configuration_table[level].max_chain;
- }
- s->strategy = strategy;
- return err;
-}
-
-/* =========================================================================
- * Put a short in the pending buffer. The 16-bit value is put in MSB order.
- * IN assertion: the stream state is correct and there is enough room in
- * pending_buf.
- */
-local void putShortMSB (s, b)
- deflate_state *s;
- uInt b;
-{
- put_byte(s, (Byte)(b >> 8));
- put_byte(s, (Byte)(b & 0xff));
-}
-
-/* =========================================================================
- * Flush as much pending output as possible. All deflate() output goes
- * through this function so some applications may wish to modify it
- * to avoid allocating a large strm->next_out buffer and copying into it.
- * (See also read_buf()).
- */
-local void flush_pending(strm)
- z_streamp strm;
-{
- deflate_state *s = (deflate_state *) strm->state;
- unsigned len = s->pending;
-
- if (len > strm->avail_out) len = strm->avail_out;
- if (len == 0) return;
-
- if (strm->next_out != Z_NULL) {
- zmemcpy(strm->next_out, s->pending_out, len);
- strm->next_out += len;
- }
- s->pending_out += len;
- strm->total_out += len;
- strm->avail_out -= len;
- s->pending -= len;
- if (s->pending == 0) {
- s->pending_out = s->pending_buf;
- }
-}
-
-/* ========================================================================= */
-int deflate (strm, flush)
- z_streamp strm;
- int flush;
-{
- int old_flush; /* value of flush param for previous deflate call */
- deflate_state *s;
-
- if (strm == Z_NULL || strm->state == Z_NULL ||
- flush > Z_FINISH || flush < 0) {
- return Z_STREAM_ERROR;
- }
- s = (deflate_state *) strm->state;
-
- if ((strm->next_in == Z_NULL && strm->avail_in != 0) ||
- (s->status == FINISH_STATE && flush != Z_FINISH)) {
- ERR_RETURN(strm, Z_STREAM_ERROR);
- }
- if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR);
-
- s->strm = strm; /* just in case */
- old_flush = s->last_flush;
- s->last_flush = flush;
-
- /* Write the zlib header */
- if (s->status == INIT_STATE) {
-
- uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8;
- uInt level_flags = (s->level-1) >> 1;
-
- if (level_flags > 3) level_flags = 3;
- header |= (level_flags << 6);
- if (s->strstart != 0) header |= PRESET_DICT;
- header += 31 - (header % 31);
-
- s->status = BUSY_STATE;
- putShortMSB(s, header);
-
- /* Save the adler32 of the preset dictionary: */
- if (s->strstart != 0) {
- putShortMSB(s, (uInt)(strm->adler >> 16));
- putShortMSB(s, (uInt)(strm->adler & 0xffff));
- }
- strm->adler = 1L;
- }
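    /*
     * Editor's note -- worked example of the header arithmetic above, for
     * the common case windowBits = 15, level = 6, no preset dictionary:
     *   CMF = Z_DEFLATED + ((15-8) << 4) = 0x78
     *   level_flags = (6-1) >> 1 = 2, so FLG starts as 2 << 6 = 0x80
     *   header = 0x7880 = 30848; 30848 % 31 = 3, so header += 31 - 3 = 28,
     *   giving 0x789C -- the familiar two-byte zlib header, with
     *   (0x78*256 + 0x9C) % 31 == 0 as RFC 1950 requires.
     */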
-
- /* Flush as much pending output as possible */
- if (s->pending != 0) {
- flush_pending(strm);
- if (strm->avail_out == 0) {
- /* Since avail_out is 0, deflate will be called again with
- * more output space, but possibly with both pending and
- * avail_in equal to zero. There won't be anything to do,
- * but this is not an error situation so make sure we
- * return OK instead of BUF_ERROR at next call of deflate:
- */
- s->last_flush = -1;
- return Z_OK;
- }
-
- /* Make sure there is something to do and avoid duplicate consecutive
- * flushes. For repeated and useless calls with Z_FINISH, we keep
- * returning Z_STREAM_END instead of Z_BUF_ERROR.
- */
- } else if (strm->avail_in == 0 && flush <= old_flush &&
- flush != Z_FINISH) {
- ERR_RETURN(strm, Z_BUF_ERROR);
- }
-
- /* User must not provide more input after the first FINISH: */
- if (s->status == FINISH_STATE && strm->avail_in != 0) {
- ERR_RETURN(strm, Z_BUF_ERROR);
- }
-
- /* Start a new block or continue the current one.
- */
- if (strm->avail_in != 0 || s->lookahead != 0 ||
- (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) {
- block_state bstate;
-
- bstate = (*(configuration_table[s->level].func))(s, flush);
-
- if (bstate == finish_started || bstate == finish_done) {
- s->status = FINISH_STATE;
- }
- if (bstate == need_more || bstate == finish_started) {
- if (strm->avail_out == 0) {
- s->last_flush = -1; /* avoid BUF_ERROR next call, see above */
- }
- return Z_OK;
- /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
- * of deflate should use the same flush parameter to make sure
- * that the flush is complete. So we don't have to output an
- * empty block here, this will be done at next call. This also
- * ensures that for a very small output buffer, we emit at most
- * one empty block.
- */
- }
- if (bstate == block_done) {
- if (flush == Z_PARTIAL_FLUSH) {
- _tr_align(s);
- } else if (flush == Z_PACKET_FLUSH) {
- /* Output just the 3-bit `stored' block type value,
- but not a zero length. */
- _tr_stored_type_only(s);
- } else { /* FULL_FLUSH or SYNC_FLUSH */
- _tr_stored_block(s, (char*)0, 0L, 0);
- /* For a full flush, this empty block will be recognized
- * as a special marker by inflate_sync().
- */
- if (flush == Z_FULL_FLUSH) {
- CLEAR_HASH(s); /* forget history */
- }
- }
- flush_pending(strm);
- if (strm->avail_out == 0) {
- s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
- return Z_OK;
- }
- }
- }
- Assert(strm->avail_out > 0, "bug2");
-
- if (flush != Z_FINISH) return Z_OK;
- if (s->noheader) return Z_STREAM_END;
-
- /* Write the zlib trailer (adler32) */
- putShortMSB(s, (uInt)(strm->adler >> 16));
- putShortMSB(s, (uInt)(strm->adler & 0xffff));
- flush_pending(strm);
- /* If avail_out is zero, the application will call deflate again
- * to flush the rest.
- */
- s->noheader = -1; /* write the trailer only once! */
- return s->pending != 0 ? Z_OK : Z_STREAM_END;
-}
-
-/* ========================================================================= */
-int deflateEnd (strm)
- z_streamp strm;
-{
- int status;
- deflate_state *s;
-
- if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR;
- s = (deflate_state *) strm->state;
-
- status = s->status;
- if (status != INIT_STATE && status != BUSY_STATE &&
- status != FINISH_STATE) {
- return Z_STREAM_ERROR;
- }
-
- /* Deallocate in reverse order of allocations: */
- TRY_FREE(strm, s->pending_buf);
- TRY_FREE(strm, s->head);
- TRY_FREE(strm, s->prev);
- TRY_FREE(strm, s->window);
-
- ZFREE(strm, s);
- strm->state = Z_NULL;
-
- return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
-}
-
-/* =========================================================================
- * Copy the source state to the destination state.
- */
-int deflateCopy (dest, source)
- z_streamp dest;
- z_streamp source;
-{
- deflate_state *ds;
- deflate_state *ss;
- ushf *overlay;
-
- if (source == Z_NULL || dest == Z_NULL || source->state == Z_NULL)
- return Z_STREAM_ERROR;
- ss = (deflate_state *) source->state;
-
- *dest = *source;
-
- ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state));
- if (ds == Z_NULL) return Z_MEM_ERROR;
- dest->state = (struct internal_state FAR *) ds;
- *ds = *ss;
- ds->strm = dest;
-
- ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
- ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos));
- ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos));
- overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2);
- ds->pending_buf = (uchf *) overlay;
-
- if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
- ds->pending_buf == Z_NULL) {
- deflateEnd (dest);
- return Z_MEM_ERROR;
- }
- /* ??? following zmemcpy doesn't work for 16-bit MSDOS */
- zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
- zmemcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos));
- zmemcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos));
- zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size);
-
- ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
- ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush);
- ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize;
-
- ds->l_desc.dyn_tree = ds->dyn_ltree;
- ds->d_desc.dyn_tree = ds->dyn_dtree;
- ds->bl_desc.dyn_tree = ds->bl_tree;
-
- return Z_OK;
-}
-
-/* ===========================================================================
- * Return the number of bytes of output which are immediately available
- * for output from the compressor.
- */
-int deflateOutputPending (strm)
- z_streamp strm;
-{
- if (strm == Z_NULL || strm->state == Z_NULL) return 0;
-
- return ((deflate_state *)(strm->state))->pending;
-}
-
-/* ===========================================================================
- * Read a new buffer from the current input stream, update the adler32
- * and total number of bytes read. All deflate() input goes through
- * this function so some applications may wish to modify it to avoid
- * allocating a large strm->next_in buffer and copying from it.
- * (See also flush_pending()).
- */
-local int read_buf(strm, buf, size)
- z_streamp strm;
- charf *buf;
- unsigned size;
-{
- unsigned len = strm->avail_in;
-
- if (len > size) len = size;
- if (len == 0) return 0;
-
- strm->avail_in -= len;
-
- if (!((deflate_state *)(strm->state))->noheader) {
- strm->adler = adler32(strm->adler, strm->next_in, len);
- }
- zmemcpy(buf, strm->next_in, len);
- strm->next_in += len;
- strm->total_in += len;
-
- return (int)len;
-}
-
-/* ===========================================================================
- * Initialize the "longest match" routines for a new zlib stream
- */
-local void lm_init (s)
- deflate_state *s;
-{
- s->window_size = (ulg)2L*s->w_size;
-
- CLEAR_HASH(s);
-
- /* Set the default configuration parameters:
- */
- s->max_lazy_match = configuration_table[s->level].max_lazy;
- s->good_match = configuration_table[s->level].good_length;
- s->nice_match = configuration_table[s->level].nice_length;
- s->max_chain_length = configuration_table[s->level].max_chain;
-
- s->strstart = 0;
- s->block_start = 0L;
- s->lookahead = 0;
- s->match_length = s->prev_length = MIN_MATCH-1;
- s->match_available = 0;
- s->ins_h = 0;
-#ifdef ASMV
- match_init(); /* initialize the asm code */
-#endif
-}
-
-/* ===========================================================================
- * Set match_start to the longest match starting at the given string and
- * return its length. Matches shorter or equal to prev_length are discarded,
- * in which case the result is equal to prev_length and match_start is
- * garbage.
- * IN assertions: cur_match is the head of the hash chain for the current
- * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
- * OUT assertion: the match length is not greater than s->lookahead.
- */
-#ifndef ASMV
-/* For 80x86 and 680x0, an optimized version will be provided in match.asm or
- * match.S. The code will be functionally equivalent.
- */
-local uInt longest_match(s, cur_match)
- deflate_state *s;
- IPos cur_match; /* current match */
-{
- unsigned chain_length = s->max_chain_length;/* max hash chain length */
- register Bytef *scan = s->window + s->strstart; /* current string */
- register Bytef *match; /* matched string */
- register int len; /* length of current match */
- int best_len = s->prev_length; /* best match length so far */
- int nice_match = s->nice_match; /* stop if match long enough */
- IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
- s->strstart - (IPos)MAX_DIST(s) : NIL;
- /* Stop when cur_match becomes <= limit. To simplify the code,
- * we prevent matches with the string of window index 0.
- */
- Posf *prev = s->prev;
- uInt wmask = s->w_mask;
-
-#ifdef UNALIGNED_OK
- /* Compare two bytes at a time. Note: this is not always beneficial.
- * Try with and without -DUNALIGNED_OK to check.
- */
- register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
- register ush scan_start = *(ushf*)scan;
- register ush scan_end = *(ushf*)(scan+best_len-1);
-#else
- register Bytef *strend = s->window + s->strstart + MAX_MATCH;
- register Byte scan_end1 = scan[best_len-1];
- register Byte scan_end = scan[best_len];
-#endif
-
- /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
- * It is easy to get rid of this optimization if necessary.
- */
- Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
-
- /* Do not waste too much time if we already have a good match: */
- if (s->prev_length >= s->good_match) {
- chain_length >>= 2;
- }
- /* Do not look for matches beyond the end of the input. This is necessary
- * to make deflate deterministic.
- */
- if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;
-
- Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
-
- do {
- Assert(cur_match < s->strstart, "no future");
- match = s->window + cur_match;
-
- /* Skip to next match if the match length cannot increase
- * or if the match length is less than 2:
- */
-#if (defined(UNALIGNED_OK) && MAX_MATCH == 258)
- /* This code assumes sizeof(unsigned short) == 2. Do not use
- * UNALIGNED_OK if your compiler uses a different size.
- */
- if (*(ushf*)(match+best_len-1) != scan_end ||
- *(ushf*)match != scan_start) continue;
-
- /* It is not necessary to compare scan[2] and match[2] since they are
- * always equal when the other bytes match, given that the hash keys
- * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at
- * strstart+3, +5, ... up to strstart+257. We check for insufficient
- * lookahead only every 4th comparison; the 128th check will be made
- * at strstart+257. If MAX_MATCH-2 is not a multiple of 8, it is
- * necessary to put more guard bytes at the end of the window, or
- * to check more often for insufficient lookahead.
- */
- Assert(scan[2] == match[2], "scan[2]?");
- scan++, match++;
- do {
- } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
- *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
- *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
- *(ushf*)(scan+=2) == *(ushf*)(match+=2) &&
- scan < strend);
- /* The funny "do {}" generates better code on most compilers */
-
- /* Here, scan <= window+strstart+257 */
- Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
- if (*scan == *match) scan++;
-
- len = (MAX_MATCH - 1) - (int)(strend-scan);
- scan = strend - (MAX_MATCH-1);
-
-#else /* UNALIGNED_OK */
-
- if (match[best_len] != scan_end ||
- match[best_len-1] != scan_end1 ||
- *match != *scan ||
- *++match != scan[1]) continue;
-
- /* The check at best_len-1 can be removed because it will be made
- * again later. (This heuristic is not always a win.)
- * It is not necessary to compare scan[2] and match[2] since they
- * are always equal when the other bytes match, given that
- * the hash keys are equal and that HASH_BITS >= 8.
- */
- scan += 2, match++;
- Assert(*scan == *match, "match[2]?");
-
- /* We check for insufficient lookahead only every 8th comparison;
- * the 256th check will be made at strstart+258.
- */
- do {
- } while (*++scan == *++match && *++scan == *++match &&
- *++scan == *++match && *++scan == *++match &&
- *++scan == *++match && *++scan == *++match &&
- *++scan == *++match && *++scan == *++match &&
- scan < strend);
-
- Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
-
- len = MAX_MATCH - (int)(strend - scan);
- scan = strend - MAX_MATCH;
-
-#endif /* UNALIGNED_OK */
-
- if (len > best_len) {
- s->match_start = cur_match;
- best_len = len;
- if (len >= nice_match) break;
-#ifdef UNALIGNED_OK
- scan_end = *(ushf*)(scan+best_len-1);
-#else
- scan_end1 = scan[best_len-1];
- scan_end = scan[best_len];
-#endif
- }
- } while ((cur_match = prev[cur_match & wmask]) > limit
- && --chain_length != 0);
-
- if ((uInt)best_len <= s->lookahead) return best_len;
- return s->lookahead;
-}
-#endif /* ASMV */
-
-#ifdef DEBUG_ZLIB
-/* ===========================================================================
- * Check that the match at match_start is indeed a match.
- */
-local void check_match(s, start, match, length)
- deflate_state *s;
- IPos start, match;
- int length;
-{
- /* check that the match is indeed a match */
- if (zmemcmp((charf *)s->window + match,
- (charf *)s->window + start, length) != EQUAL) {
- fprintf(stderr, " start %u, match %u, length %d\n",
- start, match, length);
- do {
- fprintf(stderr, "%c%c", s->window[match++], s->window[start++]);
- } while (--length != 0);
- z_error("invalid match");
- }
- if (z_verbose > 1) {
- fprintf(stderr,"\\[%d,%d]", start-match, length);
- do { putc(s->window[start++], stderr); } while (--length != 0);
- }
-}
-#else
-# define check_match(s, start, match, length)
-#endif
-
-/* ===========================================================================
- * Fill the window when the lookahead becomes insufficient.
- * Updates strstart and lookahead.
- *
- * IN assertion: lookahead < MIN_LOOKAHEAD
- * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
- * At least one byte has been read, or avail_in == 0; reads are
- * performed for at least two bytes (required for the zip translate_eol
- * option -- not supported here).
- */
-local void fill_window(s)
- deflate_state *s;
-{
- register unsigned n, m;
- register Posf *p;
- unsigned more; /* Amount of free space at the end of the window. */
- uInt wsize = s->w_size;
-
- do {
- more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
-
- /* Deal with !@#$% 64K limit: */
- if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
- more = wsize;
-
- } else if (more == (unsigned)(-1)) {
- /* Very unlikely, but possible on 16 bit machine if strstart == 0
-         * and lookahead == 1 (input done one byte at a time)
- */
- more--;
-
- /* If the window is almost full and there is insufficient lookahead,
- * move the upper half to the lower one to make room in the upper half.
- */
- } else if (s->strstart >= wsize+MAX_DIST(s)) {
-
- zmemcpy((charf *)s->window, (charf *)s->window+wsize,
- (unsigned)wsize);
- s->match_start -= wsize;
- s->strstart -= wsize; /* we now have strstart >= MAX_DIST */
- s->block_start -= (long) wsize;
-
- /* Slide the hash table (could be avoided with 32 bit values
- at the expense of memory usage). We slide even when level == 0
- to keep the hash table consistent if we switch back to level > 0
- later. (Using level 0 permanently is not an optimal usage of
- zlib, so we don't care about this pathological case.)
- */
- n = s->hash_size;
- p = &s->head[n];
- do {
- m = *--p;
- *p = (Pos)(m >= wsize ? m-wsize : NIL);
- } while (--n);
-
- n = wsize;
- p = &s->prev[n];
- do {
- m = *--p;
- *p = (Pos)(m >= wsize ? m-wsize : NIL);
- /* If n is not on any hash chain, prev[n] is garbage but
- * its value will never be used.
- */
- } while (--n);
- more += wsize;
- }
- if (s->strm->avail_in == 0) return;
-
- /* If there was no sliding:
- * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
- * more == window_size - lookahead - strstart
- * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
- * => more >= window_size - 2*WSIZE + 2
- * In the BIG_MEM or MMAP case (not yet supported),
- * window_size == input_size + MIN_LOOKAHEAD &&
- * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
- * Otherwise, window_size == 2*WSIZE so more >= 2.
- * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
- */
- Assert(more >= 2, "more < 2");
-
- n = read_buf(s->strm, (charf *)s->window + s->strstart + s->lookahead,
- more);
- s->lookahead += n;
-
- /* Initialize the hash value now that we have some input: */
- if (s->lookahead >= MIN_MATCH) {
- s->ins_h = s->window[s->strstart];
- UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
-#if MIN_MATCH != 3
- Call UPDATE_HASH() MIN_MATCH-3 more times
-#endif
- }
- /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
- * but this is not important since only literal bytes will be emitted.
- */
-
- } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0);
-}
-
-/* ===========================================================================
- * Flush the current block, with given end-of-file flag.
- * IN assertion: strstart is set to the end of the current match.
- */
-#define FLUSH_BLOCK_ONLY(s, eof) { \
- _tr_flush_block(s, (s->block_start >= 0L ? \
- (charf *)&s->window[(unsigned)s->block_start] : \
- (charf *)Z_NULL), \
- (ulg)((long)s->strstart - s->block_start), \
- (eof)); \
- s->block_start = s->strstart; \
- flush_pending(s->strm); \
- Tracev((stderr,"[FLUSH]")); \
-}
-
-/* Same but force premature exit if necessary. */
-#define FLUSH_BLOCK(s, eof) { \
- FLUSH_BLOCK_ONLY(s, eof); \
- if (s->strm->avail_out == 0) return (eof) ? finish_started : need_more; \
-}
-
-/* ===========================================================================
- * Copy without compression as much as possible from the input stream, return
- * the current block state.
- * This function does not insert new strings in the dictionary since
- * uncompressible data is probably not useful. This function is used
- * only for the level=0 compression option.
- * NOTE: this function should be optimized to avoid extra copying from
- * window to pending_buf.
- */
-local block_state deflate_stored(s, flush)
- deflate_state *s;
- int flush;
-{
- /* Stored blocks are limited to 0xffff bytes, pending_buf is limited
- * to pending_buf_size, and each stored block has a 5 byte header:
- */
- ulg max_block_size = 0xffff;
- ulg max_start;
-
- if (max_block_size > s->pending_buf_size - 5) {
- max_block_size = s->pending_buf_size - 5;
- }
-
- /* Copy as much as possible from input to output: */
- for (;;) {
- /* Fill the window as much as possible: */
- if (s->lookahead <= 1) {
-
- Assert(s->strstart < s->w_size+MAX_DIST(s) ||
- s->block_start >= (long)s->w_size, "slide too late");
-
- fill_window(s);
- if (s->lookahead == 0 && flush == Z_NO_FLUSH) return need_more;
-
- if (s->lookahead == 0) break; /* flush the current block */
- }
- Assert(s->block_start >= 0L, "block gone");
-
- s->strstart += s->lookahead;
- s->lookahead = 0;
-
- /* Emit a stored block if pending_buf will be full: */
- max_start = s->block_start + max_block_size;
- if (s->strstart == 0 || (ulg)s->strstart >= max_start) {
-	    /* strstart == 0 is possible after wraparound on a 16-bit machine */
- s->lookahead = (uInt)(s->strstart - max_start);
- s->strstart = (uInt)max_start;
- FLUSH_BLOCK(s, 0);
- }
- /* Flush if we may have to slide, otherwise block_start may become
- * negative and the data will be gone:
- */
- if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {
- FLUSH_BLOCK(s, 0);
- }
- }
- FLUSH_BLOCK(s, flush == Z_FINISH);
- return flush == Z_FINISH ? finish_done : block_done;
-}
-
-/* ===========================================================================
- * Compress as much as possible from the input stream, return the current
- * block state.
- * This function does not perform lazy evaluation of matches and inserts
- * new strings in the dictionary only for unmatched strings or for short
- * matches. It is used only for the fast compression options.
- */
-local block_state deflate_fast(s, flush)
- deflate_state *s;
- int flush;
-{
- IPos hash_head = NIL; /* head of the hash chain */
- int bflush; /* set if current block must be flushed */
-
- for (;;) {
- /* Make sure that we always have enough lookahead, except
- * at the end of the input file. We need MAX_MATCH bytes
- * for the next match, plus MIN_MATCH bytes to insert the
- * string following the next match.
- */
- if (s->lookahead < MIN_LOOKAHEAD) {
- fill_window(s);
- if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
- return need_more;
- }
- if (s->lookahead == 0) break; /* flush the current block */
- }
-
- /* Insert the string window[strstart .. strstart+2] in the
- * dictionary, and set hash_head to the head of the hash chain:
- */
- if (s->lookahead >= MIN_MATCH) {
- INSERT_STRING(s, s->strstart, hash_head);
- }
-
- /* Find the longest match, discarding those <= prev_length.
- * At this point we have always match_length < MIN_MATCH
- */
- if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) {
- /* To simplify the code, we prevent matches with the string
- * of window index 0 (in particular we have to avoid a match
- * of the string with itself at the start of the input file).
- */
- if (s->strategy != Z_HUFFMAN_ONLY) {
- s->match_length = longest_match (s, hash_head);
- }
- /* longest_match() sets match_start */
- }
- if (s->match_length >= MIN_MATCH) {
- check_match(s, s->strstart, s->match_start, s->match_length);
-
- bflush = _tr_tally(s, s->strstart - s->match_start,
- s->match_length - MIN_MATCH);
-
- s->lookahead -= s->match_length;
-
- /* Insert new strings in the hash table only if the match length
- * is not too large. This saves time but degrades compression.
- */
- if (s->match_length <= s->max_insert_length &&
- s->lookahead >= MIN_MATCH) {
- s->match_length--; /* string at strstart already in hash table */
- do {
- s->strstart++;
- INSERT_STRING(s, s->strstart, hash_head);
- /* strstart never exceeds WSIZE-MAX_MATCH, so there are
- * always MIN_MATCH bytes ahead.
- */
- } while (--s->match_length != 0);
- s->strstart++;
- } else {
- s->strstart += s->match_length;
- s->match_length = 0;
- s->ins_h = s->window[s->strstart];
- UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]);
-#if MIN_MATCH != 3
- Call UPDATE_HASH() MIN_MATCH-3 more times
-#endif
- /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
- * matter since it will be recomputed at next deflate call.
- */
- }
- } else {
- /* No match, output a literal byte */
- Tracevv((stderr,"%c", s->window[s->strstart]));
- bflush = _tr_tally (s, 0, s->window[s->strstart]);
- s->lookahead--;
- s->strstart++;
- }
- if (bflush) FLUSH_BLOCK(s, 0);
- }
- FLUSH_BLOCK(s, flush == Z_FINISH);
- return flush == Z_FINISH ? finish_done : block_done;
-}
-
-/* ===========================================================================
- * Same as above, but achieves better compression. We use a lazy
- * evaluation for matches: a match is finally adopted only if there is
- * no better match at the next window position.
- */
-local block_state deflate_slow(s, flush)
- deflate_state *s;
- int flush;
-{
- IPos hash_head = NIL; /* head of hash chain */
- int bflush; /* set if current block must be flushed */
-
- /* Process the input block. */
- for (;;) {
- /* Make sure that we always have enough lookahead, except
- * at the end of the input file. We need MAX_MATCH bytes
- * for the next match, plus MIN_MATCH bytes to insert the
- * string following the next match.
- */
- if (s->lookahead < MIN_LOOKAHEAD) {
- fill_window(s);
- if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
- return need_more;
- }
- if (s->lookahead == 0) break; /* flush the current block */
- }
-
- /* Insert the string window[strstart .. strstart+2] in the
- * dictionary, and set hash_head to the head of the hash chain:
- */
- if (s->lookahead >= MIN_MATCH) {
- INSERT_STRING(s, s->strstart, hash_head);
- }
-
- /* Find the longest match, discarding those <= prev_length.
- */
- s->prev_length = s->match_length, s->prev_match = s->match_start;
- s->match_length = MIN_MATCH-1;
-
- if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
- s->strstart - hash_head <= MAX_DIST(s)) {
- /* To simplify the code, we prevent matches with the string
- * of window index 0 (in particular we have to avoid a match
- * of the string with itself at the start of the input file).
- */
- if (s->strategy != Z_HUFFMAN_ONLY) {
- s->match_length = longest_match (s, hash_head);
- }
- /* longest_match() sets match_start */
-
- if (s->match_length <= 5 && (s->strategy == Z_FILTERED ||
- (s->match_length == MIN_MATCH &&
- s->strstart - s->match_start > TOO_FAR))) {
-
- /* If prev_match is also MIN_MATCH, match_start is garbage
- * but we will ignore the current match anyway.
- */
- s->match_length = MIN_MATCH-1;
- }
- }
- /* If there was a match at the previous step and the current
- * match is not better, output the previous match:
- */
- if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
- uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
- /* Do not insert strings in hash table beyond this. */
-
- check_match(s, s->strstart-1, s->prev_match, s->prev_length);
-
- bflush = _tr_tally(s, s->strstart -1 - s->prev_match,
- s->prev_length - MIN_MATCH);
-
- /* Insert in hash table all strings up to the end of the match.
- * strstart-1 and strstart are already inserted. If there is not
- * enough lookahead, the last two strings are not inserted in
- * the hash table.
- */
- s->lookahead -= s->prev_length-1;
- s->prev_length -= 2;
- do {
- if (++s->strstart <= max_insert) {
- INSERT_STRING(s, s->strstart, hash_head);
- }
- } while (--s->prev_length != 0);
- s->match_available = 0;
- s->match_length = MIN_MATCH-1;
- s->strstart++;
-
- if (bflush) FLUSH_BLOCK(s, 0);
-
- } else if (s->match_available) {
- /* If there was no match at the previous position, output a
- * single literal. If there was a match but the current match
- * is longer, truncate the previous match to a single literal.
- */
- Tracevv((stderr,"%c", s->window[s->strstart-1]));
- if (_tr_tally (s, 0, s->window[s->strstart-1])) {
- FLUSH_BLOCK_ONLY(s, 0);
- }
- s->strstart++;
- s->lookahead--;
- if (s->strm->avail_out == 0) return need_more;
- } else {
- /* There is no previous match to compare with, wait for
- * the next step to decide.
- */
- s->match_available = 1;
- s->strstart++;
- s->lookahead--;
- }
- }
- Assert (flush != Z_NO_FLUSH, "no flush?");
- if (s->match_available) {
- Tracevv((stderr,"%c", s->window[s->strstart-1]));
- _tr_tally (s, 0, s->window[s->strstart-1]);
- s->match_available = 0;
- }
- FLUSH_BLOCK(s, flush == Z_FINISH);
- return flush == Z_FINISH ? finish_done : block_done;
-}
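/*
 * Editor's note -- a small illustration of the lazy heuristic implemented
 * above.  Suppose the window already contains "abcde" and "bcdefg", and the
 * input now reads "abcdefg".  At the current position the best match is
 * "abcde" (length 5); one byte later, "bcdefg" (length 6) is available.
 * deflate_slow() defers the decision: it remembers the length-5 match, looks
 * at the next position, finds the longer match, and emits the literal 'a'
 * followed by the length-6 match -- two symbols for the seven input bytes
 * instead of the three (match + two literals) that taking the first match
 * would have cost.  deflate_fast() would have taken the length-5 match
 * immediately.
 */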
-/* --- deflate.c */
-
-/* +++ trees.c */
-/* trees.c -- output deflated data using Huffman coding
- * Copyright (C) 1995-1996 Jean-loup Gailly
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/*
- * ALGORITHM
- *
- * The "deflation" process uses several Huffman trees. The more
- * common source values are represented by shorter bit sequences.
- *
- * Each code tree is stored in a compressed form which is itself
- * a Huffman encoding of the lengths of all the code strings (in
- * ascending order by source values). The actual code strings are
- * reconstructed from the lengths in the inflate process, as described
- * in the deflate specification.
- *
- * REFERENCES
- *
- * Deutsch, L.P.,"'Deflate' Compressed Data Format Specification".
- * Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc
- *
- * Storer, James A.
- * Data Compression: Methods and Theory, pp. 49-50.
- * Computer Science Press, 1988. ISBN 0-7167-8156-5.
- *
- * Sedgewick, R.
- * Algorithms, p290.
- * Addison-Wesley, 1983. ISBN 0-201-06672-6.
- */
-
-/* From: trees.c,v 1.11 1996/07/24 13:41:06 me Exp $ */
-
-/* #include "deflate.h" */
-
-#ifdef DEBUG_ZLIB
-# include <ctype.h>
-#endif
-
-/* ===========================================================================
- * Constants
- */
-
-#define MAX_BL_BITS 7
-/* Bit length codes must not exceed MAX_BL_BITS bits */
-
-#define END_BLOCK 256
-/* end of block literal code */
-
-#define REP_3_6 16
-/* repeat previous bit length 3-6 times (2 bits of repeat count) */
-
-#define REPZ_3_10 17
-/* repeat a zero length 3-10 times (3 bits of repeat count) */
-
-#define REPZ_11_138 18
-/* repeat a zero length 11-138 times (7 bits of repeat count) */
-
-local int extra_lbits[LENGTH_CODES] /* extra bits for each length code */
- = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0};
-
-local int extra_dbits[D_CODES] /* extra bits for each distance code */
- = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};
-
-local int extra_blbits[BL_CODES]/* extra bits for each bit length code */
- = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7};
-
-local uch bl_order[BL_CODES]
- = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15};
-/* The lengths of the bit length codes are sent in order of decreasing
- * probability, to avoid transmitting the lengths for unused bit length codes.
- */
-
-#define Buf_size (8 * 2*sizeof(char))
-/* Number of bits used within bi_buf. (bi_buf might be implemented on
- * more than 16 bits on some systems.)
- */
-
-/* ===========================================================================
- * Local data. These are initialized only once.
- */
-
-local ct_data static_ltree[L_CODES+2];
-/* The static literal tree. Since the bit lengths are imposed, there is no
- * need for the L_CODES extra codes used during heap construction. However
- * the codes 286 and 287 are needed to build a canonical tree (see _tr_init
- * below).
- */
-
-local ct_data static_dtree[D_CODES];
-/* The static distance tree. (Actually a trivial tree since all codes use
- * 5 bits.)
- */
-
-local uch dist_code[512];
-/* distance codes. The first 256 values correspond to the distances
- * 3 .. 258, the last 256 values correspond to the top 8 bits of
- * the 15 bit distances.
- */
-
-local uch length_code[MAX_MATCH-MIN_MATCH+1];
-/* length code for each normalized match length (0 == MIN_MATCH) */
-
-local int base_length[LENGTH_CODES];
-/* First normalized length for each code (0 = MIN_MATCH) */
-
-local int base_dist[D_CODES];
-/* First normalized distance for each code (0 = distance of 1) */
-
-struct static_tree_desc_s {
- ct_data *static_tree; /* static tree or NULL */
- intf *extra_bits; /* extra bits for each code or NULL */
- int extra_base; /* base index for extra_bits */
- int elems; /* max number of elements in the tree */
- int max_length; /* max bit length for the codes */
-};
-
-local static_tree_desc static_l_desc =
-{static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS};
-
-local static_tree_desc static_d_desc =
-{static_dtree, extra_dbits, 0, D_CODES, MAX_BITS};
-
-local static_tree_desc static_bl_desc =
-{(ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS};
-
-/* ===========================================================================
- * Local (static) routines in this file.
- */
-
-local void tr_static_init OF((void));
-local void init_block OF((deflate_state *s));
-local void pqdownheap OF((deflate_state *s, ct_data *tree, int k));
-local void gen_bitlen OF((deflate_state *s, tree_desc *desc));
-local void gen_codes OF((ct_data *tree, int max_code, ushf *bl_count));
-local void build_tree OF((deflate_state *s, tree_desc *desc));
-local void scan_tree OF((deflate_state *s, ct_data *tree, int max_code));
-local void send_tree OF((deflate_state *s, ct_data *tree, int max_code));
-local int build_bl_tree OF((deflate_state *s));
-local void send_all_trees OF((deflate_state *s, int lcodes, int dcodes,
- int blcodes));
-local void compress_block OF((deflate_state *s, ct_data *ltree,
- ct_data *dtree));
-local void set_data_type OF((deflate_state *s));
-local unsigned bi_reverse OF((unsigned value, int length));
-local void bi_windup OF((deflate_state *s));
-local void bi_flush OF((deflate_state *s));
-local void copy_block OF((deflate_state *s, charf *buf, unsigned len,
- int header));
-
-#ifndef DEBUG_ZLIB
-# define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len)
- /* Send a code of the given tree. c and tree must not have side effects */
-
-#else /* DEBUG_ZLIB */
-# define send_code(s, c, tree) \
- { if (verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \
- send_bits(s, tree[c].Code, tree[c].Len); }
-#endif
-
-#define d_code(dist) \
- ((dist) < 256 ? dist_code[dist] : dist_code[256+((dist)>>7)])
-/* Mapping from a distance to a distance code. dist is the distance - 1 and
- * must not have side effects. dist_code[256] and dist_code[257] are never
- * used.
- */
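-/* A worked example of the mapping above, derived from the tables built in
- * tr_static_init(): a match distance of 4100 gives dist = 4099; since
- * 4099 >= 256 the lookup is dist_code[256 + (4099 >> 7)] = dist_code[288],
- * which holds code 24 (base_dist[24] = 4096, i.e. distance base 4097 with
- * 11 extra bits).
- */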
-
-/* ===========================================================================
- * Output a short LSB first on the stream.
- * IN assertion: there is enough room in pendingBuf.
- */
-#define put_short(s, w) { \
- put_byte(s, (uch)((w) & 0xff)); \
- put_byte(s, (uch)((ush)(w) >> 8)); \
-}
-
-/* ===========================================================================
- * Send a value on a given number of bits.
- * IN assertion: length <= 16 and value fits in length bits.
- */
-#ifdef DEBUG_ZLIB
-local void send_bits OF((deflate_state *s, int value, int length));
-
-local void send_bits(s, value, length)
- deflate_state *s;
- int value; /* value to send */
- int length; /* number of bits */
-{
- Tracevv((stderr," l %2d v %4x ", length, value));
- Assert(length > 0 && length <= 15, "invalid length");
- s->bits_sent += (ulg)length;
-
- /* If not enough room in bi_buf, use (valid) bits from bi_buf and
- * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid))
- * unused bits in value.
- */
- if (s->bi_valid > (int)Buf_size - length) {
- s->bi_buf |= (value << s->bi_valid);
- put_short(s, s->bi_buf);
- s->bi_buf = (ush)value >> (Buf_size - s->bi_valid);
- s->bi_valid += length - Buf_size;
- } else {
- s->bi_buf |= value << s->bi_valid;
- s->bi_valid += length;
- }
-}
-#else /* !DEBUG_ZLIB */
-
-#define send_bits(s, value, length) \
-{ int len = length;\
- if (s->bi_valid > (int)Buf_size - len) {\
- int val = value;\
- s->bi_buf |= (val << s->bi_valid);\
- put_short(s, s->bi_buf);\
- s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\
- s->bi_valid += len - Buf_size;\
- } else {\
- s->bi_buf |= (value) << s->bi_valid;\
- s->bi_valid += len;\
- }\
-}
-#endif /* DEBUG_ZLIB */
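-/* A worked example of the buffering above (a sketch, assuming Buf_size is
- * 16): with bi_valid = 10, sending a 9-bit value first fills the remaining
- * 6 bits of bi_buf, flushes it LSB-first with put_short, then keeps the
- * top 3 bits of the value in bi_buf and leaves bi_valid = 3.
- */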
-
-
-#define MAX(a,b) (a >= b ? a : b)
-/* the arguments must not have side effects */
-
-/* ===========================================================================
- * Initialize the various 'constant' tables. In a multi-threaded environment,
- * this function may be called by two threads concurrently, but this is
- * harmless since both invocations do exactly the same thing.
- */
-local void tr_static_init()
-{
- static int static_init_done;
- int n; /* iterates over tree elements */
- int bits; /* bit counter */
- int length; /* length value */
- int code; /* code value */
- int dist; /* distance index */
- ush bl_count[MAX_BITS+1];
- /* number of codes at each bit length for an optimal tree */
-
- if (static_init_done) return;
-
- /* Initialize the mapping length (0..255) -> length code (0..28) */
- length = 0;
- for (code = 0; code < LENGTH_CODES-1; code++) {
- base_length[code] = length;
- for (n = 0; n < (1<<extra_lbits[code]); n++) {
- length_code[length++] = (uch)code;
- }
- }
- Assert (length == 256, "tr_static_init: length != 256");
- /* Note that the length 255 (match length 258) can be represented
- * in two different ways: code 284 + 5 bits or code 285, so we
- * overwrite length_code[255] to use the best encoding:
- */
- length_code[length-1] = (uch)code;
-
- /* Initialize the mapping dist (0..32K) -> dist code (0..29) */
- dist = 0;
- for (code = 0 ; code < 16; code++) {
- base_dist[code] = dist;
- for (n = 0; n < (1<<extra_dbits[code]); n++) {
- dist_code[dist++] = (uch)code;
- }
- }
- Assert (dist == 256, "tr_static_init: dist != 256");
- dist >>= 7; /* from now on, all distances are divided by 128 */
- for ( ; code < D_CODES; code++) {
- base_dist[code] = dist << 7;
- for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) {
- dist_code[256 + dist++] = (uch)code;
- }
- }
- Assert (dist == 256, "tr_static_init: 256+dist != 512");
-
- /* Construct the codes of the static literal tree */
- for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0;
- n = 0;
- while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++;
- while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++;
- while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++;
- while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++;
- /* Codes 286 and 287 do not exist, but we must include them in the
- * tree construction to get a canonical Huffman tree (longest code
- * all ones)
- */
- gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count);
-
- /* The static distance tree is trivial: */
- for (n = 0; n < D_CODES; n++) {
- static_dtree[n].Len = 5;
- static_dtree[n].Code = bi_reverse((unsigned)n, 5);
- }
- static_init_done = 1;
-}
-
-/* ===========================================================================
- * Initialize the tree data structures for a new zlib stream.
- */
-void _tr_init(s)
- deflate_state *s;
-{
- tr_static_init();
-
- s->compressed_len = 0L;
-
- s->l_desc.dyn_tree = s->dyn_ltree;
- s->l_desc.stat_desc = &static_l_desc;
-
- s->d_desc.dyn_tree = s->dyn_dtree;
- s->d_desc.stat_desc = &static_d_desc;
-
- s->bl_desc.dyn_tree = s->bl_tree;
- s->bl_desc.stat_desc = &static_bl_desc;
-
- s->bi_buf = 0;
- s->bi_valid = 0;
- s->last_eob_len = 8; /* enough lookahead for inflate */
-#ifdef DEBUG_ZLIB
- s->bits_sent = 0L;
-#endif
-
- /* Initialize the first block of the first file: */
- init_block(s);
-}
-
-/* ===========================================================================
- * Initialize a new block.
- */
-local void init_block(s)
- deflate_state *s;
-{
- int n; /* iterates over tree elements */
-
- /* Initialize the trees. */
- for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0;
- for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0;
- for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0;
-
- s->dyn_ltree[END_BLOCK].Freq = 1;
- s->opt_len = s->static_len = 0L;
- s->last_lit = s->matches = 0;
-}
-
-#define SMALLEST 1
-/* Index within the heap array of least frequent node in the Huffman tree */
-
-
-/* ===========================================================================
- * Remove the smallest element from the heap and recreate the heap with
- * one less element. Updates heap and heap_len.
- */
-#define pqremove(s, tree, top) \
-{\
- top = s->heap[SMALLEST]; \
- s->heap[SMALLEST] = s->heap[s->heap_len--]; \
- pqdownheap(s, tree, SMALLEST); \
-}
-
-/* ===========================================================================
- * Compares two subtrees, using the tree depth as a tie breaker when
- * the subtrees have equal frequency. This minimizes the worst case length.
- */
-#define smaller(tree, n, m, depth) \
- (tree[n].Freq < tree[m].Freq || \
- (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m]))
-
-/* ===========================================================================
- * Restore the heap property by moving down the tree starting at node k,
- * exchanging a node with the smallest of its two sons if necessary, stopping
- * when the heap property is re-established (each father smaller than its
- * two sons).
- */
-local void pqdownheap(s, tree, k)
- deflate_state *s;
- ct_data *tree; /* the tree to restore */
- int k; /* node to move down */
-{
- int v = s->heap[k];
- int j = k << 1; /* left son of k */
- while (j <= s->heap_len) {
- /* Set j to the smallest of the two sons: */
- if (j < s->heap_len &&
- smaller(tree, s->heap[j+1], s->heap[j], s->depth)) {
- j++;
- }
- /* Exit if v is smaller than both sons */
- if (smaller(tree, v, s->heap[j], s->depth)) break;
-
- /* Exchange v with the smallest son */
- s->heap[k] = s->heap[j]; k = j;
-
- /* And continue down the tree, setting j to the left son of k */
- j <<= 1;
- }
- s->heap[k] = v;
-}
-
-/* ===========================================================================
- * Compute the optimal bit lengths for a tree and update the total bit length
- * for the current block.
- * IN assertion: the fields freq and dad are set, heap[heap_max] and
- * above are the tree nodes sorted by increasing frequency.
- * OUT assertions: the field len is set to the optimal bit length, the
- * array bl_count contains the frequencies for each bit length.
- * The length opt_len is updated; static_len is also updated if stree is
- * not null.
- */
-local void gen_bitlen(s, desc)
- deflate_state *s;
- tree_desc *desc; /* the tree descriptor */
-{
- ct_data *tree = desc->dyn_tree;
- int max_code = desc->max_code;
- ct_data *stree = desc->stat_desc->static_tree;
- intf *extra = desc->stat_desc->extra_bits;
- int base = desc->stat_desc->extra_base;
- int max_length = desc->stat_desc->max_length;
- int h; /* heap index */
- int n, m; /* iterate over the tree elements */
- int bits; /* bit length */
- int xbits; /* extra bits */
- ush f; /* frequency */
- int overflow = 0; /* number of elements with bit length too large */
-
- for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0;
-
- /* In a first pass, compute the optimal bit lengths (which may
- * overflow in the case of the bit length tree).
- */
- tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */
-
- for (h = s->heap_max+1; h < HEAP_SIZE; h++) {
- n = s->heap[h];
- bits = tree[tree[n].Dad].Len + 1;
- if (bits > max_length) bits = max_length, overflow++;
- tree[n].Len = (ush)bits;
- /* We overwrite tree[n].Dad which is no longer needed */
-
- if (n > max_code) continue; /* not a leaf node */
-
- s->bl_count[bits]++;
- xbits = 0;
- if (n >= base) xbits = extra[n-base];
- f = tree[n].Freq;
- s->opt_len += (ulg)f * (bits + xbits);
- if (stree) s->static_len += (ulg)f * (stree[n].Len + xbits);
- }
- if (overflow == 0) return;
-
- Trace((stderr,"\nbit length overflow\n"));
- /* This happens for example on obj2 and pic of the Calgary corpus */
-
- /* Find the first bit length which could increase: */
- do {
- bits = max_length-1;
- while (s->bl_count[bits] == 0) bits--;
- s->bl_count[bits]--; /* move one leaf down the tree */
- s->bl_count[bits+1] += 2; /* move one overflow item as its brother */
- s->bl_count[max_length]--;
- /* The brother of the overflow item also moves one step up,
- * but this does not affect bl_count[max_length]
- */
- overflow -= 2;
- } while (overflow > 0);
-
- /* Now recompute all bit lengths, scanning in increasing frequency.
- * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
- * lengths instead of fixing only the wrong ones. This idea is taken
- * from 'ar' written by Haruhiko Okumura.)
- */
- for (bits = max_length; bits != 0; bits--) {
- n = s->bl_count[bits];
- while (n != 0) {
- m = s->heap[--h];
- if (m > max_code) continue;
- if (tree[m].Len != (unsigned) bits) {
- Trace((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits));
- s->opt_len += ((long)bits - (long)tree[m].Len)
- *(long)tree[m].Freq;
- tree[m].Len = (ush)bits;
- }
- n--;
- }
- }
-}
-
-/* ===========================================================================
- * Generate the codes for a given tree and bit counts (which need not be
- * optimal).
- * IN assertion: the array bl_count contains the bit length statistics for
- * the given tree and the field len is set for all tree elements.
- * OUT assertion: the field code is set for all tree elements of non
- * zero code length.
- */
-local void gen_codes (tree, max_code, bl_count)
- ct_data *tree; /* the tree to decorate */
- int max_code; /* largest code with non zero frequency */
- ushf *bl_count; /* number of codes at each bit length */
-{
- ush next_code[MAX_BITS+1]; /* next code value for each bit length */
- ush code = 0; /* running code value */
- int bits; /* bit index */
- int n; /* code index */
-
- /* The distribution counts are first used to generate the code values
- * without bit reversal.
- */
- for (bits = 1; bits <= MAX_BITS; bits++) {
- next_code[bits] = code = (code + bl_count[bits-1]) << 1;
- }
- /* Check that the bit counts in bl_count are consistent. The last code
- * must be all ones.
- */
- Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
- "inconsistent bit counts");
- Tracev((stderr,"\ngen_codes: max_code %d ", max_code));
-
- for (n = 0; n <= max_code; n++) {
- int len = tree[n].Len;
- if (len == 0) continue;
- /* Now reverse the bits */
- tree[n].Code = bi_reverse(next_code[len]++, len);
-
- Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
- n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1));
- }
-}
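-/* A small hand-checked example of the scheme above: for four symbols with
- * Len = {2, 1, 3, 3}, bl_count[1..3] = {1, 1, 2} gives next_code[1..3] =
- * {0, 2, 6}, so the 1-bit symbol gets code 0, the 2-bit symbol gets 10 and
- * the two 3-bit symbols get 110 and 111 (before bi_reverse flips them for
- * LSB-first output) -- the canonical assignment required by the deflate
- * format.
- */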
-
-/* ===========================================================================
- * Construct one Huffman tree and assign the code bit strings and lengths.
- * Update the total bit length for the current block.
- * IN assertion: the field freq is set for all tree elements.
- * OUT assertions: the fields len and code are set to the optimal bit length
- * and corresponding code. The length opt_len is updated; static_len is
- * also updated if stree is not null. The field max_code is set.
- */
-local void build_tree(s, desc)
- deflate_state *s;
- tree_desc *desc; /* the tree descriptor */
-{
- ct_data *tree = desc->dyn_tree;
- ct_data *stree = desc->stat_desc->static_tree;
- int elems = desc->stat_desc->elems;
- int n, m; /* iterate over heap elements */
- int max_code = -1; /* largest code with non zero frequency */
- int node; /* new node being created */
-
- /* Construct the initial heap, with least frequent element in
- * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
- * heap[0] is not used.
- */
- s->heap_len = 0, s->heap_max = HEAP_SIZE;
-
- for (n = 0; n < elems; n++) {
- if (tree[n].Freq != 0) {
- s->heap[++(s->heap_len)] = max_code = n;
- s->depth[n] = 0;
- } else {
- tree[n].Len = 0;
- }
- }
-
- /* The pkzip format requires that at least one distance code exists,
- * and that at least one bit should be sent even if there is only one
- * possible code. So to avoid special checks later on we force at least
- * two codes of non zero frequency.
- */
- while (s->heap_len < 2) {
- node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0);
- tree[node].Freq = 1;
- s->depth[node] = 0;
- s->opt_len--; if (stree) s->static_len -= stree[node].Len;
- /* node is 0 or 1 so it does not have extra bits */
- }
- desc->max_code = max_code;
-
- /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
- * establish sub-heaps of increasing lengths:
- */
- for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n);
-
- /* Construct the Huffman tree by repeatedly combining the least two
- * frequent nodes.
- */
- node = elems; /* next internal node of the tree */
- do {
- pqremove(s, tree, n); /* n = node of least frequency */
- m = s->heap[SMALLEST]; /* m = node of next least frequency */
-
- s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */
- s->heap[--(s->heap_max)] = m;
-
- /* Create a new node father of n and m */
- tree[node].Freq = tree[n].Freq + tree[m].Freq;
- s->depth[node] = (uch) (MAX(s->depth[n], s->depth[m]) + 1);
- tree[n].Dad = tree[m].Dad = (ush)node;
-#ifdef DUMP_BL_TREE
- if (tree == s->bl_tree) {
- fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)",
- node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq);
- }
-#endif
- /* and insert the new node in the heap */
- s->heap[SMALLEST] = node++;
- pqdownheap(s, tree, SMALLEST);
-
- } while (s->heap_len >= 2);
-
- s->heap[--(s->heap_max)] = s->heap[SMALLEST];
-
- /* At this point, the fields freq and dad are set. We can now
- * generate the bit lengths.
- */
- gen_bitlen(s, (tree_desc *)desc);
-
- /* The field len is now set, we can generate the bit codes */
- gen_codes ((ct_data *)tree, max_code, s->bl_count);
-}
-
-/* ===========================================================================
- * Scan a literal or distance tree to determine the frequencies of the codes
- * in the bit length tree.
- */
-local void scan_tree (s, tree, max_code)
- deflate_state *s;
- ct_data *tree; /* the tree to be scanned */
- int max_code; /* and its largest code of non zero frequency */
-{
- int n; /* iterates over all tree elements */
- int prevlen = -1; /* last emitted length */
- int curlen; /* length of current code */
- int nextlen = tree[0].Len; /* length of next code */
- int count = 0; /* repeat count of the current code */
- int max_count = 7; /* max repeat count */
- int min_count = 4; /* min repeat count */
-
- if (nextlen == 0) max_count = 138, min_count = 3;
- tree[max_code+1].Len = (ush)0xffff; /* guard */
-
- for (n = 0; n <= max_code; n++) {
- curlen = nextlen; nextlen = tree[n+1].Len;
- if (++count < max_count && curlen == nextlen) {
- continue;
- } else if (count < min_count) {
- s->bl_tree[curlen].Freq += count;
- } else if (curlen != 0) {
- if (curlen != prevlen) s->bl_tree[curlen].Freq++;
- s->bl_tree[REP_3_6].Freq++;
- } else if (count <= 10) {
- s->bl_tree[REPZ_3_10].Freq++;
- } else {
- s->bl_tree[REPZ_11_138].Freq++;
- }
- count = 0; prevlen = curlen;
- if (nextlen == 0) {
- max_count = 138, min_count = 3;
- } else if (curlen == nextlen) {
- max_count = 6, min_count = 3;
- } else {
- max_count = 7, min_count = 4;
- }
- }
-}
-
-/* ===========================================================================
- * Send a literal or distance tree in compressed form, using the codes in
- * bl_tree.
- */
-local void send_tree (s, tree, max_code)
- deflate_state *s;
- ct_data *tree; /* the tree to be scanned */
- int max_code; /* and its largest code of non zero frequency */
-{
- int n; /* iterates over all tree elements */
- int prevlen = -1; /* last emitted length */
- int curlen; /* length of current code */
- int nextlen = tree[0].Len; /* length of next code */
- int count = 0; /* repeat count of the current code */
- int max_count = 7; /* max repeat count */
- int min_count = 4; /* min repeat count */
-
- /* tree[max_code+1].Len = -1; */ /* guard already set */
- if (nextlen == 0) max_count = 138, min_count = 3;
-
- for (n = 0; n <= max_code; n++) {
- curlen = nextlen; nextlen = tree[n+1].Len;
- if (++count < max_count && curlen == nextlen) {
- continue;
- } else if (count < min_count) {
- do { send_code(s, curlen, s->bl_tree); } while (--count != 0);
-
- } else if (curlen != 0) {
- if (curlen != prevlen) {
- send_code(s, curlen, s->bl_tree); count--;
- }
- Assert(count >= 3 && count <= 6, " 3_6?");
- send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2);
-
- } else if (count <= 10) {
- send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3);
-
- } else {
- send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7);
- }
- count = 0; prevlen = curlen;
- if (nextlen == 0) {
- max_count = 138, min_count = 3;
- } else if (curlen == nextlen) {
- max_count = 6, min_count = 3;
- } else {
- max_count = 7, min_count = 4;
- }
- }
-}
-
-/* ===========================================================================
- * Construct the Huffman tree for the bit lengths and return the index in
- * bl_order of the last bit length code to send.
- */
-local int build_bl_tree(s)
- deflate_state *s;
-{
- int max_blindex; /* index of last bit length code of non zero freq */
-
- /* Determine the bit length frequencies for literal and distance trees */
- scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code);
- scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code);
-
- /* Build the bit length tree: */
- build_tree(s, (tree_desc *)(&(s->bl_desc)));
- /* opt_len now includes the length of the tree representations, except
- * the lengths of the bit length codes and the 5+5+4 bits for the counts.
- */
-
- /* Determine the number of bit length codes to send. The pkzip format
- * requires that at least 4 bit length codes be sent. (appnote.txt says
- * 3 but the actual value used is 4.)
- */
- for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) {
- if (s->bl_tree[bl_order[max_blindex]].Len != 0) break;
- }
- /* Update opt_len to include the bit length tree and counts */
- s->opt_len += 3*(max_blindex+1) + 5+5+4;
- Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
- s->opt_len, s->static_len));
-
- return max_blindex;
-}
-
-/* ===========================================================================
- * Send the header for a block using dynamic Huffman trees: the counts, the
- * lengths of the bit length codes, the literal tree and the distance tree.
- * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
- */
-local void send_all_trees(s, lcodes, dcodes, blcodes)
- deflate_state *s;
- int lcodes, dcodes, blcodes; /* number of codes for each tree */
-{
- int rank; /* index in bl_order */
-
- Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes");
- Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,
- "too many codes");
- Tracev((stderr, "\nbl counts: "));
- send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */
- send_bits(s, dcodes-1, 5);
- send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */
- for (rank = 0; rank < blcodes; rank++) {
- Tracev((stderr, "\nbl code %2d ", bl_order[rank]));
- send_bits(s, s->bl_tree[bl_order[rank]].Len, 3);
- }
- Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent));
-
- send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */
- Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent));
-
- send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */
- Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent));
-}
-
-/* ===========================================================================
- * Send a stored block
- */
-void _tr_stored_block(s, buf, stored_len, eof)
- deflate_state *s;
- charf *buf; /* input block */
- ulg stored_len; /* length of input block */
- int eof; /* true if this is the last block for a file */
-{
- send_bits(s, (STORED_BLOCK<<1)+eof, 3); /* send block type */
- s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L;
- s->compressed_len += (stored_len + 4) << 3;
-
- copy_block(s, buf, (unsigned)stored_len, 1); /* with header */
-}
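-/* A worked example, assuming the output is byte-aligned when the block
- * starts: a final stored block holding the five bytes "hello" is emitted
- * as 0x01 (BFINAL = 1, BTYPE = 00, padding to the byte boundary), then
- * LEN and its one's complement little-endian (05 00 fa ff), then the raw
- * data -- exactly the (stored_len + 4) bytes counted above after rounding
- * compressed_len up to a byte boundary.
- */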
-
-/* Send just the `stored block' type code without any length bytes or data.
- */
-void _tr_stored_type_only(s)
- deflate_state *s;
-{
- send_bits(s, (STORED_BLOCK << 1), 3);
- bi_windup(s);
- s->compressed_len = (s->compressed_len + 3) & ~7L;
-}
-
-
-/* ===========================================================================
- * Send one empty static block to give enough lookahead for inflate.
- * This takes 10 bits, of which 7 may remain in the bit buffer.
- * The current inflate code requires 9 bits of lookahead. If the
- * last two codes for the previous block (real code plus EOB) were coded
- * on 5 bits or less, inflate may have only 5+3 bits of lookahead to decode
- * the last real code. In this case we send two empty static blocks instead
- * of one. (There are no problems if the previous block is stored or fixed.)
- * To simplify the code, we assume the worst case of last real code encoded
- * on one bit only.
- */
-void _tr_align(s)
- deflate_state *s;
-{
- send_bits(s, STATIC_TREES<<1, 3);
- send_code(s, END_BLOCK, static_ltree);
- s->compressed_len += 10L; /* 3 for block type, 7 for EOB */
- bi_flush(s);
- /* Of the 10 bits for the empty block, we have already sent
- * (10 - bi_valid) bits. The lookahead for the last real code (before
- * the EOB of the previous block) was thus at least one plus the length
- * of the EOB plus what we have just sent of the empty static block.
- */
- if (1 + s->last_eob_len + 10 - s->bi_valid < 9) {
- send_bits(s, STATIC_TREES<<1, 3);
- send_code(s, END_BLOCK, static_ltree);
- s->compressed_len += 10L;
- bi_flush(s);
- }
- s->last_eob_len = 7;
-}
-
-/* ===========================================================================
- * Determine the best encoding for the current block: dynamic trees, static
- * trees or store, and output the encoded block to the zip file. This function
- * returns the total compressed length for the file so far.
- */
-ulg _tr_flush_block(s, buf, stored_len, eof)
- deflate_state *s;
- charf *buf; /* input block, or NULL if too old */
- ulg stored_len; /* length of input block */
- int eof; /* true if this is the last block for a file */
-{
- ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */
- int max_blindex = 0; /* index of last bit length code of non zero freq */
-
- /* Build the Huffman trees unless a stored block is forced */
- if (s->level > 0) {
-
- /* Check if the file is ascii or binary */
- if (s->data_type == Z_UNKNOWN) set_data_type(s);
-
- /* Construct the literal and distance trees */
- build_tree(s, (tree_desc *)(&(s->l_desc)));
- Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len,
- s->static_len));
-
- build_tree(s, (tree_desc *)(&(s->d_desc)));
- Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len,
- s->static_len));
- /* At this point, opt_len and static_len are the total bit lengths of
- * the compressed block data, excluding the tree representations.
- */
-
- /* Build the bit length tree for the above two trees, and get the index
- * in bl_order of the last bit length code to send.
- */
- max_blindex = build_bl_tree(s);
-
- /* Determine the best encoding. Compute first the block length in bytes*/
- opt_lenb = (s->opt_len+3+7)>>3;
- static_lenb = (s->static_len+3+7)>>3;
-
- Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
- opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
- s->last_lit));
-
- if (static_lenb <= opt_lenb) opt_lenb = static_lenb;
-
- } else {
- Assert(buf != (char*)0, "lost buf");
- opt_lenb = static_lenb = stored_len + 5; /* force a stored block */
- }
-
- /* If compression failed and this is the first and last block,
- * and if the .zip file can be seeked (to rewrite the local header),
- * the whole file is transformed into a stored file:
- */
-#ifdef STORED_FILE_OK
-# ifdef FORCE_STORED_FILE
- if (eof && s->compressed_len == 0L) { /* force stored file */
-# else
- if (stored_len <= opt_lenb && eof && s->compressed_len==0L && seekable()) {
-# endif
- /* Since LIT_BUFSIZE <= 2*WSIZE, the input data must be there: */
- if (buf == (charf*)0) error ("block vanished");
-
- copy_block(s, buf, (unsigned)stored_len, 0); /* without header */
- s->compressed_len = stored_len << 3;
- s->method = STORED;
- } else
-#endif /* STORED_FILE_OK */
-
-#ifdef FORCE_STORED
- if (buf != (char*)0) { /* force stored block */
-#else
- if (stored_len+4 <= opt_lenb && buf != (char*)0) {
- /* 4: two words for the lengths */
-#endif
- /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
- * Otherwise we can't have processed more than WSIZE input bytes since
- * the last block flush, because compression would have been
- * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
- * transform a block into a stored block.
- */
- _tr_stored_block(s, buf, stored_len, eof);
-
-#ifdef FORCE_STATIC
- } else if (static_lenb >= 0) { /* force static trees */
-#else
- } else if (static_lenb == opt_lenb) {
-#endif
- send_bits(s, (STATIC_TREES<<1)+eof, 3);
- compress_block(s, (ct_data *)static_ltree, (ct_data *)static_dtree);
- s->compressed_len += 3 + s->static_len;
- } else {
- send_bits(s, (DYN_TREES<<1)+eof, 3);
- send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1,
- max_blindex+1);
- compress_block(s, (ct_data *)s->dyn_ltree, (ct_data *)s->dyn_dtree);
- s->compressed_len += 3 + s->opt_len;
- }
- Assert (s->compressed_len == s->bits_sent, "bad compressed size");
- init_block(s);
-
- if (eof) {
- bi_windup(s);
- s->compressed_len += 7; /* align on byte boundary */
- }
- Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3,
- s->compressed_len-7*eof));
-
- return s->compressed_len >> 3;
-}
-
-/* ===========================================================================
- * Save the match info and tally the frequency counts. Return true if
- * the current block must be flushed.
- */
-int _tr_tally (s, dist, lc)
- deflate_state *s;
- unsigned dist; /* distance of matched string */
- unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */
-{
- s->d_buf[s->last_lit] = (ush)dist;
- s->l_buf[s->last_lit++] = (uch)lc;
- if (dist == 0) {
- /* lc is the unmatched char */
- s->dyn_ltree[lc].Freq++;
- } else {
- s->matches++;
- /* Here, lc is the match length - MIN_MATCH */
- dist--; /* dist = match distance - 1 */
- Assert((ush)dist < (ush)MAX_DIST(s) &&
- (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
- (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match");
-
- s->dyn_ltree[length_code[lc]+LITERALS+1].Freq++;
- s->dyn_dtree[d_code(dist)].Freq++;
- }
-
- /* Try to guess if it is profitable to stop the current block here */
- if (s->level > 2 && (s->last_lit & 0xfff) == 0) {
- /* Compute an upper bound for the compressed length */
- ulg out_length = (ulg)s->last_lit*8L;
- ulg in_length = (ulg)((long)s->strstart - s->block_start);
- int dcode;
- for (dcode = 0; dcode < D_CODES; dcode++) {
- out_length += (ulg)s->dyn_dtree[dcode].Freq *
- (5L+extra_dbits[dcode]);
- }
- out_length >>= 3;
- Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ",
- s->last_lit, in_length, out_length,
- 100L - out_length*100L/in_length));
- if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1;
- }
- return (s->last_lit == s->lit_bufsize-1);
- /* We avoid equality with lit_bufsize because of wraparound at 64K
- * on 16 bit machines and because stored blocks are restricted to
- * 64K-1 bytes.
- */
-}
-
-/* ===========================================================================
- * Send the block data compressed using the given Huffman trees
- */
-local void compress_block(s, ltree, dtree)
- deflate_state *s;
- ct_data *ltree; /* literal tree */
- ct_data *dtree; /* distance tree */
-{
- unsigned dist; /* distance of matched string */
- int lc; /* match length or unmatched char (if dist == 0) */
- unsigned lx = 0; /* running index in l_buf */
- unsigned code; /* the code to send */
- int extra; /* number of extra bits to send */
-
- if (s->last_lit != 0) do {
- dist = s->d_buf[lx];
- lc = s->l_buf[lx++];
- if (dist == 0) {
- send_code(s, lc, ltree); /* send a literal byte */
- Tracecv(isgraph(lc), (stderr," '%c' ", lc));
- } else {
- /* Here, lc is the match length - MIN_MATCH */
- code = length_code[lc];
- send_code(s, code+LITERALS+1, ltree); /* send the length code */
- extra = extra_lbits[code];
- if (extra != 0) {
- lc -= base_length[code];
- send_bits(s, lc, extra); /* send the extra length bits */
- }
- dist--; /* dist is now the match distance - 1 */
- code = d_code(dist);
- Assert (code < D_CODES, "bad d_code");
-
- send_code(s, code, dtree); /* send the distance code */
- extra = extra_dbits[code];
- if (extra != 0) {
- dist -= base_dist[code];
- send_bits(s, dist, extra); /* send the extra distance bits */
- }
- } /* literal or match pair ? */
-
- /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */
- Assert(s->pending < s->lit_bufsize + 2*lx, "pendingBuf overflow");
-
- } while (lx < s->last_lit);
-
- send_code(s, END_BLOCK, ltree);
- s->last_eob_len = ltree[END_BLOCK].Len;
-}
-
-/* ===========================================================================
- * Set the data type to ASCII or BINARY, using a crude approximation:
- * binary if more than 20% of the bytes are <= 6 or >= 128, ascii otherwise.
- * IN assertion: the fields freq of dyn_ltree are set and the total of all
- * frequencies does not exceed 64K (to fit in an int on 16 bit machines).
- */
-local void set_data_type(s)
- deflate_state *s;
-{
- int n = 0;
- unsigned ascii_freq = 0;
- unsigned bin_freq = 0;
- while (n < 7) bin_freq += s->dyn_ltree[n++].Freq;
- while (n < 128) ascii_freq += s->dyn_ltree[n++].Freq;
- while (n < LITERALS) bin_freq += s->dyn_ltree[n++].Freq;
- s->data_type = (Byte)(bin_freq > (ascii_freq >> 2) ? Z_BINARY : Z_ASCII);
-}
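-/* A worked example of the 20% rule above: with bin_freq = 30 and
- * ascii_freq = 100, the test 30 > (100 >> 2) = 25 holds and the block is
- * tagged Z_BINARY; with bin_freq = 20 it would be tagged Z_ASCII.
- */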
-
-/* ===========================================================================
- * Reverse the first len bits of a code, using straightforward code (a faster
- * method would use a table)
- * IN assertion: 1 <= len <= 15
- */
-local unsigned bi_reverse(code, len)
- unsigned code; /* the value to invert */
- int len; /* its bit length */
-{
- register unsigned res = 0;
- do {
- res |= code & 1;
- code >>= 1, res <<= 1;
- } while (--len > 0);
- return res >> 1;
-}
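-/* A worked example: bi_reverse(0x06, 5) walks the 5-bit code 00110 from
- * its low bit upward and returns 0x0c (01100), i.e. the same code with
- * its bit order flipped.
- */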
-
-/* ===========================================================================
- * Flush the bit buffer, keeping at most 7 bits in it.
- */
-local void bi_flush(s)
- deflate_state *s;
-{
- if (s->bi_valid == 16) {
- put_short(s, s->bi_buf);
- s->bi_buf = 0;
- s->bi_valid = 0;
- } else if (s->bi_valid >= 8) {
- put_byte(s, (Byte)s->bi_buf);
- s->bi_buf >>= 8;
- s->bi_valid -= 8;
- }
-}
-
-/* ===========================================================================
- * Flush the bit buffer and align the output on a byte boundary
- */
-local void bi_windup(s)
- deflate_state *s;
-{
- if (s->bi_valid > 8) {
- put_short(s, s->bi_buf);
- } else if (s->bi_valid > 0) {
- put_byte(s, (Byte)s->bi_buf);
- }
- s->bi_buf = 0;
- s->bi_valid = 0;
-#ifdef DEBUG_ZLIB
- s->bits_sent = (s->bits_sent+7) & ~7;
-#endif
-}
-
-/* ===========================================================================
- * Copy a stored block, storing first the length and its
- * one's complement if requested.
- */
-local void copy_block(s, buf, len, header)
- deflate_state *s;
- charf *buf; /* the input data */
- unsigned len; /* its length */
- int header; /* true if block header must be written */
-{
- bi_windup(s); /* align on byte boundary */
- s->last_eob_len = 8; /* enough lookahead for inflate */
-
- if (header) {
- put_short(s, (ush)len);
- put_short(s, (ush)~len);
-#ifdef DEBUG_ZLIB
- s->bits_sent += 2*16;
-#endif
- }
-#ifdef DEBUG_ZLIB
- s->bits_sent += (ulg)len<<3;
-#endif
- /* bundle up the put_byte(s, *buf++) calls */
- zmemcpy(&s->pending_buf[s->pending], buf, len);
- s->pending += len;
-}
-/* --- trees.c */
-
-/* +++ inflate.c */
-/* inflate.c -- zlib interface to inflate modules
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* #include "zutil.h" */
-
-/* +++ infblock.h */
-/* infblock.h -- header to use infblock.c
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-struct inflate_blocks_state;
-typedef struct inflate_blocks_state FAR inflate_blocks_statef;
-
-extern inflate_blocks_statef * inflate_blocks_new OF((
- z_streamp z,
- check_func c, /* check function */
- uInt w)); /* window size */
-
-extern int inflate_blocks OF((
- inflate_blocks_statef *,
- z_streamp ,
- int)); /* initial return code */
-
-extern void inflate_blocks_reset OF((
- inflate_blocks_statef *,
- z_streamp ,
- uLongf *)); /* check value on output */
-
-extern int inflate_blocks_free OF((
- inflate_blocks_statef *,
- z_streamp ,
- uLongf *)); /* check value on output */
-
-extern void inflate_set_dictionary OF((
- inflate_blocks_statef *s,
- const Bytef *d, /* dictionary */
- uInt n)); /* dictionary length */
-
-extern int inflate_addhistory OF((
- inflate_blocks_statef *,
- z_streamp));
-
-extern int inflate_packet_flush OF((
- inflate_blocks_statef *));
-/* --- infblock.h */
-
-#ifndef NO_DUMMY_DECL
-struct inflate_blocks_state {int dummy;}; /* for buggy compilers */
-#endif
-
-/* inflate private state */
-struct internal_state {
-
- /* mode */
- enum {
- METHOD, /* waiting for method byte */
- FLAG, /* waiting for flag byte */
- DICT4, /* four dictionary check bytes to go */
- DICT3, /* three dictionary check bytes to go */
- DICT2, /* two dictionary check bytes to go */
- DICT1, /* one dictionary check byte to go */
- DICT0, /* waiting for inflateSetDictionary */
- BLOCKS, /* decompressing blocks */
- CHECK4, /* four check bytes to go */
- CHECK3, /* three check bytes to go */
- CHECK2, /* two check bytes to go */
- CHECK1, /* one check byte to go */
- DONE, /* finished check, done */
- BAD} /* got an error--stay here */
- mode; /* current inflate mode */
-
- /* mode dependent information */
- union {
- uInt method; /* if FLAGS, method byte */
- struct {
- uLong was; /* computed check value */
- uLong need; /* stream check value */
- } check; /* if CHECK, check values to compare */
- uInt marker; /* if BAD, inflateSync's marker bytes count */
- } sub; /* submode */
-
- /* mode independent information */
- int nowrap; /* flag for no wrapper */
- uInt wbits; /* log2(window size) (8..15, defaults to 15) */
- inflate_blocks_statef
- *blocks; /* current inflate_blocks state */
-
-};
-
-
-int inflateReset(z)
-z_streamp z;
-{
- uLong c;
-
- if (z == Z_NULL || z->state == Z_NULL)
- return Z_STREAM_ERROR;
- z->total_in = z->total_out = 0;
- z->msg = Z_NULL;
- z->state->mode = z->state->nowrap ? BLOCKS : METHOD;
- inflate_blocks_reset(z->state->blocks, z, &c);
- Trace((stderr, "inflate: reset\n"));
- return Z_OK;
-}
-
-
-int inflateEnd(z)
-z_streamp z;
-{
- uLong c;
-
- if (z == Z_NULL || z->state == Z_NULL || z->zfree == Z_NULL)
- return Z_STREAM_ERROR;
- if (z->state->blocks != Z_NULL)
- inflate_blocks_free(z->state->blocks, z, &c);
- ZFREE(z, z->state);
- z->state = Z_NULL;
- Trace((stderr, "inflate: end\n"));
- return Z_OK;
-}
-
-
-int inflateInit2_(z, w, version, stream_size)
-z_streamp z;
-int w;
-const char *version;
-int stream_size;
-{
- if (version == Z_NULL || version[0] != ZLIB_VERSION[0] ||
- stream_size != sizeof(z_stream))
- return Z_VERSION_ERROR;
-
- /* initialize state */
- if (z == Z_NULL)
- return Z_STREAM_ERROR;
- z->msg = Z_NULL;
-#ifndef NO_ZCFUNCS
- if (z->zalloc == Z_NULL)
- {
- z->zalloc = zcalloc;
- z->opaque = (voidpf)0;
- }
- if (z->zfree == Z_NULL) z->zfree = zcfree;
-#endif
- if ((z->state = (struct internal_state FAR *)
- ZALLOC(z,1,sizeof(struct internal_state))) == Z_NULL)
- return Z_MEM_ERROR;
- z->state->blocks = Z_NULL;
-
- /* handle undocumented nowrap option (no zlib header or check) */
- z->state->nowrap = 0;
- if (w < 0)
- {
- w = - w;
- z->state->nowrap = 1;
- }
-
- /* set window size */
- if (w < 8 || w > 15)
- {
- inflateEnd(z);
- return Z_STREAM_ERROR;
- }
- z->state->wbits = (uInt)w;
-
- /* create inflate_blocks state */
- if ((z->state->blocks =
- inflate_blocks_new(z, z->state->nowrap ? Z_NULL : adler32, (uInt)1 << w))
- == Z_NULL)
- {
- inflateEnd(z);
- return Z_MEM_ERROR;
- }
- Trace((stderr, "inflate: allocated\n"));
-
- /* reset state */
- inflateReset(z);
- return Z_OK;
-}
-
-
-int inflateInit_(z, version, stream_size)
-z_streamp z;
-const char *version;
-int stream_size;
-{
- return inflateInit2_(z, DEF_WBITS, version, stream_size);
-}
-
-
-#define NEEDBYTE {if(z->avail_in==0)goto empty;r=Z_OK;}
-#define NEXTBYTE (z->avail_in--,z->total_in++,*z->next_in++)
-
-int inflate(z, f)
-z_streamp z;
-int f;
-{
- int r;
- uInt b;
-
- if (z == Z_NULL || z->state == Z_NULL || z->next_in == Z_NULL || f < 0)
- return Z_STREAM_ERROR;
- r = Z_BUF_ERROR;
- while (1) switch (z->state->mode)
- {
- case METHOD:
- NEEDBYTE
- if (((z->state->sub.method = NEXTBYTE) & 0xf) != Z_DEFLATED)
- {
- z->state->mode = BAD;
- z->msg = (char*)"unknown compression method";
- z->state->sub.marker = 5; /* can't try inflateSync */
- break;
- }
- if ((z->state->sub.method >> 4) + 8 > z->state->wbits)
- {
- z->state->mode = BAD;
- z->msg = (char*)"invalid window size";
- z->state->sub.marker = 5; /* can't try inflateSync */
- break;
- }
- z->state->mode = FLAG;
- case FLAG:
- NEEDBYTE
- b = NEXTBYTE;
- if (((z->state->sub.method << 8) + b) % 31)
- {
- z->state->mode = BAD;
- z->msg = (char*)"incorrect header check";
- z->state->sub.marker = 5; /* can't try inflateSync */
- break;
- }
- Trace((stderr, "inflate: zlib header ok\n"));
- if (!(b & PRESET_DICT))
- {
- z->state->mode = BLOCKS;
- break;
- }
- z->state->mode = DICT4;
- case DICT4:
- NEEDBYTE
- z->state->sub.check.need = (uLong)NEXTBYTE << 24;
- z->state->mode = DICT3;
- case DICT3:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE << 16;
- z->state->mode = DICT2;
- case DICT2:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE << 8;
- z->state->mode = DICT1;
- case DICT1:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE;
- z->adler = z->state->sub.check.need;
- z->state->mode = DICT0;
- return Z_NEED_DICT;
- case DICT0:
- z->state->mode = BAD;
- z->msg = (char*)"need dictionary";
- z->state->sub.marker = 0; /* can try inflateSync */
- return Z_STREAM_ERROR;
- case BLOCKS:
- r = inflate_blocks(z->state->blocks, z, r);
- if (f == Z_PACKET_FLUSH && z->avail_in == 0 && z->avail_out != 0)
- r = inflate_packet_flush(z->state->blocks);
- if (r == Z_DATA_ERROR)
- {
- z->state->mode = BAD;
- z->state->sub.marker = 0; /* can try inflateSync */
- break;
- }
- if (r != Z_STREAM_END)
- return r;
- r = Z_OK;
- inflate_blocks_reset(z->state->blocks, z, &z->state->sub.check.was);
- if (z->state->nowrap)
- {
- z->state->mode = DONE;
- break;
- }
- z->state->mode = CHECK4;
- case CHECK4:
- NEEDBYTE
- z->state->sub.check.need = (uLong)NEXTBYTE << 24;
- z->state->mode = CHECK3;
- case CHECK3:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE << 16;
- z->state->mode = CHECK2;
- case CHECK2:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE << 8;
- z->state->mode = CHECK1;
- case CHECK1:
- NEEDBYTE
- z->state->sub.check.need += (uLong)NEXTBYTE;
-
- if (z->state->sub.check.was != z->state->sub.check.need)
- {
- z->state->mode = BAD;
- z->msg = (char*)"incorrect data check";
- z->state->sub.marker = 5; /* can't try inflateSync */
- break;
- }
- Trace((stderr, "inflate: zlib check ok\n"));
- z->state->mode = DONE;
- case DONE:
- return Z_STREAM_END;
- case BAD:
- return Z_DATA_ERROR;
- default:
- return Z_STREAM_ERROR;
- }
-
- empty:
- if (f != Z_PACKET_FLUSH)
- return r;
- z->state->mode = BAD;
- z->msg = (char *)"need more for packet flush";
- z->state->sub.marker = 0; /* can try inflateSync */
- return Z_DATA_ERROR;
-}
-
-
-int inflateSetDictionary(z, dictionary, dictLength)
-z_streamp z;
-const Bytef *dictionary;
-uInt dictLength;
-{
- uInt length = dictLength;
-
- if (z == Z_NULL || z->state == Z_NULL || z->state->mode != DICT0)
- return Z_STREAM_ERROR;
-
- if (adler32(1L, dictionary, dictLength) != z->adler) return Z_DATA_ERROR;
- z->adler = 1L;
-
- if (length >= ((uInt)1<<z->state->wbits))
- {
- length = (1<<z->state->wbits)-1;
- dictionary += dictLength - length;
- }
- inflate_set_dictionary(z->state->blocks, dictionary, length);
- z->state->mode = BLOCKS;
- return Z_OK;
-}
-
-/*
- * This subroutine adds the data at next_in/avail_in to the output history
- * without performing any output. The output buffer must be "caught up";
- * i.e. no pending output (hence s->read equals s->write), and the state must
- * be BLOCKS (i.e. we should be willing to see the start of a series of
- * BLOCKS). On exit, the output will also be caught up, and the checksum
- * will have been updated if need be.
- */
-
-int inflateIncomp(z)
-z_stream *z;
-{
- if (z->state->mode != BLOCKS)
- return Z_DATA_ERROR;
- return inflate_addhistory(z->state->blocks, z);
-}
-
-
-int inflateSync(z)
-z_streamp z;
-{
- uInt n; /* number of bytes to look at */
- Bytef *p; /* pointer to bytes */
- uInt m; /* number of marker bytes found in a row */
- uLong r, w; /* temporaries to save total_in and total_out */
-
- /* set up */
- if (z == Z_NULL || z->state == Z_NULL)
- return Z_STREAM_ERROR;
- if (z->state->mode != BAD)
- {
- z->state->mode = BAD;
- z->state->sub.marker = 0;
- }
- if ((n = z->avail_in) == 0)
- return Z_BUF_ERROR;
- p = z->next_in;
- m = z->state->sub.marker;
-
- /* search */
- while (n && m < 4)
- {
- if (*p == (Byte)(m < 2 ? 0 : 0xff))
- m++;
- else if (*p)
- m = 0;
- else
- m = 4 - m;
- p++, n--;
- }
-
- /* restore */
- z->total_in += p - z->next_in;
- z->next_in = p;
- z->avail_in = n;
- z->state->sub.marker = m;
-
- /* return no joy or set up to restart on a new block */
- if (m != 4)
- return Z_DATA_ERROR;
- r = z->total_in; w = z->total_out;
- inflateReset(z);
- z->total_in = r; z->total_out = w;
- z->state->mode = BLOCKS;
- return Z_OK;
-}
-
-#undef NEEDBYTE
-#undef NEXTBYTE
-/* --- inflate.c */
-
-/* +++ infblock.c */
-/* infblock.c -- interpret and process block types to last block
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* #include "zutil.h" */
-/* #include "infblock.h" */
-
-/* +++ inftrees.h */
-/* inftrees.h -- header to use inftrees.c
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-/* Huffman code lookup table entry--this entry is four bytes for machines
- that have 16-bit pointers (e.g. PC's in the small or medium model). */
-
-typedef struct inflate_huft_s FAR inflate_huft;
-
-struct inflate_huft_s {
- union {
- struct {
- Byte Exop; /* number of extra bits or operation */
- Byte Bits; /* number of bits in this code or subcode */
- } what;
- Bytef *pad; /* pad structure to a power of 2 (4 bytes for */
- } word; /* 16-bit, 8 bytes for 32-bit machines) */
- union {
- uInt Base; /* literal, length base, or distance base */
- inflate_huft *Next; /* pointer to next level of table */
- } more;
-};
-
-#ifdef DEBUG_ZLIB
- extern uInt inflate_hufts;
-#endif
-
-extern int inflate_trees_bits OF((
- uIntf *, /* 19 code lengths */
- uIntf *, /* bits tree desired/actual depth */
- inflate_huft * FAR *, /* bits tree result */
- z_streamp )); /* for zalloc, zfree functions */
-
-extern int inflate_trees_dynamic OF((
- uInt, /* number of literal/length codes */
- uInt, /* number of distance codes */
- uIntf *, /* that many (total) code lengths */
- uIntf *, /* literal desired/actual bit depth */
- uIntf *, /* distance desired/actual bit depth */
- inflate_huft * FAR *, /* literal/length tree result */
- inflate_huft * FAR *, /* distance tree result */
- z_streamp )); /* for zalloc, zfree functions */
-
-extern int inflate_trees_fixed OF((
- uIntf *, /* literal desired/actual bit depth */
- uIntf *, /* distance desired/actual bit depth */
- inflate_huft * FAR *, /* literal/length tree result */
- inflate_huft * FAR *)); /* distance tree result */
-
-extern int inflate_trees_free OF((
- inflate_huft *, /* tables to free */
- z_streamp )); /* for zfree function */
-
-/* --- inftrees.h */
-
-/* +++ infcodes.h */
-/* infcodes.h -- header to use infcodes.c
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-struct inflate_codes_state;
-typedef struct inflate_codes_state FAR inflate_codes_statef;
-
-extern inflate_codes_statef *inflate_codes_new OF((
- uInt, uInt,
- inflate_huft *, inflate_huft *,
- z_streamp ));
-
-extern int inflate_codes OF((
- inflate_blocks_statef *,
- z_streamp ,
- int));
-
-extern void inflate_codes_free OF((
- inflate_codes_statef *,
- z_streamp ));
-
-/* --- infcodes.h */
-
-/* +++ infutil.h */
-/* infutil.h -- types and macros common to blocks and codes
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-#ifndef _INFUTIL_H
-#define _INFUTIL_H
-
-typedef enum {
- TYPE, /* get type bits (3, including end bit) */
- LENS, /* get lengths for stored */
- STORED, /* processing stored block */
- TABLE, /* get table lengths */
- BTREE, /* get bit lengths tree for a dynamic block */
- DTREE, /* get length, distance trees for a dynamic block */
- CODES, /* processing fixed or dynamic block */
- DRY, /* output remaining window bytes */
- DONEB, /* finished last block, done */
- BADB} /* got a data error--stuck here */
-inflate_block_mode;
-
-/* inflate blocks semi-private state */
-struct inflate_blocks_state {
-
- /* mode */
- inflate_block_mode mode; /* current inflate_block mode */
-
- /* mode dependent information */
- union {
- uInt left; /* if STORED, bytes left to copy */
- struct {
- uInt table; /* table lengths (14 bits) */
- uInt index; /* index into blens (or border) */
- uIntf *blens; /* bit lengths of codes */
- uInt bb; /* bit length tree depth */
- inflate_huft *tb; /* bit length decoding tree */
- } trees; /* if DTREE, decoding info for trees */
- struct {
- inflate_huft *tl;
- inflate_huft *td; /* trees to free */
- inflate_codes_statef
- *codes;
- } decode; /* if CODES, current state */
- } sub; /* submode */
- uInt last; /* true if this block is the last block */
-
- /* mode independent information */
- uInt bitk; /* bits in bit buffer */
- uLong bitb; /* bit buffer */
- Bytef *window; /* sliding window */
- Bytef *end; /* one byte after sliding window */
- Bytef *read; /* window read pointer */
- Bytef *write; /* window write pointer */
- check_func checkfn; /* check function */
- uLong check; /* check on output */
-
-};
-
-
-/* defines for inflate input/output */
-/* update pointers and return */
-#define UPDBITS {s->bitb=b;s->bitk=k;}
-#define UPDIN {z->avail_in=n;z->total_in+=p-z->next_in;z->next_in=p;}
-#define UPDOUT {s->write=q;}
-#define UPDATE {UPDBITS UPDIN UPDOUT}
-#define LEAVE {UPDATE return inflate_flush(s,z,r);}
-/* get bytes and bits */
-#define LOADIN {p=z->next_in;n=z->avail_in;b=s->bitb;k=s->bitk;}
-#define NEEDBYTE {if(n)r=Z_OK;else LEAVE}
-#define NEXTBYTE (n--,*p++)
-#define NEEDBITS(j) {while(k<(j)){NEEDBYTE;b|=((uLong)NEXTBYTE)<<k;k+=8;}}
-#define DUMPBITS(j) {b>>=(j);k-=(j);}
-/* output bytes */
-#define WAVAIL (uInt)(q<s->read?s->read-q-1:s->end-q)
-#define LOADOUT {q=s->write;m=(uInt)WAVAIL;}
-#define WWRAP {if(q==s->end&&s->read!=s->window){q=s->window;m=(uInt)WAVAIL;}}
-#define FLUSH {UPDOUT r=inflate_flush(s,z,r); LOADOUT}
-#define NEEDOUT {if(m==0){WWRAP if(m==0){FLUSH WWRAP if(m==0) LEAVE}}r=Z_OK;}
-#define OUTBYTE(a) {*q++=(Byte)(a);m--;}
-/* load local pointers */
-#define LOAD {LOADIN LOADOUT}
-
-/* masks for lower bits (size given to avoid silly warnings with Visual C++) */
-extern uInt inflate_mask[17];
-
-/* copy as much as possible from the sliding window to the output area */
-extern int inflate_flush OF((
- inflate_blocks_statef *,
- z_streamp ,
- int));
-
-#ifndef NO_DUMMY_DECL
-struct internal_state {int dummy;}; /* for buggy compilers */
-#endif
-
-#endif
-/* --- infutil.h */
-
-#ifndef NO_DUMMY_DECL
-struct inflate_codes_state {int dummy;}; /* for buggy compilers */
-#endif
-
-/* Table for deflate from PKZIP's appnote.txt. */
-local const uInt border[] = { /* Order of the bit length code lengths */
- 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
-
-/*
- Notes beyond the 1.93a appnote.txt:
-
- 1. Distance pointers never point before the beginning of the output
- stream.
- 2. Distance pointers can point back across blocks, up to 32k away.
- 3. There is an implied maximum of 7 bits for the bit length table and
- 15 bits for the actual data.
- 4. If only one code exists, then it is encoded using one bit. (Zero
- would be more efficient, but perhaps a little confusing.) If two
- codes exist, they are coded using one bit each (0 and 1).
- 5. There is no way of sending zero distance codes--a dummy must be
- sent if there are none. (History: a pre 2.0 version of PKZIP would
- store blocks with no distance codes, but this was discovered to be
- too harsh a criterion.) Valid only for 1.93a. 2.04c does allow
- zero distance codes, which is sent as one code of zero bits in
- length.
- 6. There are up to 286 literal/length codes. Code 256 represents the
- end-of-block. Note however that the static length tree defines
- 288 codes just to fill out the Huffman codes. Codes 286 and 287
- cannot be used though, since there is no length base or extra bits
- * defined for them. Similarly, there are up to 30 distance codes.
- However, static trees define 32 codes (all 5 bits) to fill out the
- Huffman codes, but the last two had better not show up in the data.
- 7. Unzip can check dynamic Huffman blocks for complete code sets.
- The exception is that a single code would not be complete (see #4).
- 8. The five bits following the block type is really the number of
- literal codes sent minus 257.
- 9. Length codes 8,16,16 are interpreted as 13 length codes of 8 bits
- (1+6+6). Therefore, to output three times the length, you output
- three codes (1+1+1), whereas to output four times the same length,
- you only need two codes (1+3). Hmm.
- 10. In the tree reconstruction algorithm, Code = Code + Increment
- only if BitLength(i) is not zero. (Pretty obvious.)
- 11. Correction: 4 Bits: # of Bit Length codes - 4 (4 - 19)
- 12. Note: length code 284 can represent 227-258, but length code 285
- really is 258. The last length deserves its own, short code
- since it gets used a lot in very redundant files. The length
- 258 is special since 258 - 3 (the min match length) is 255.
- 13. The literal/length and distance code bit lengths are read as a
- single stream of lengths. It is possible (and advantageous) for
- a repeat code (16, 17, or 18) to go across the boundary between
- the two sets of lengths.
- */
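-/* A worked example tying notes 8 and 11 to the TABLE/BTREE states below,
- * assuming the 14 header bits read in TABLE are t: they split into
- * hlit = 257 + (t & 0x1f) literal/length codes, hdist = 1 + ((t >> 5) & 0x1f)
- * distance codes and hclen = 4 + (t >> 10) bit length code lengths, so
- * t = 0x1021 means 258 literal/length codes, 2 distance codes and 8
- * three-bit entries read in border[] order before the bit length tree is
- * built.
- */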
-
-
-void inflate_blocks_reset(s, z, c)
-inflate_blocks_statef *s;
-z_streamp z;
-uLongf *c;
-{
- if (s->checkfn != Z_NULL)
- *c = s->check;
- if (s->mode == BTREE || s->mode == DTREE)
- ZFREE(z, s->sub.trees.blens);
- if (s->mode == CODES)
- {
- inflate_codes_free(s->sub.decode.codes, z);
- inflate_trees_free(s->sub.decode.td, z);
- inflate_trees_free(s->sub.decode.tl, z);
- }
- s->mode = TYPE;
- s->bitk = 0;
- s->bitb = 0;
- s->read = s->write = s->window;
- if (s->checkfn != Z_NULL)
- z->adler = s->check = (*s->checkfn)(0L, Z_NULL, 0);
- Trace((stderr, "inflate: blocks reset\n"));
-}
-
-
-inflate_blocks_statef *inflate_blocks_new(z, c, w)
-z_streamp z;
-check_func c;
-uInt w;
-{
- inflate_blocks_statef *s;
-
- if ((s = (inflate_blocks_statef *)ZALLOC
- (z,1,sizeof(struct inflate_blocks_state))) == Z_NULL)
- return s;
- if ((s->window = (Bytef *)ZALLOC(z, 1, w)) == Z_NULL)
- {
- ZFREE(z, s);
- return Z_NULL;
- }
- s->end = s->window + w;
- s->checkfn = c;
- s->mode = TYPE;
- Trace((stderr, "inflate: blocks allocated\n"));
- inflate_blocks_reset(s, z, &s->check);
- return s;
-}
-
-
-#ifdef DEBUG_ZLIB
- extern uInt inflate_hufts;
-#endif
-int inflate_blocks(s, z, r)
-inflate_blocks_statef *s;
-z_streamp z;
-int r;
-{
- uInt t; /* temporary storage */
- uLong b; /* bit buffer */
- uInt k; /* bits in bit buffer */
- Bytef *p; /* input data pointer */
- uInt n; /* bytes available there */
- Bytef *q; /* output window write pointer */
- uInt m; /* bytes to end of window or read pointer */
-
- /* copy input/output information to locals (UPDATE macro restores) */
- LOAD
-
- /* process input based on current state */
- while (1) switch (s->mode)
- {
- case TYPE:
- NEEDBITS(3)
- t = (uInt)b & 7;
- s->last = t & 1;
- switch (t >> 1)
- {
- case 0: /* stored */
- Trace((stderr, "inflate: stored block%s\n",
- s->last ? " (last)" : ""));
- DUMPBITS(3)
- t = k & 7; /* go to byte boundary */
- DUMPBITS(t)
- s->mode = LENS; /* get length of stored block */
- break;
- case 1: /* fixed */
- Trace((stderr, "inflate: fixed codes block%s\n",
- s->last ? " (last)" : ""));
- {
- uInt bl, bd;
- inflate_huft *tl, *td;
-
- inflate_trees_fixed(&bl, &bd, &tl, &td);
- s->sub.decode.codes = inflate_codes_new(bl, bd, tl, td, z);
- if (s->sub.decode.codes == Z_NULL)
- {
- r = Z_MEM_ERROR;
- LEAVE
- }
- s->sub.decode.tl = Z_NULL; /* don't try to free these */
- s->sub.decode.td = Z_NULL;
- }
- DUMPBITS(3)
- s->mode = CODES;
- break;
- case 2: /* dynamic */
- Trace((stderr, "inflate: dynamic codes block%s\n",
- s->last ? " (last)" : ""));
- DUMPBITS(3)
- s->mode = TABLE;
- break;
- case 3: /* illegal */
- DUMPBITS(3)
- s->mode = BADB;
- z->msg = (char*)"invalid block type";
- r = Z_DATA_ERROR;
- LEAVE
- }
- break;
- case LENS:
- NEEDBITS(32)
- if ((((~b) >> 16) & 0xffff) != (b & 0xffff))
- {
- s->mode = BADB;
- z->msg = (char*)"invalid stored block lengths";
- r = Z_DATA_ERROR;
- LEAVE
- }
- s->sub.left = (uInt)b & 0xffff;
- b = k = 0; /* dump bits */
- Tracev((stderr, "inflate: stored length %u\n", s->sub.left));
- s->mode = s->sub.left ? STORED : (s->last ? DRY : TYPE);
- break;
- case STORED:
- if (n == 0)
- LEAVE
- NEEDOUT
- t = s->sub.left;
- if (t > n) t = n;
- if (t > m) t = m;
- zmemcpy(q, p, t);
- p += t; n -= t;
- q += t; m -= t;
- if ((s->sub.left -= t) != 0)
- break;
- Tracev((stderr, "inflate: stored end, %lu total out\n",
- z->total_out + (q >= s->read ? q - s->read :
- (s->end - s->read) + (q - s->window))));
- s->mode = s->last ? DRY : TYPE;
- break;
- case TABLE:
- NEEDBITS(14)
- s->sub.trees.table = t = (uInt)b & 0x3fff;
-#ifndef PKZIP_BUG_WORKAROUND
- if ((t & 0x1f) > 29 || ((t >> 5) & 0x1f) > 29)
- {
- s->mode = BADB;
- z->msg = (char*)"too many length or distance symbols";
- r = Z_DATA_ERROR;
- LEAVE
- }
-#endif
- t = 258 + (t & 0x1f) + ((t >> 5) & 0x1f);
- if (t < 19)
- t = 19;
- if ((s->sub.trees.blens = (uIntf*)ZALLOC(z, t, sizeof(uInt))) == Z_NULL)
- {
- r = Z_MEM_ERROR;
- LEAVE
- }
- DUMPBITS(14)
- s->sub.trees.index = 0;
- Tracev((stderr, "inflate: table sizes ok\n"));
- s->mode = BTREE;
- case BTREE:
- while (s->sub.trees.index < 4 + (s->sub.trees.table >> 10))
- {
- NEEDBITS(3)
- s->sub.trees.blens[border[s->sub.trees.index++]] = (uInt)b & 7;
- DUMPBITS(3)
- }
- while (s->sub.trees.index < 19)
- s->sub.trees.blens[border[s->sub.trees.index++]] = 0;
- s->sub.trees.bb = 7;
- t = inflate_trees_bits(s->sub.trees.blens, &s->sub.trees.bb,
- &s->sub.trees.tb, z);
- if (t != Z_OK)
- {
- ZFREE(z, s->sub.trees.blens);
- r = t;
- if (r == Z_DATA_ERROR)
- s->mode = BADB;
- LEAVE
- }
- s->sub.trees.index = 0;
- Tracev((stderr, "inflate: bits tree ok\n"));
- s->mode = DTREE;
- case DTREE:
- while (t = s->sub.trees.table,
- s->sub.trees.index < 258 + (t & 0x1f) + ((t >> 5) & 0x1f))
- {
- inflate_huft *h;
- uInt i, j, c;
-
- t = s->sub.trees.bb;
- NEEDBITS(t)
- h = s->sub.trees.tb + ((uInt)b & inflate_mask[t]);
- t = h->word.what.Bits;
- c = h->more.Base;
- if (c < 16)
- {
- DUMPBITS(t)
- s->sub.trees.blens[s->sub.trees.index++] = c;
- }
- else /* c == 16..18 */
- {
- i = c == 18 ? 7 : c - 14;
- j = c == 18 ? 11 : 3;
- NEEDBITS(t + i)
- DUMPBITS(t)
- j += (uInt)b & inflate_mask[i];
- DUMPBITS(i)
- i = s->sub.trees.index;
- t = s->sub.trees.table;
- if (i + j > 258 + (t & 0x1f) + ((t >> 5) & 0x1f) ||
- (c == 16 && i < 1))
- {
- inflate_trees_free(s->sub.trees.tb, z);
- ZFREE(z, s->sub.trees.blens);
- s->mode = BADB;
- z->msg = (char*)"invalid bit length repeat";
- r = Z_DATA_ERROR;
- LEAVE
- }
- c = c == 16 ? s->sub.trees.blens[i - 1] : 0;
- do {
- s->sub.trees.blens[i++] = c;
- } while (--j);
- s->sub.trees.index = i;
- }
- }
- inflate_trees_free(s->sub.trees.tb, z);
- s->sub.trees.tb = Z_NULL;
- {
- uInt bl, bd;
- inflate_huft *tl, *td;
- inflate_codes_statef *c;
-
- bl = 9; /* must be <= 9 for lookahead assumptions */
- bd = 6; /* must be <= 9 for lookahead assumptions */
- t = s->sub.trees.table;
-#ifdef DEBUG_ZLIB
- inflate_hufts = 0;
-#endif
- t = inflate_trees_dynamic(257 + (t & 0x1f), 1 + ((t >> 5) & 0x1f),
- s->sub.trees.blens, &bl, &bd, &tl, &td, z);
- ZFREE(z, s->sub.trees.blens);
- if (t != Z_OK)
- {
- if (t == (uInt)Z_DATA_ERROR)
- s->mode = BADB;
- r = t;
- LEAVE
- }
- Tracev((stderr, "inflate: trees ok, %d * %d bytes used\n",
- inflate_hufts, sizeof(inflate_huft)));
- if ((c = inflate_codes_new(bl, bd, tl, td, z)) == Z_NULL)
- {
- inflate_trees_free(td, z);
- inflate_trees_free(tl, z);
- r = Z_MEM_ERROR;
- LEAVE
- }
- s->sub.decode.codes = c;
- s->sub.decode.tl = tl;
- s->sub.decode.td = td;
- }
- s->mode = CODES;
- case CODES:
- UPDATE
- if ((r = inflate_codes(s, z, r)) != Z_STREAM_END)
- return inflate_flush(s, z, r);
- r = Z_OK;
- inflate_codes_free(s->sub.decode.codes, z);
- inflate_trees_free(s->sub.decode.td, z);
- inflate_trees_free(s->sub.decode.tl, z);
- LOAD
- Tracev((stderr, "inflate: codes end, %lu total out\n",
- z->total_out + (q >= s->read ? q - s->read :
- (s->end - s->read) + (q - s->window))));
- if (!s->last)
- {
- s->mode = TYPE;
- break;
- }
- if (k > 7) /* return unused byte, if any */
- {
- Assert(k < 16, "inflate_codes grabbed too many bytes")
- k -= 8;
- n++;
- p--; /* can always return one */
- }
- s->mode = DRY;
- case DRY:
- FLUSH
- if (s->read != s->write)
- LEAVE
- s->mode = DONEB;
- case DONEB:
- r = Z_STREAM_END;
- LEAVE
- case BADB:
- r = Z_DATA_ERROR;
- LEAVE
- default:
- r = Z_STREAM_ERROR;
- LEAVE
- }
-}
-
-
-int inflate_blocks_free(s, z, c)
-inflate_blocks_statef *s;
-z_streamp z;
-uLongf *c;
-{
- inflate_blocks_reset(s, z, c);
- ZFREE(z, s->window);
- ZFREE(z, s);
- Trace((stderr, "inflate: blocks freed\n"));
- return Z_OK;
-}
-
-
-void inflate_set_dictionary(s, d, n)
-inflate_blocks_statef *s;
-const Bytef *d;
-uInt n;
-{
- zmemcpy((charf *)s->window, d, n);
- s->read = s->write = s->window + n;
-}
-
-/*
- * This subroutine adds the data at next_in/avail_in to the output history
- * without performing any output. The output buffer must be "caught up";
- * i.e. no pending output (hence s->read equals s->write), and the state must
- * be BLOCKS (i.e. we should be willing to see the start of a series of
- * BLOCKS). On exit, the output will also be caught up, and the checksum
- * will have been updated if need be.
- */
-int inflate_addhistory(s, z)
-inflate_blocks_statef *s;
-z_stream *z;
-{
- uLong b; /* bit buffer */ /* NOT USED HERE */
- uInt k; /* bits in bit buffer */ /* NOT USED HERE */
- uInt t; /* temporary storage */
- Bytef *p; /* input data pointer */
- uInt n; /* bytes available there */
- Bytef *q; /* output window write pointer */
- uInt m; /* bytes to end of window or read pointer */
-
- if (s->read != s->write)
- return Z_STREAM_ERROR;
- if (s->mode != TYPE)
- return Z_DATA_ERROR;
-
- /* we're ready to rock */
- LOAD
- /* while there is input ready, copy to output buffer, moving
- * pointers as needed.
- */
- while (n) {
- t = n; /* how many to do */
- /* is there room until end of buffer? */
- if (t > m) t = m;
- /* update check information */
- if (s->checkfn != Z_NULL)
- s->check = (*s->checkfn)(s->check, q, t);
- zmemcpy(q, p, t);
- q += t;
- p += t;
- n -= t;
- z->total_out += t;
- s->read = q; /* drag read pointer forward */
-/* WWRAP */ /* expand WWRAP macro by hand to handle s->read */
- if (q == s->end) {
- s->read = q = s->window;
- m = WAVAIL;
- }
- }
- UPDATE
- return Z_OK;
-}
-
-
-/*
- * At the end of a Deflate-compressed PPP packet, we expect to have seen
- * a `stored' block type value but not the (zero) length bytes.
- */
-int inflate_packet_flush(s)
- inflate_blocks_statef *s;
-{
- if (s->mode != LENS)
- return Z_DATA_ERROR;
- s->mode = TYPE;
- return Z_OK;
-}
-/* --- infblock.c */
-
-/* +++ inftrees.c */
-/* inftrees.c -- generate Huffman trees for efficient decoding
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* #include "zutil.h" */
-/* #include "inftrees.h" */
-
-char inflate_copyright[] = " inflate 1.0.4 Copyright 1995-1996 Mark Adler ";
-/*
- If you use the zlib library in a product, an acknowledgment is welcome
- in the documentation of your product. If for some reason you cannot
- include such an acknowledgment, I would appreciate that you keep this
- copyright string in the executable of your product.
- */
-
-#ifndef NO_DUMMY_DECL
-struct internal_state {int dummy;}; /* for buggy compilers */
-#endif
-
-/* simplify the use of the inflate_huft type with some defines */
-#define base more.Base
-#define next more.Next
-#define exop word.what.Exop
-#define bits word.what.Bits
-
-
-local int huft_build OF((
- uIntf *, /* code lengths in bits */
- uInt, /* number of codes */
- uInt, /* number of "simple" codes */
- const uIntf *, /* list of base values for non-simple codes */
- const uIntf *, /* list of extra bits for non-simple codes */
- inflate_huft * FAR*,/* result: starting table */
- uIntf *, /* maximum lookup bits (returns actual) */
- z_streamp )); /* for zalloc function */
-
-local voidpf falloc OF((
- voidpf, /* opaque pointer (not used) */
- uInt, /* number of items */
- uInt)); /* size of item */
-
-/* Tables for deflate from PKZIP's appnote.txt. */
-local const uInt cplens[31] = { /* Copy lengths for literal codes 257..285 */
- 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
- 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
- /* see note #13 above about 258 */
-local const uInt cplext[31] = { /* Extra bits for literal codes 257..285 */
- 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
- 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 112, 112}; /* 112==invalid */
-local const uInt cpdist[30] = { /* Copy offsets for distance codes 0..29 */
- 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
- 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
- 8193, 12289, 16385, 24577};
-local const uInt cpdext[30] = { /* Extra bits for distance codes */
- 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
- 7, 7, 8, 8, 9, 9, 10, 10, 11, 11,
- 12, 12, 13, 13};
-
-/*
- Huffman code decoding is performed using a multi-level table lookup.
- The fastest way to decode is to simply build a lookup table whose
- size is determined by the longest code. However, the time it takes
- to build this table can also be a factor if the data being decoded
- is not very long. The most common codes are necessarily the
- shortest codes, so those codes dominate the decoding time, and hence
- the speed. The idea is you can have a shorter table that decodes the
- shorter, more probable codes, and then point to subsidiary tables for
- the longer codes. The time it costs to decode the longer codes is
- then traded against the time it takes to make longer tables.
-
-  The results of this trade-off are in the variables lbits and dbits
- below. lbits is the number of bits the first level table for literal/
- length codes can decode in one step, and dbits is the same thing for
- the distance codes. Subsequent tables are also less than or equal to
- those sizes. These values may be adjusted either when all of the
- codes are shorter than that, in which case the longest code length in
- bits is used, or when the shortest code is *longer* than the requested
- table size, in which case the length of the shortest code in bits is
- used.
-
- There are two different values for the two tables, since they code a
- different number of possibilities each. The literal/length table
- codes 286 possible values, or in a flat code, a little over eight
- bits. The distance table codes 30 possible values, or a little less
- than five bits, flat. The optimum values for speed end up being
- about one bit more than those, so lbits is 8+1 and dbits is 5+1.
- The optimum values may differ though from machine to machine, and
- possibly even between compilers. Your mileage may vary.
- */
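
To make the two-level scheme above concrete, here is a hypothetical sketch of the lookup path, independent of the inflate_huft layout used in this file: mask off the first lbits bits, and if the entry is not a leaf, index a sub-table with the bits that follow. The struct and names are made up for illustration.

    /* Illustrative only: a first-level table of 1<<lbits entries; an entry
     * either carries a decoded symbol or points at a sub-table indexed by
     * 'extra' further bits. */
    struct toy_entry {
        int is_leaf;                     /* symbol resolved at this level? */
        int symbol;                      /* valid when is_leaf != 0 */
        unsigned extra;                  /* bits indexed into the sub-table */
        const struct toy_entry *sub;     /* second-level table, or NULL */
    };

    static int toy_decode(const struct toy_entry *tbl, unsigned lbits,
                          unsigned long bits, unsigned *used)
    {
        const struct toy_entry *e = &tbl[bits & ((1UL << lbits) - 1)];

        *used = lbits;
        if (!e->is_leaf) {               /* longer code: go one level down */
            unsigned extra = e->extra;
            e = &e->sub[(bits >> lbits) & ((1UL << extra) - 1)];
            *used = lbits + extra;
        }
        return e->symbol;
    }
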
-
-
-/* If BMAX needs to be larger than 16, then h and x[] should be uLong. */
-#define BMAX 15 /* maximum bit length of any code */
-#define N_MAX 288 /* maximum number of codes in any set */
-
-#ifdef DEBUG_ZLIB
- uInt inflate_hufts;
-#endif
-
-local int huft_build(b, n, s, d, e, t, m, zs)
-uIntf *b; /* code lengths in bits (all assumed <= BMAX) */
-uInt n; /* number of codes (assumed <= N_MAX) */
-uInt s; /* number of simple-valued codes (0..s-1) */
-const uIntf *d; /* list of base values for non-simple codes */
-const uIntf *e; /* list of extra bits for non-simple codes */
-inflate_huft * FAR *t; /* result: starting table */
-uIntf *m; /* maximum lookup bits, returns actual */
-z_streamp zs; /* for zalloc function */
-/* Given a list of code lengths and a maximum table size, make a set of
- tables to decode that set of codes. Return Z_OK on success, Z_BUF_ERROR
- if the given code set is incomplete (the tables are still built in this
- case), Z_DATA_ERROR if the input is invalid (an over-subscribed set of
- lengths), or Z_MEM_ERROR if not enough memory. */
-{
-
- uInt a; /* counter for codes of length k */
- uInt c[BMAX+1]; /* bit length count table */
- uInt f; /* i repeats in table every f entries */
- int g; /* maximum code length */
- int h; /* table level */
- register uInt i; /* counter, current code */
- register uInt j; /* counter */
- register int k; /* number of bits in current code */
- int l; /* bits per table (returned in m) */
- register uIntf *p; /* pointer into c[], b[], or v[] */
- inflate_huft *q; /* points to current table */
- struct inflate_huft_s r; /* table entry for structure assignment */
- inflate_huft *u[BMAX]; /* table stack */
- uInt v[N_MAX]; /* values in order of bit length */
- register int w; /* bits before this table == (l * h) */
- uInt x[BMAX+1]; /* bit offsets, then code stack */
- uIntf *xp; /* pointer into x */
- int y; /* number of dummy codes added */
- uInt z; /* number of entries in current table */
-
-
- /* Generate counts for each bit length */
- p = c;
-#define C0 *p++ = 0;
-#define C2 C0 C0 C0 C0
-#define C4 C2 C2 C2 C2
- C4 /* clear c[]--assume BMAX+1 is 16 */
- p = b; i = n;
- do {
- c[*p++]++; /* assume all entries <= BMAX */
- } while (--i);
- if (c[0] == n) /* null input--all zero length codes */
- {
- *t = (inflate_huft *)Z_NULL;
- *m = 0;
- return Z_OK;
- }
-
-
- /* Find minimum and maximum length, bound *m by those */
- l = *m;
- for (j = 1; j <= BMAX; j++)
- if (c[j])
- break;
- k = j; /* minimum code length */
- if ((uInt)l < j)
- l = j;
- for (i = BMAX; i; i--)
- if (c[i])
- break;
- g = i; /* maximum code length */
- if ((uInt)l > i)
- l = i;
- *m = l;
-
-
- /* Adjust last length count to fill out codes, if needed */
- for (y = 1 << j; j < i; j++, y <<= 1)
- if ((y -= c[j]) < 0)
- return Z_DATA_ERROR;
- if ((y -= c[i]) < 0)
- return Z_DATA_ERROR;
- c[i] += y;
-
-
- /* Generate starting offsets into the value table for each length */
- x[1] = j = 0;
- p = c + 1; xp = x + 2;
- while (--i) { /* note that i == g from above */
- *xp++ = (j += *p++);
- }
-
-
- /* Make a table of values in order of bit lengths */
- p = b; i = 0;
- do {
- if ((j = *p++) != 0)
- v[x[j]++] = i;
- } while (++i < n);
- n = x[g]; /* set n to length of v */
-
-
- /* Generate the Huffman codes and for each, make the table entries */
- x[0] = i = 0; /* first Huffman code is zero */
- p = v; /* grab values in bit order */
- h = -1; /* no tables yet--level -1 */
- w = -l; /* bits decoded == (l * h) */
- u[0] = (inflate_huft *)Z_NULL; /* just to keep compilers happy */
- q = (inflate_huft *)Z_NULL; /* ditto */
- z = 0; /* ditto */
-
- /* go through the bit lengths (k already is bits in shortest code) */
- for (; k <= g; k++)
- {
- a = c[k];
- while (a--)
- {
- /* here i is the Huffman code of length k bits for value *p */
- /* make tables up to required level */
- while (k > w + l)
- {
- h++;
- w += l; /* previous table always l bits */
-
- /* compute minimum size table less than or equal to l bits */
- z = g - w;
- z = z > (uInt)l ? l : z; /* table size upper limit */
- if ((f = 1 << (j = k - w)) > a + 1) /* try a k-w bit table */
- { /* too few codes for k-w bit table */
- f -= a + 1; /* deduct codes from patterns left */
- xp = c + k;
- if (j < z)
- while (++j < z) /* try smaller tables up to z bits */
- {
- if ((f <<= 1) <= *++xp)
- break; /* enough codes to use up j bits */
- f -= *xp; /* else deduct codes from patterns */
- }
- }
- z = 1 << j; /* table entries for j-bit table */
-
- /* allocate and link in new table */
- if ((q = (inflate_huft *)ZALLOC
- (zs,z + 1,sizeof(inflate_huft))) == Z_NULL)
- {
- if (h)
- inflate_trees_free(u[0], zs);
- return Z_MEM_ERROR; /* not enough memory */
- }
-#ifdef DEBUG_ZLIB
- inflate_hufts += z + 1;
-#endif
- *t = q + 1; /* link to list for huft_free() */
- *(t = &(q->next)) = Z_NULL;
- u[h] = ++q; /* table starts after link */
-
- /* connect to last table, if there is one */
- if (h)
- {
- x[h] = i; /* save pattern for backing up */
- r.bits = (Byte)l; /* bits to dump before this table */
- r.exop = (Byte)j; /* bits in this table */
- r.next = q; /* pointer to this table */
- j = i >> (w - l); /* (get around Turbo C bug) */
- u[h-1][j] = r; /* connect to last table */
- }
- }
-
- /* set up table entry in r */
- r.bits = (Byte)(k - w);
- if (p >= v + n)
- r.exop = 128 + 64; /* out of values--invalid code */
- else if (*p < s)
- {
- r.exop = (Byte)(*p < 256 ? 0 : 32 + 64); /* 256 is end-of-block */
- r.base = *p++; /* simple code is just the value */
- }
- else
- {
- r.exop = (Byte)(e[*p - s] + 16 + 64);/* non-simple--look up in lists */
- r.base = d[*p++ - s];
- }
-
- /* fill code-like entries with r */
- f = 1 << (k - w);
- for (j = i >> w; j < z; j += f)
- q[j] = r;
-
- /* backwards increment the k-bit code i */
- for (j = 1 << (k - 1); i & j; j >>= 1)
- i ^= j;
- i ^= j;
-
- /* backup over finished tables */
- while ((i & ((1 << w) - 1)) != x[h])
- {
- h--; /* don't need to update q */
- w -= l;
- }
- }
- }
-
-
- /* Return Z_BUF_ERROR if we were given an incomplete table */
- return y != 0 && g != 1 ? Z_BUF_ERROR : Z_OK;
-}
-
-
-int inflate_trees_bits(c, bb, tb, z)
-uIntf *c; /* 19 code lengths */
-uIntf *bb; /* bits tree desired/actual depth */
-inflate_huft * FAR *tb; /* bits tree result */
-z_streamp z; /* for zfree function */
-{
- int r;
-
- r = huft_build(c, 19, 19, (uIntf*)Z_NULL, (uIntf*)Z_NULL, tb, bb, z);
- if (r == Z_DATA_ERROR)
- z->msg = (char*)"oversubscribed dynamic bit lengths tree";
- else if (r == Z_BUF_ERROR || *bb == 0)
- {
- inflate_trees_free(*tb, z);
- z->msg = (char*)"incomplete dynamic bit lengths tree";
- r = Z_DATA_ERROR;
- }
- return r;
-}
-
-
-int inflate_trees_dynamic(nl, nd, c, bl, bd, tl, td, z)
-uInt nl; /* number of literal/length codes */
-uInt nd; /* number of distance codes */
-uIntf *c; /* that many (total) code lengths */
-uIntf *bl; /* literal desired/actual bit depth */
-uIntf *bd; /* distance desired/actual bit depth */
-inflate_huft * FAR *tl; /* literal/length tree result */
-inflate_huft * FAR *td; /* distance tree result */
-z_streamp z; /* for zfree function */
-{
- int r;
-
- /* build literal/length tree */
- r = huft_build(c, nl, 257, cplens, cplext, tl, bl, z);
- if (r != Z_OK || *bl == 0)
- {
- if (r == Z_DATA_ERROR)
- z->msg = (char*)"oversubscribed literal/length tree";
- else if (r != Z_MEM_ERROR)
- {
- inflate_trees_free(*tl, z);
- z->msg = (char*)"incomplete literal/length tree";
- r = Z_DATA_ERROR;
- }
- return r;
- }
-
- /* build distance tree */
- r = huft_build(c + nl, nd, 0, cpdist, cpdext, td, bd, z);
- if (r != Z_OK || (*bd == 0 && nl > 257))
- {
- if (r == Z_DATA_ERROR)
- z->msg = (char*)"oversubscribed distance tree";
- else if (r == Z_BUF_ERROR) {
-#ifdef PKZIP_BUG_WORKAROUND
- r = Z_OK;
- }
-#else
- inflate_trees_free(*td, z);
- z->msg = (char*)"incomplete distance tree";
- r = Z_DATA_ERROR;
- }
- else if (r != Z_MEM_ERROR)
- {
- z->msg = (char*)"empty distance tree with lengths";
- r = Z_DATA_ERROR;
- }
- inflate_trees_free(*tl, z);
- return r;
-#endif
- }
-
- /* done */
- return Z_OK;
-}
-
-
-/* build fixed tables only once--keep them here */
-local int fixed_built = 0;
-#define FIXEDH 530 /* number of hufts used by fixed tables */
-local inflate_huft fixed_mem[FIXEDH];
-local uInt fixed_bl;
-local uInt fixed_bd;
-local inflate_huft *fixed_tl;
-local inflate_huft *fixed_td;
-
-
-local voidpf falloc(q, n, s)
-voidpf q; /* opaque pointer */
-uInt n; /* number of items */
-uInt s; /* size of item */
-{
- Assert(s == sizeof(inflate_huft) && n <= *(intf *)q,
- "inflate_trees falloc overflow");
- *(intf *)q -= n+s-s; /* s-s to avoid warning */
- return (voidpf)(fixed_mem + *(intf *)q);
-}
-
-
-int inflate_trees_fixed(bl, bd, tl, td)
-uIntf *bl; /* literal desired/actual bit depth */
-uIntf *bd; /* distance desired/actual bit depth */
-inflate_huft * FAR *tl; /* literal/length tree result */
-inflate_huft * FAR *td; /* distance tree result */
-{
- /* build fixed tables if not already (multiple overlapped executions ok) */
- if (!fixed_built)
- {
- int k; /* temporary variable */
- unsigned c[288]; /* length list for huft_build */
- z_stream z; /* for falloc function */
- int f = FIXEDH; /* number of hufts left in fixed_mem */
-
- /* set up fake z_stream for memory routines */
- z.zalloc = falloc;
- z.zfree = Z_NULL;
- z.opaque = (voidpf)&f;
-
- /* literal table */
- for (k = 0; k < 144; k++)
- c[k] = 8;
- for (; k < 256; k++)
- c[k] = 9;
- for (; k < 280; k++)
- c[k] = 7;
- for (; k < 288; k++)
- c[k] = 8;
- fixed_bl = 7;
- huft_build(c, 288, 257, cplens, cplext, &fixed_tl, &fixed_bl, &z);
-
- /* distance table */
- for (k = 0; k < 30; k++)
- c[k] = 5;
- fixed_bd = 5;
- huft_build(c, 30, 0, cpdist, cpdext, &fixed_td, &fixed_bd, &z);
-
- /* done */
- Assert(f == 0, "invalid build of fixed tables");
- fixed_built = 1;
- }
- *bl = fixed_bl;
- *bd = fixed_bd;
- *tl = fixed_tl;
- *td = fixed_td;
- return Z_OK;
-}
-
-
-int inflate_trees_free(t, z)
-inflate_huft *t; /* table to free */
-z_streamp z; /* for zfree function */
-/* Free the malloc'ed tables built by huft_build(), which makes a linked
- list of the tables it made, with the links in a dummy first entry of
- each table. */
-{
- register inflate_huft *p, *q, *r;
-
- /* Reverse linked list */
- p = Z_NULL;
- q = t;
- while (q != Z_NULL)
- {
- r = (q - 1)->next;
- (q - 1)->next = p;
- p = q;
- q = r;
- }
- /* Go through linked list, freeing from the malloced (t[-1]) address. */
- while (p != Z_NULL)
- {
- q = (--p)->next;
- ZFREE(z,p);
- p = q;
- }
- return Z_OK;
-}
-/* --- inftrees.c */
-
-/* +++ infcodes.c */
-/* infcodes.c -- process literals and length/distance pairs
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* #include "zutil.h" */
-/* #include "inftrees.h" */
-/* #include "infblock.h" */
-/* #include "infcodes.h" */
-/* #include "infutil.h" */
-
-/* +++ inffast.h */
-/* inffast.h -- header to use inffast.c
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-extern int inflate_fast OF((
- uInt,
- uInt,
- inflate_huft *,
- inflate_huft *,
- inflate_blocks_statef *,
- z_streamp ));
-/* --- inffast.h */
-
-/* simplify the use of the inflate_huft type with some defines */
-#define base more.Base
-#define next more.Next
-#define exop word.what.Exop
-#define bits word.what.Bits
-
-/* inflate codes private state */
-struct inflate_codes_state {
-
- /* mode */
- enum { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
- START, /* x: set up for LEN */
- LEN, /* i: get length/literal/eob next */
- LENEXT, /* i: getting length extra (have base) */
- DIST, /* i: get distance next */
- DISTEXT, /* i: getting distance extra */
- COPY, /* o: copying bytes in window, waiting for space */
- LIT, /* o: got literal, waiting for output space */
- WASH, /* o: got eob, possibly still output waiting */
- END, /* x: got eob and all data flushed */
- BADCODE} /* x: got error */
- mode; /* current inflate_codes mode */
-
- /* mode dependent information */
- uInt len;
- union {
- struct {
- inflate_huft *tree; /* pointer into tree */
- uInt need; /* bits needed */
- } code; /* if LEN or DIST, where in tree */
- uInt lit; /* if LIT, literal */
- struct {
- uInt get; /* bits to get for extra */
- uInt dist; /* distance back to copy from */
- } copy; /* if EXT or COPY, where and how much */
- } sub; /* submode */
-
- /* mode independent information */
- Byte lbits; /* ltree bits decoded per branch */
-  Byte dbits; /* dtree bits decoded per branch */
- inflate_huft *ltree; /* literal/length/eob tree */
- inflate_huft *dtree; /* distance tree */
-
-};
-
-
-inflate_codes_statef *inflate_codes_new(bl, bd, tl, td, z)
-uInt bl, bd;
-inflate_huft *tl;
-inflate_huft *td; /* need separate declaration for Borland C++ */
-z_streamp z;
-{
- inflate_codes_statef *c;
-
- if ((c = (inflate_codes_statef *)
- ZALLOC(z,1,sizeof(struct inflate_codes_state))) != Z_NULL)
- {
- c->mode = START;
- c->lbits = (Byte)bl;
- c->dbits = (Byte)bd;
- c->ltree = tl;
- c->dtree = td;
- Tracev((stderr, "inflate: codes new\n"));
- }
- return c;
-}
-
-
-int inflate_codes(s, z, r)
-inflate_blocks_statef *s;
-z_streamp z;
-int r;
-{
- uInt j; /* temporary storage */
- inflate_huft *t; /* temporary pointer */
- uInt e; /* extra bits or operation */
- uLong b; /* bit buffer */
- uInt k; /* bits in bit buffer */
- Bytef *p; /* input data pointer */
- uInt n; /* bytes available there */
- Bytef *q; /* output window write pointer */
- uInt m; /* bytes to end of window or read pointer */
- Bytef *f; /* pointer to copy strings from */
- inflate_codes_statef *c = s->sub.decode.codes; /* codes state */
-
- /* copy input/output information to locals (UPDATE macro restores) */
- LOAD
-
- /* process input and output based on current state */
- while (1) switch (c->mode)
- { /* waiting for "i:"=input, "o:"=output, "x:"=nothing */
- case START: /* x: set up for LEN */
-#ifndef SLOW
- if (m >= 258 && n >= 10)
- {
- UPDATE
- r = inflate_fast(c->lbits, c->dbits, c->ltree, c->dtree, s, z);
- LOAD
- if (r != Z_OK)
- {
- c->mode = r == Z_STREAM_END ? WASH : BADCODE;
- break;
- }
- }
-#endif /* !SLOW */
- c->sub.code.need = c->lbits;
- c->sub.code.tree = c->ltree;
- c->mode = LEN;
- case LEN: /* i: get length/literal/eob next */
- j = c->sub.code.need;
- NEEDBITS(j)
- t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
- DUMPBITS(t->bits)
- e = (uInt)(t->exop);
- if (e == 0) /* literal */
- {
- c->sub.lit = t->base;
- Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
- "inflate: literal '%c'\n" :
- "inflate: literal 0x%02x\n", t->base));
- c->mode = LIT;
- break;
- }
- if (e & 16) /* length */
- {
- c->sub.copy.get = e & 15;
- c->len = t->base;
- c->mode = LENEXT;
- break;
- }
- if ((e & 64) == 0) /* next table */
- {
- c->sub.code.need = e;
- c->sub.code.tree = t->next;
- break;
- }
- if (e & 32) /* end of block */
- {
- Tracevv((stderr, "inflate: end of block\n"));
- c->mode = WASH;
- break;
- }
- c->mode = BADCODE; /* invalid code */
- z->msg = (char*)"invalid literal/length code";
- r = Z_DATA_ERROR;
- LEAVE
- case LENEXT: /* i: getting length extra (have base) */
- j = c->sub.copy.get;
- NEEDBITS(j)
- c->len += (uInt)b & inflate_mask[j];
- DUMPBITS(j)
- c->sub.code.need = c->dbits;
- c->sub.code.tree = c->dtree;
- Tracevv((stderr, "inflate: length %u\n", c->len));
- c->mode = DIST;
- case DIST: /* i: get distance next */
- j = c->sub.code.need;
- NEEDBITS(j)
- t = c->sub.code.tree + ((uInt)b & inflate_mask[j]);
- DUMPBITS(t->bits)
- e = (uInt)(t->exop);
- if (e & 16) /* distance */
- {
- c->sub.copy.get = e & 15;
- c->sub.copy.dist = t->base;
- c->mode = DISTEXT;
- break;
- }
- if ((e & 64) == 0) /* next table */
- {
- c->sub.code.need = e;
- c->sub.code.tree = t->next;
- break;
- }
- c->mode = BADCODE; /* invalid code */
- z->msg = (char*)"invalid distance code";
- r = Z_DATA_ERROR;
- LEAVE
- case DISTEXT: /* i: getting distance extra */
- j = c->sub.copy.get;
- NEEDBITS(j)
- c->sub.copy.dist += (uInt)b & inflate_mask[j];
- DUMPBITS(j)
- Tracevv((stderr, "inflate: distance %u\n", c->sub.copy.dist));
- c->mode = COPY;
- case COPY: /* o: copying bytes in window, waiting for space */
-#ifndef __TURBOC__ /* Turbo C bug for following expression */
- f = (uInt)(q - s->window) < c->sub.copy.dist ?
- s->end - (c->sub.copy.dist - (q - s->window)) :
- q - c->sub.copy.dist;
-#else
- f = q - c->sub.copy.dist;
- if ((uInt)(q - s->window) < c->sub.copy.dist)
- f = s->end - (c->sub.copy.dist - (uInt)(q - s->window));
-#endif
- while (c->len)
- {
- NEEDOUT
- OUTBYTE(*f++)
- if (f == s->end)
- f = s->window;
- c->len--;
- }
- c->mode = START;
- break;
- case LIT: /* o: got literal, waiting for output space */
- NEEDOUT
- OUTBYTE(c->sub.lit)
- c->mode = START;
- break;
- case WASH: /* o: got eob, possibly more output */
- FLUSH
- if (s->read != s->write)
- LEAVE
- c->mode = END;
- case END:
- r = Z_STREAM_END;
- LEAVE
- case BADCODE: /* x: got error */
- r = Z_DATA_ERROR;
- LEAVE
- default:
- r = Z_STREAM_ERROR;
- LEAVE
- }
-}
-
-
-void inflate_codes_free(c, z)
-inflate_codes_statef *c;
-z_streamp z;
-{
- ZFREE(z, c);
- Tracev((stderr, "inflate: codes free\n"));
-}
-/* --- infcodes.c */
-
-/* +++ infutil.c */
-/* inflate_util.c -- data and routines common to blocks and codes
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* #include "zutil.h" */
-/* #include "infblock.h" */
-/* #include "inftrees.h" */
-/* #include "infcodes.h" */
-/* #include "infutil.h" */
-
-#ifndef NO_DUMMY_DECL
-struct inflate_codes_state {int dummy;}; /* for buggy compilers */
-#endif
-
-/* And'ing with mask[n] masks the lower n bits */
-uInt inflate_mask[17] = {
- 0x0000,
- 0x0001, 0x0003, 0x0007, 0x000f, 0x001f, 0x003f, 0x007f, 0x00ff,
- 0x01ff, 0x03ff, 0x07ff, 0x0fff, 0x1fff, 0x3fff, 0x7fff, 0xffff
-};
-
-
-/* copy as much as possible from the sliding window to the output area */
-int inflate_flush(s, z, r)
-inflate_blocks_statef *s;
-z_streamp z;
-int r;
-{
- uInt n;
- Bytef *p;
- Bytef *q;
-
- /* local copies of source and destination pointers */
- p = z->next_out;
- q = s->read;
-
- /* compute number of bytes to copy as far as end of window */
- n = (uInt)((q <= s->write ? s->write : s->end) - q);
- if (n > z->avail_out) n = z->avail_out;
- if (n && r == Z_BUF_ERROR) r = Z_OK;
-
- /* update counters */
- z->avail_out -= n;
- z->total_out += n;
-
- /* update check information */
- if (s->checkfn != Z_NULL)
- z->adler = s->check = (*s->checkfn)(s->check, q, n);
-
- /* copy as far as end of window */
- if (p != Z_NULL) {
- zmemcpy(p, q, n);
- p += n;
- }
- q += n;
-
- /* see if more to copy at beginning of window */
- if (q == s->end)
- {
- /* wrap pointers */
- q = s->window;
- if (s->write == s->end)
- s->write = s->window;
-
- /* compute bytes to copy */
- n = (uInt)(s->write - q);
- if (n > z->avail_out) n = z->avail_out;
- if (n && r == Z_BUF_ERROR) r = Z_OK;
-
- /* update counters */
- z->avail_out -= n;
- z->total_out += n;
-
- /* update check information */
- if (s->checkfn != Z_NULL)
- z->adler = s->check = (*s->checkfn)(s->check, q, n);
-
- /* copy */
- if (p != Z_NULL) {
- zmemcpy(p, q, n);
- p += n;
- }
- q += n;
- }
-
- /* update pointers */
- z->next_out = p;
- s->read = q;
-
- /* done */
- return r;
-}
-/* --- infutil.c */
-
-/* +++ inffast.c */
-/* inffast.c -- process literals and length/distance pairs fast
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* #include "zutil.h" */
-/* #include "inftrees.h" */
-/* #include "infblock.h" */
-/* #include "infcodes.h" */
-/* #include "infutil.h" */
-/* #include "inffast.h" */
-
-#ifndef NO_DUMMY_DECL
-struct inflate_codes_state {int dummy;}; /* for buggy compilers */
-#endif
-
-/* simplify the use of the inflate_huft type with some defines */
-#define base more.Base
-#define next more.Next
-#define exop word.what.Exop
-#define bits word.what.Bits
-
-/* macros for bit input with no checking and for returning unused bytes */
-#define GRABBITS(j) {while(k<(j)){b|=((uLong)NEXTBYTE)<<k;k+=8;}}
-#define UNGRAB {n+=(c=k>>3);p-=c;k&=7;}
-
-/* Called with number of bytes left to write in window at least 258
- (the maximum string length) and number of input bytes available
- at least ten. The ten bytes are six bytes for the longest length/
- distance pair plus four bytes for overloading the bit buffer. */
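
A rough worked check of that bound, using the usual deflate limits: a literal/length code is at most 15 bits plus at most 5 extra bits, and a distance code is at most 15 bits plus at most 13 extra bits, so one length/distance pair consumes at most 15 + 5 + 15 + 13 = 48 bits = 6 bytes; the remaining 4 bytes allow GRABBITS to over-fill the bit buffer without reading past the available input.
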
-
-int inflate_fast(bl, bd, tl, td, s, z)
-uInt bl, bd;
-inflate_huft *tl;
-inflate_huft *td; /* need separate declaration for Borland C++ */
-inflate_blocks_statef *s;
-z_streamp z;
-{
- inflate_huft *t; /* temporary pointer */
- uInt e; /* extra bits or operation */
- uLong b; /* bit buffer */
- uInt k; /* bits in bit buffer */
- Bytef *p; /* input data pointer */
- uInt n; /* bytes available there */
- Bytef *q; /* output window write pointer */
- uInt m; /* bytes to end of window or read pointer */
- uInt ml; /* mask for literal/length tree */
- uInt md; /* mask for distance tree */
- uInt c; /* bytes to copy */
- uInt d; /* distance back to copy from */
- Bytef *r; /* copy source pointer */
-
- /* load input, output, bit values */
- LOAD
-
- /* initialize masks */
- ml = inflate_mask[bl];
- md = inflate_mask[bd];
-
- /* do until not enough input or output space for fast loop */
- do { /* assume called with m >= 258 && n >= 10 */
- /* get literal/length code */
- GRABBITS(20) /* max bits for literal/length code */
- if ((e = (t = tl + ((uInt)b & ml))->exop) == 0)
- {
- DUMPBITS(t->bits)
- Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
- "inflate: * literal '%c'\n" :
- "inflate: * literal 0x%02x\n", t->base));
- *q++ = (Byte)t->base;
- m--;
- continue;
- }
- do {
- DUMPBITS(t->bits)
- if (e & 16)
- {
- /* get extra bits for length */
- e &= 15;
- c = t->base + ((uInt)b & inflate_mask[e]);
- DUMPBITS(e)
- Tracevv((stderr, "inflate: * length %u\n", c));
-
- /* decode distance base of block to copy */
- GRABBITS(15); /* max bits for distance code */
- e = (t = td + ((uInt)b & md))->exop;
- do {
- DUMPBITS(t->bits)
- if (e & 16)
- {
- /* get extra bits to add to distance base */
- e &= 15;
- GRABBITS(e) /* get extra bits (up to 13) */
- d = t->base + ((uInt)b & inflate_mask[e]);
- DUMPBITS(e)
- Tracevv((stderr, "inflate: * distance %u\n", d));
-
- /* do the copy */
- m -= c;
- if ((uInt)(q - s->window) >= d) /* offset before dest */
- { /* just copy */
- r = q - d;
- *q++ = *r++; c--; /* minimum count is three, */
- *q++ = *r++; c--; /* so unroll loop a little */
- }
- else /* else offset after destination */
- {
- e = d - (uInt)(q - s->window); /* bytes from offset to end */
- r = s->end - e; /* pointer to offset */
- if (c > e) /* if source crosses, */
- {
- c -= e; /* copy to end of window */
- do {
- *q++ = *r++;
- } while (--e);
- r = s->window; /* copy rest from start of window */
- }
- }
- do { /* copy all or what's left */
- *q++ = *r++;
- } while (--c);
- break;
- }
- else if ((e & 64) == 0)
- e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop;
- else
- {
- z->msg = (char*)"invalid distance code";
- UNGRAB
- UPDATE
- return Z_DATA_ERROR;
- }
- } while (1);
- break;
- }
- if ((e & 64) == 0)
- {
- if ((e = (t = t->next + ((uInt)b & inflate_mask[e]))->exop) == 0)
- {
- DUMPBITS(t->bits)
- Tracevv((stderr, t->base >= 0x20 && t->base < 0x7f ?
- "inflate: * literal '%c'\n" :
- "inflate: * literal 0x%02x\n", t->base));
- *q++ = (Byte)t->base;
- m--;
- break;
- }
- }
- else if (e & 32)
- {
- Tracevv((stderr, "inflate: * end of block\n"));
- UNGRAB
- UPDATE
- return Z_STREAM_END;
- }
- else
- {
- z->msg = (char*)"invalid literal/length code";
- UNGRAB
- UPDATE
- return Z_DATA_ERROR;
- }
- } while (1);
- } while (m >= 258 && n >= 10);
-
- /* not enough input or output--restore pointers and return */
- UNGRAB
- UPDATE
- return Z_OK;
-}
-/* --- inffast.c */
-
-/* +++ zutil.c */
-/* zutil.c -- target dependent utility functions for the compression library
- * Copyright (C) 1995-1996 Jean-loup Gailly.
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* From: zutil.c,v 1.17 1996/07/24 13:41:12 me Exp $ */
-
-/* #include "zutil.h" */
-
-#ifndef NO_DUMMY_DECL
-struct internal_state {int dummy;}; /* for buggy compilers */
-#endif
-
-#ifndef STDC
-extern void exit OF((int));
-#endif
-
-const char *z_errmsg[10] = {
-"need dictionary", /* Z_NEED_DICT 2 */
-"stream end", /* Z_STREAM_END 1 */
-"", /* Z_OK 0 */
-"file error", /* Z_ERRNO (-1) */
-"stream error", /* Z_STREAM_ERROR (-2) */
-"data error", /* Z_DATA_ERROR (-3) */
-"insufficient memory", /* Z_MEM_ERROR (-4) */
-"buffer error", /* Z_BUF_ERROR (-5) */
-"incompatible version",/* Z_VERSION_ERROR (-6) */
-""};
-
-
-const char *zlibVersion()
-{
- return ZLIB_VERSION;
-}
-
-#ifdef DEBUG_ZLIB
-void z_error (m)
- char *m;
-{
- fprintf(stderr, "%s\n", m);
- exit(1);
-}
-#endif
-
-#ifndef HAVE_MEMCPY
-
-void zmemcpy(dest, source, len)
- Bytef* dest;
- Bytef* source;
- uInt len;
-{
- if (len == 0) return;
- do {
- *dest++ = *source++; /* ??? to be unrolled */
- } while (--len != 0);
-}
-
-int zmemcmp(s1, s2, len)
- Bytef* s1;
- Bytef* s2;
- uInt len;
-{
- uInt j;
-
- for (j = 0; j < len; j++) {
- if (s1[j] != s2[j]) return 2*(s1[j] > s2[j])-1;
- }
- return 0;
-}
-
-void zmemzero(dest, len)
- Bytef* dest;
- uInt len;
-{
- if (len == 0) return;
- do {
- *dest++ = 0; /* ??? to be unrolled */
- } while (--len != 0);
-}
-#endif
-
-#ifdef __TURBOC__
-#if (defined( __BORLANDC__) || !defined(SMALL_MEDIUM)) && !defined(__32BIT__)
-/* Small and medium model in Turbo C are for now limited to near allocation
- * with reduced MAX_WBITS and MAX_MEM_LEVEL
- */
-# define MY_ZCALLOC
-
-/* Turbo C malloc() does not allow dynamic allocation of 64K bytes
- * and farmalloc(64K) returns a pointer with an offset of 8, so we
- * must fix the pointer. Warning: the pointer must be put back to its
- * original form in order to free it, use zcfree().
- */
-
-#define MAX_PTR 10
-/* 10*64K = 640K */
-
-local int next_ptr = 0;
-
-typedef struct ptr_table_s {
- voidpf org_ptr;
- voidpf new_ptr;
-} ptr_table;
-
-local ptr_table table[MAX_PTR];
-/* This table is used to remember the original form of pointers
- * to large buffers (64K). Such pointers are normalized with a zero offset.
- * Since MSDOS is not a preemptive multitasking OS, this table is not
- * protected from concurrent access. This hack doesn't work anyway on
- * a protected system like OS/2. Use Microsoft C instead.
- */
-
-voidpf zcalloc (voidpf opaque, unsigned items, unsigned size)
-{
- voidpf buf = opaque; /* just to make some compilers happy */
- ulg bsize = (ulg)items*size;
-
- /* If we allocate less than 65520 bytes, we assume that farmalloc
- * will return a usable pointer which doesn't have to be normalized.
- */
- if (bsize < 65520L) {
- buf = farmalloc(bsize);
- if (*(ush*)&buf != 0) return buf;
- } else {
- buf = farmalloc(bsize + 16L);
- }
- if (buf == NULL || next_ptr >= MAX_PTR) return NULL;
- table[next_ptr].org_ptr = buf;
-
- /* Normalize the pointer to seg:0 */
- *((ush*)&buf+1) += ((ush)((uch*)buf-0) + 15) >> 4;
- *(ush*)&buf = 0;
- table[next_ptr++].new_ptr = buf;
- return buf;
-}
-
-void zcfree (voidpf opaque, voidpf ptr)
-{
- int n;
- if (*(ush*)&ptr != 0) { /* object < 64K */
- farfree(ptr);
- return;
- }
- /* Find the original pointer */
- for (n = 0; n < next_ptr; n++) {
- if (ptr != table[n].new_ptr) continue;
-
- farfree(table[n].org_ptr);
- while (++n < next_ptr) {
- table[n-1] = table[n];
- }
- next_ptr--;
- return;
- }
- ptr = opaque; /* just to make some compilers happy */
- Assert(0, "zcfree: ptr not found");
-}
-#endif
-#endif /* __TURBOC__ */
-
-
-#if defined(M_I86) && !defined(__32BIT__)
-/* Microsoft C in 16-bit mode */
-
-# define MY_ZCALLOC
-
-#if (!defined(_MSC_VER) || (_MSC_VER < 600))
-# define _halloc halloc
-# define _hfree hfree
-#endif
-
-voidpf zcalloc (voidpf opaque, unsigned items, unsigned size)
-{
- if (opaque) opaque = 0; /* to make compiler happy */
- return _halloc((long)items, size);
-}
-
-void zcfree (voidpf opaque, voidpf ptr)
-{
- if (opaque) opaque = 0; /* to make compiler happy */
- _hfree(ptr);
-}
-
-#endif /* MSC */
-
-
-#ifndef MY_ZCALLOC /* Any system without a special alloc function */
-
-#ifndef STDC
-extern voidp calloc OF((uInt items, uInt size));
-extern void free OF((voidpf ptr));
-#endif
-
-voidpf zcalloc (opaque, items, size)
- voidpf opaque;
- unsigned items;
- unsigned size;
-{
- if (opaque) items += size - size; /* make compiler happy */
- return (voidpf)calloc(items, size);
-}
-
-void zcfree (opaque, ptr)
- voidpf opaque;
- voidpf ptr;
-{
- free(ptr);
- if (opaque) return; /* make compiler happy */
-}
-
-#endif /* MY_ZCALLOC */
-/* --- zutil.c */
-
-/* +++ adler32.c */
-/* adler32.c -- compute the Adler-32 checksum of a data stream
- * Copyright (C) 1995-1996 Mark Adler
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* From: adler32.c,v 1.10 1996/05/22 11:52:18 me Exp $ */
-
-/* #include "zlib.h" */
-
-#define BASE 65521L /* largest prime smaller than 65536 */
-#define NMAX 5552
-/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */
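
As a quick check of that claim: for n = 5552 the left side is 255*5552*5553/2 + 5553*65520 = 3,930,857,640 + 363,832,560 = 4,294,690,200, just under 2^32-1 = 4,294,967,295, while n = 5553 gives 4,296,171,735 and overflows; so 5552 is indeed the largest n for which the running sums fit in 32 bits before reducing modulo BASE.
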
-
-#define DO1(buf,i) {s1 += buf[i]; s2 += s1;}
-#define DO2(buf,i) DO1(buf,i); DO1(buf,i+1);
-#define DO4(buf,i) DO2(buf,i); DO2(buf,i+2);
-#define DO8(buf,i) DO4(buf,i); DO4(buf,i+4);
-#define DO16(buf) DO8(buf,0); DO8(buf,8);
-
-/* ========================================================================= */
-uLong adler32(adler, buf, len)
- uLong adler;
- const Bytef *buf;
- uInt len;
-{
- unsigned long s1 = adler & 0xffff;
- unsigned long s2 = (adler >> 16) & 0xffff;
- int k;
-
- if (buf == Z_NULL) return 1L;
-
- while (len > 0) {
- k = len < NMAX ? len : NMAX;
- len -= k;
- while (k >= 16) {
- DO16(buf);
- buf += 16;
- k -= 16;
- }
- if (k != 0) do {
- s1 += *buf++;
- s2 += s1;
- } while (--k);
- s1 %= BASE;
- s2 %= BASE;
- }
- return (s2 << 16) | s1;
-}
-/* --- adler32.c */
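
For reference, a minimal usage sketch of the incremental interface above, written against an ordinary userspace zlib installation rather than this in-kernel copy; the message and split point are arbitrary, and the result is independent of how the data is split across calls.

    #include <stdio.h>
    #include <zlib.h>

    int main(void)
    {
        static const unsigned char msg[] = "an example buffer";
        uLong a = adler32(0L, Z_NULL, 0);                      /* canonical initial value (1) */

        a = adler32(a, msg, 7);                                /* first chunk */
        a = adler32(a, msg + 7, (uInt)(sizeof(msg) - 1 - 7));  /* remainder */
        printf("adler32 = 0x%08lx\n", a);
        return 0;
    }
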
diff --git a/drivers/net/zlib.h b/drivers/net/zlib.h
deleted file mode 100644
index cf9153562702..000000000000
--- a/drivers/net/zlib.h
+++ /dev/null
@@ -1,1010 +0,0 @@
-/* $Id: zlib.h,v 1.2 1997/12/23 10:47:44 paulus Exp $ */
-
-/*
- * This file is derived from zlib.h and zconf.h from the zlib-1.0.4
- * distribution by Jean-loup Gailly and Mark Adler, with some additions
- * by Paul Mackerras to aid in implementing Deflate compression and
- * decompression for PPP packets.
- */
-
-/*
- * ==FILEVERSION 971127==
- *
- * This marker is used by the Linux installation script to determine
- * whether an up-to-date version of this file is already installed.
- */
-
-
-/* +++ zlib.h */
-/* zlib.h -- interface of the 'zlib' general purpose compression library
- version 1.0.4, Jul 24th, 1996.
-
- Copyright (C) 1995-1996 Jean-loup Gailly and Mark Adler
-
- This software is provided 'as-is', without any express or implied
- warranty. In no event will the authors be held liable for any damages
- arising from the use of this software.
-
- Permission is granted to anyone to use this software for any purpose,
- including commercial applications, and to alter it and redistribute it
- freely, subject to the following restrictions:
-
- 1. The origin of this software must not be misrepresented; you must not
- claim that you wrote the original software. If you use this software
- in a product, an acknowledgment in the product documentation would be
- appreciated but is not required.
- 2. Altered source versions must be plainly marked as such, and must not be
- misrepresented as being the original software.
- 3. This notice may not be removed or altered from any source distribution.
-
- Jean-loup Gailly Mark Adler
- gzip@prep.ai.mit.edu madler@alumni.caltech.edu
-
-
- The data format used by the zlib library is described by RFCs (Request for
- Comments) 1950 to 1952 in the files ftp://ds.internic.net/rfc/rfc1950.txt
- (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format).
-*/
-
-#ifndef _ZLIB_H
-#define _ZLIB_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-/* +++ zconf.h */
-/* zconf.h -- configuration of the zlib compression library
- * Copyright (C) 1995-1996 Jean-loup Gailly.
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* From: zconf.h,v 1.20 1996/07/02 15:09:28 me Exp $ */
-
-#ifndef _ZCONF_H
-#define _ZCONF_H
-
-/*
- * If you *really* need a unique prefix for all types and library functions,
- * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it.
- */
-#ifdef Z_PREFIX
-# define deflateInit_ z_deflateInit_
-# define deflate z_deflate
-# define deflateEnd z_deflateEnd
-# define inflateInit_ z_inflateInit_
-# define inflate z_inflate
-# define inflateEnd z_inflateEnd
-# define deflateInit2_ z_deflateInit2_
-# define deflateSetDictionary z_deflateSetDictionary
-# define deflateCopy z_deflateCopy
-# define deflateReset z_deflateReset
-# define deflateParams z_deflateParams
-# define inflateInit2_ z_inflateInit2_
-# define inflateSetDictionary z_inflateSetDictionary
-# define inflateSync z_inflateSync
-# define inflateReset z_inflateReset
-# define compress z_compress
-# define uncompress z_uncompress
-# define adler32 z_adler32
-# define crc32 z_crc32
-# define get_crc_table z_get_crc_table
-
-# define Byte z_Byte
-# define uInt z_uInt
-# define uLong z_uLong
-# define Bytef z_Bytef
-# define charf z_charf
-# define intf z_intf
-# define uIntf z_uIntf
-# define uLongf z_uLongf
-# define voidpf z_voidpf
-# define voidp z_voidp
-#endif
-
-#if (defined(_WIN32) || defined(__WIN32__)) && !defined(WIN32)
-# define WIN32
-#endif
-#if defined(__GNUC__) || defined(WIN32) || defined(__386__) || defined(i386)
-# ifndef __32BIT__
-# define __32BIT__
-# endif
-#endif
-#if defined(__MSDOS__) && !defined(MSDOS)
-# define MSDOS
-#endif
-
-/*
- * Compile with -DMAXSEG_64K if the alloc function cannot allocate more
- * than 64k bytes at a time (needed on systems with 16-bit int).
- */
-#if defined(MSDOS) && !defined(__32BIT__)
-# define MAXSEG_64K
-#endif
-#ifdef MSDOS
-# define UNALIGNED_OK
-#endif
-
-#if (defined(MSDOS) || defined(_WINDOWS) || defined(WIN32)) && !defined(STDC)
-# define STDC
-#endif
-#if (defined(__STDC__) || defined(__cplusplus)) && !defined(STDC)
-# define STDC
-#endif
-
-#ifndef STDC
-# ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */
-# define const
-# endif
-#endif
-
-/* Some Mac compilers merge all .h files incorrectly: */
-#if defined(__MWERKS__) || defined(applec) ||defined(THINK_C) ||defined(__SC__)
-# define NO_DUMMY_DECL
-#endif
-
-/* Maximum value for memLevel in deflateInit2 */
-#ifndef MAX_MEM_LEVEL
-# ifdef MAXSEG_64K
-# define MAX_MEM_LEVEL 8
-# else
-# define MAX_MEM_LEVEL 9
-# endif
-#endif
-
-/* Maximum value for windowBits in deflateInit2 and inflateInit2 */
-#ifndef MAX_WBITS
-# define MAX_WBITS 15 /* 32K LZ77 window */
-#endif
-
-/* The memory requirements for deflate are (in bytes):
- 1 << (windowBits+2) + 1 << (memLevel+9)
- that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values)
- plus a few kilobytes for small objects. For example, if you want to reduce
- the default memory requirements from 256K to 128K, compile with
- make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7"
- Of course this will generally degrade compression (there's no free lunch).
-
- The memory requirements for inflate are (in bytes) 1 << windowBits
- that is, 32K for windowBits=15 (default value) plus a few kilobytes
- for small objects.
-*/
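
Worked out for the reduced build suggested above: (1 << (14+2)) + (1 << (7+9)) = 64K + 64K = 128K for deflate, and a 1 << 14 = 16K window for inflate, compared with 128K + 128K = 256K and a 32K window at the default MAX_WBITS=15 and MAX_MEM_LEVEL=8.
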
-
- /* Type declarations */
-
-#ifndef OF /* function prototypes */
-# ifdef STDC
-# define OF(args) args
-# else
-# define OF(args) ()
-# endif
-#endif
-
-/* The following definitions for FAR are needed only for MSDOS mixed
- * model programming (small or medium model with some far allocations).
- * This was tested only with MSC; for other MSDOS compilers you may have
- * to define NO_MEMCPY in zutil.h. If you don't need the mixed model,
- * just define FAR to be empty.
- */
-#if (defined(M_I86SM) || defined(M_I86MM)) && !defined(__32BIT__)
- /* MSC small or medium model */
-# define SMALL_MEDIUM
-# ifdef _MSC_VER
-# define FAR __far
-# else
-# define FAR far
-# endif
-#endif
-#if defined(__BORLANDC__) && (defined(__SMALL__) || defined(__MEDIUM__))
-# ifndef __32BIT__
-# define SMALL_MEDIUM
-# define FAR __far
-# endif
-#endif
-#ifndef FAR
-# define FAR
-#endif
-
-typedef unsigned char Byte; /* 8 bits */
-typedef unsigned int uInt; /* 16 bits or more */
-typedef unsigned long uLong; /* 32 bits or more */
-
-#if defined(__BORLANDC__) && defined(SMALL_MEDIUM)
- /* Borland C/C++ ignores FAR inside typedef */
-# define Bytef Byte FAR
-#else
- typedef Byte FAR Bytef;
-#endif
-typedef char FAR charf;
-typedef int FAR intf;
-typedef uInt FAR uIntf;
-typedef uLong FAR uLongf;
-
-#ifdef STDC
- typedef void FAR *voidpf;
- typedef void *voidp;
-#else
- typedef Byte FAR *voidpf;
- typedef Byte *voidp;
-#endif
-
-
-/* Compile with -DZLIB_DLL for Windows DLL support */
-#if (defined(_WINDOWS) || defined(WINDOWS)) && defined(ZLIB_DLL)
-# include <windows.h>
-# define EXPORT WINAPI
-#else
-# define EXPORT
-#endif
-
-#endif /* _ZCONF_H */
-/* --- zconf.h */
-
-#define ZLIB_VERSION "1.0.4P"
-
-/*
- The 'zlib' compression library provides in-memory compression and
- decompression functions, including integrity checks of the uncompressed
- data. This version of the library supports only one compression method
- (deflation) but other algorithms may be added later and will have the same
- stream interface.
-
- For compression the application must provide the output buffer and
- may optionally provide the input buffer for optimization. For decompression,
- the application must provide the input buffer and may optionally provide
- the output buffer for optimization.
-
- Compression can be done in a single step if the buffers are large
- enough (for example if an input file is mmap'ed), or can be done by
- repeated calls of the compression function. In the latter case, the
- application must provide more input and/or consume the output
- (providing more output space) before each call.
-
- The library does not install any signal handler. It is recommended to
- add at least a handler for SIGSEGV when decompressing; the library checks
- the consistency of the input data whenever possible but may go nuts
- for some forms of corrupted input.
-*/
-
-typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size));
-typedef void (*free_func) OF((voidpf opaque, voidpf address));
-
-struct internal_state;
-
-typedef struct z_stream_s {
- Bytef *next_in; /* next input byte */
- uInt avail_in; /* number of bytes available at next_in */
- uLong total_in; /* total nb of input bytes read so far */
-
- Bytef *next_out; /* next output byte should be put there */
- uInt avail_out; /* remaining free space at next_out */
- uLong total_out; /* total nb of bytes output so far */
-
- char *msg; /* last error message, NULL if no error */
- struct internal_state FAR *state; /* not visible by applications */
-
- alloc_func zalloc; /* used to allocate the internal state */
- free_func zfree; /* used to free the internal state */
- voidpf opaque; /* private data object passed to zalloc and zfree */
-
- int data_type; /* best guess about the data type: ascii or binary */
- uLong adler; /* adler32 value of the uncompressed data */
- uLong reserved; /* reserved for future use */
-} z_stream;
-
-typedef z_stream FAR *z_streamp;
-
-/*
- The application must update next_in and avail_in when avail_in has
- dropped to zero. It must update next_out and avail_out when avail_out
- has dropped to zero. The application must initialize zalloc, zfree and
- opaque before calling the init function. All other fields are set by the
- compression library and must not be updated by the application.
-
- The opaque value provided by the application will be passed as the first
- parameter for calls of zalloc and zfree. This can be useful for custom
- memory management. The compression library attaches no meaning to the
- opaque value.
-
- zalloc must return Z_NULL if there is not enough memory for the object.
- On 16-bit systems, the functions zalloc and zfree must be able to allocate
- exactly 65536 bytes, but will not be required to allocate more than this
- if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS,
- pointers returned by zalloc for objects of exactly 65536 bytes *must*
- have their offset normalized to zero. The default allocation function
- provided by this library ensures this (see zutil.c). To reduce memory
- requirements and avoid any allocation of 64K objects, at the expense of
- compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h).
-
- The fields total_in and total_out can be used for statistics or
- progress reports. After compression, total_in holds the total size of
- the uncompressed data and may be saved for use in the decompressor
- (particularly if the decompressor wants to decompress everything in
- a single step).
-*/
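
A minimal sketch of setting up those fields before the init call, compiled against a regular zlib build; my_alloc and my_free are hypothetical caller-supplied hooks, and an application may just as well leave zalloc/zfree as Z_NULL to get the defaults.

    #include <stdlib.h>
    #include <string.h>
    #include <zlib.h>

    /* Hypothetical allocator hooks; 'opaque' is handed back untouched. */
    static voidpf my_alloc(voidpf opaque, uInt items, uInt size)
    {
        (void)opaque;
        return calloc(items, size);
    }

    static void my_free(voidpf opaque, voidpf address)
    {
        (void)opaque;
        free(address);
    }

    static void setup_stream(z_stream *strm, unsigned char *in, unsigned in_len)
    {
        memset(strm, 0, sizeof(*strm));
        strm->zalloc = my_alloc;      /* used to allocate the internal state */
        strm->zfree  = my_free;
        strm->opaque = Z_NULL;        /* passed as first argument to the hooks */
        strm->next_in  = in;          /* the caller keeps these two fields current */
        strm->avail_in = in_len;
    }
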
-
- /* constants */
-
-#define Z_NO_FLUSH 0
-#define Z_PARTIAL_FLUSH 1
-#define Z_PACKET_FLUSH 2
-#define Z_SYNC_FLUSH 3
-#define Z_FULL_FLUSH 4
-#define Z_FINISH 5
-/* Allowed flush values; see deflate() below for details */
-
-#define Z_OK 0
-#define Z_STREAM_END 1
-#define Z_NEED_DICT 2
-#define Z_ERRNO (-1)
-#define Z_STREAM_ERROR (-2)
-#define Z_DATA_ERROR (-3)
-#define Z_MEM_ERROR (-4)
-#define Z_BUF_ERROR (-5)
-#define Z_VERSION_ERROR (-6)
-/* Return codes for the compression/decompression functions. Negative
- * values are errors, positive values are used for special but normal events.
- */
-
-#define Z_NO_COMPRESSION 0
-#define Z_BEST_SPEED 1
-#define Z_BEST_COMPRESSION 9
-#define Z_DEFAULT_COMPRESSION (-1)
-/* compression levels */
-
-#define Z_FILTERED 1
-#define Z_HUFFMAN_ONLY 2
-#define Z_DEFAULT_STRATEGY 0
-/* compression strategy; see deflateInit2() below for details */
-
-#define Z_BINARY 0
-#define Z_ASCII 1
-#define Z_UNKNOWN 2
-/* Possible values of the data_type field */
-
-#define Z_DEFLATED 8
-/* The deflate compression method (the only one supported in this version) */
-
-#define Z_NULL 0 /* for initializing zalloc, zfree, opaque */
-
-#define zlib_version zlibVersion()
-/* for compatibility with versions < 1.0.2 */
-
- /* basic functions */
-
-extern const char * EXPORT zlibVersion OF((void));
-/* The application can compare zlibVersion and ZLIB_VERSION for consistency.
- If the first character differs, the library code actually used is
- not compatible with the zlib.h header file used by the application.
- This check is automatically made by deflateInit and inflateInit.
- */
-
-/*
-extern int EXPORT deflateInit OF((z_streamp strm, int level));
-
- Initializes the internal stream state for compression. The fields
- zalloc, zfree and opaque must be initialized before by the caller.
- If zalloc and zfree are set to Z_NULL, deflateInit updates them to
- use default allocation functions.
-
- The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9:
- 1 gives best speed, 9 gives best compression, 0 gives no compression at
- all (the input data is simply copied a block at a time).
- Z_DEFAULT_COMPRESSION requests a default compromise between speed and
- compression (currently equivalent to level 6).
-
- deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
- enough memory, Z_STREAM_ERROR if level is not a valid compression level,
- Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible
- with the version assumed by the caller (ZLIB_VERSION).
- msg is set to null if there is no error message. deflateInit does not
- perform any compression: this will be done by deflate().
-*/
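
And a correspondingly small sketch of the call itself, again against a stock zlib installation; the return-code handling simply mirrors the list in the comment above.

    #include <zlib.h>

    /* Start a deflate stream at the default level, letting the library
     * install its default allocators. */
    static int start_deflate(z_stream *strm)
    {
        int ret;

        strm->zalloc = Z_NULL;
        strm->zfree  = Z_NULL;
        strm->opaque = Z_NULL;

        ret = deflateInit(strm, Z_DEFAULT_COMPRESSION);
        if (ret != Z_OK)   /* Z_MEM_ERROR, Z_STREAM_ERROR or Z_VERSION_ERROR */
            return ret;
        return 0;
    }
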
-
-
-extern int EXPORT deflate OF((z_streamp strm, int flush));
-/*
- Performs one or both of the following actions:
-
- - Compress more input starting at next_in and update next_in and avail_in
- accordingly. If not all input can be processed (because there is not
- enough room in the output buffer), next_in and avail_in are updated and
- processing will resume at this point for the next call of deflate().
-
- - Provide more output starting at next_out and update next_out and avail_out
- accordingly. This action is forced if the parameter flush is non zero.
- Forcing flush frequently degrades the compression ratio, so this parameter
- should be set only when necessary (in interactive applications).
- Some output may be provided even if flush is not set.
-
- Before the call of deflate(), the application should ensure that at least
- one of the actions is possible, by providing more input and/or consuming
- more output, and updating avail_in or avail_out accordingly; avail_out
- should never be zero before the call. The application can consume the
- compressed output when it wants, for example when the output buffer is full
- (avail_out == 0), or after each call of deflate(). If deflate returns Z_OK
- and with zero avail_out, it must be called again after making room in the
- output buffer because there might be more output pending.
-
- If the parameter flush is set to Z_PARTIAL_FLUSH, the current compression
- block is terminated and flushed to the output buffer so that the
- decompressor can get all input data available so far. For method 9, a future
- variant on method 8, the current block will be flushed but not terminated.
- Z_SYNC_FLUSH has the same effect as partial flush except that the compressed
- output is byte aligned (the compressor can clear its internal bit buffer)
- and the current block is always terminated; this can be useful if the
- compressor has to be restarted from scratch after an interruption (in which
- case the internal state of the compressor may be lost).
- If flush is set to Z_FULL_FLUSH, the compression block is terminated, a
- special marker is output and the compression dictionary is discarded; this
- is useful to allow the decompressor to synchronize if one compressed block
- has been damaged (see inflateSync below). Flushing degrades compression and
- so should be used only when necessary. Using Z_FULL_FLUSH too often can
- seriously degrade the compression. If deflate returns with avail_out == 0,
- this function must be called again with the same value of the flush
- parameter and more output space (updated avail_out), until the flush is
- complete (deflate returns with non-zero avail_out).
-
- If the parameter flush is set to Z_PACKET_FLUSH, the compression
- block is terminated, and a zero-length stored block is output,
- omitting the length bytes (the effect of this is that the 3-bit type
- code 000 for a stored block is output, and the output is then
- byte-aligned). This is designed for use at the end of a PPP packet.
-
- If the parameter flush is set to Z_FINISH, pending input is processed,
- pending output is flushed and deflate returns with Z_STREAM_END if there
- was enough output space; if deflate returns with Z_OK, this function must be
- called again with Z_FINISH and more output space (updated avail_out) but no
- more input data, until it returns with Z_STREAM_END or an error. After
- deflate has returned Z_STREAM_END, the only possible operations on the
- stream are deflateReset or deflateEnd.
-
- Z_FINISH can be used immediately after deflateInit if all the compression
- is to be done in a single step. In this case, avail_out must be at least
- 0.1% larger than avail_in plus 12 bytes. If deflate does not return
- Z_STREAM_END, then it must be called again as described above.
-
- deflate() may update data_type if it can make a good guess about
- the input data type (Z_ASCII or Z_BINARY). In doubt, the data is considered
- binary. This field is only for information purposes and does not affect
- the compression algorithm in any manner.
-
- deflate() returns Z_OK if some progress has been made (more input
- processed or more output produced), Z_STREAM_END if all input has been
- consumed and all output has been produced (only when flush is set to
- Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example
- if next_in or next_out was NULL), Z_BUF_ERROR if no progress is possible.
-*/
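
   To make the calling convention above concrete, here is a minimal sketch of
   single-step, buffer-to-buffer compression (the helper name and parameters are
   hypothetical, not part of this header):

     /* Hypothetical helper; illustrates deflateInit()/deflate(Z_FINISH)/deflateEnd() only. */
     static int compress_whole_buffer(Bytef *in, uInt in_len,
                                      Bytef *out, uInt out_room, uLong *out_len)
     {
         z_stream s = { 0 };              /* zalloc/zfree/opaque left Z_NULL: default allocators */
         int err = deflateInit(&s, Z_DEFAULT_COMPRESSION);
         if (err != Z_OK)
             return err;
         s.next_in = in;   s.avail_in = in_len;
         s.next_out = out; s.avail_out = out_room;  /* must be >= in_len + in_len/1000 + 12 */
         err = deflate(&s, Z_FINISH);     /* single step: Z_FINISH right after deflateInit */
         *out_len = s.total_out;
         deflateEnd(&s);
         return err == Z_STREAM_END ? Z_OK : (err == Z_OK ? Z_BUF_ERROR : err);
     }
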
-
-
-extern int EXPORT deflateEnd OF((z_streamp strm));
-/*
- All dynamically allocated data structures for this stream are freed.
- This function discards any unprocessed input and does not flush any
- pending output.
-
- deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the
- stream state was inconsistent, Z_DATA_ERROR if the stream was freed
- prematurely (some input or output was discarded). In the error case,
- msg may be set but then points to a static string (which must not be
- deallocated).
-*/
-
-
-/*
-extern int EXPORT inflateInit OF((z_streamp strm));
-
- Initializes the internal stream state for decompression. The fields
- zalloc, zfree and opaque must be initialized before by the caller. If
- zalloc and zfree are set to Z_NULL, inflateInit updates them to use default
- allocation functions.
-
- inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not
- enough memory, Z_VERSION_ERROR if the zlib library version is incompatible
- with the version assumed by the caller. msg is set to null if there is no
- error message. inflateInit does not perform any decompression: this will be
- done by inflate().
-*/
-
-
-extern int EXPORT inflate OF((z_streamp strm, int flush));
-/*
- Performs one or both of the following actions:
-
- - Decompress more input starting at next_in and update next_in and avail_in
- accordingly. If not all input can be processed (because there is not
- enough room in the output buffer), next_in is updated and processing
- will resume at this point for the next call of inflate().
-
- - Provide more output starting at next_out and update next_out and avail_out
- accordingly. inflate() provides as much output as possible, until there
- is no more input data or no more space in the output buffer (see below
- about the flush parameter).
-
- Before the call of inflate(), the application should ensure that at least
- one of the actions is possible, by providing more input and/or consuming
- more output, and updating the next_* and avail_* values accordingly.
- The application can consume the uncompressed output when it wants, for
- example when the output buffer is full (avail_out == 0), or after each
- call of inflate(). If inflate returns Z_OK and with zero avail_out, it
- must be called again after making room in the output buffer because there
- might be more output pending.
-
- If the parameter flush is set to Z_PARTIAL_FLUSH or Z_PACKET_FLUSH,
- inflate flushes as much output as possible to the output buffer. The
- flushing behavior of inflate is not specified for values of the flush
- parameter other than Z_PARTIAL_FLUSH, Z_PACKET_FLUSH or Z_FINISH, but the
- current implementation actually flushes as much output as possible
- anyway. For Z_PACKET_FLUSH, inflate checks that once all the input data
- has been consumed, it is expecting to see the length field of a stored
- block; if not, it returns Z_DATA_ERROR.
-
- inflate() should normally be called until it returns Z_STREAM_END or an
- error. However if all decompression is to be performed in a single step
- (a single call of inflate), the parameter flush should be set to
- Z_FINISH. In this case all pending input is processed and all pending
- output is flushed; avail_out must be large enough to hold all the
- uncompressed data. (The size of the uncompressed data may have been saved
- by the compressor for this purpose.) The next operation on this stream must
- be inflateEnd to deallocate the decompression state. The use of Z_FINISH
- is never required, but can be used to inform inflate that a faster routine
- may be used for the single inflate() call.
-
- inflate() returns Z_OK if some progress has been made (more input
- processed or more output produced), Z_STREAM_END if the end of the
- compressed data has been reached and all uncompressed output has been
- produced, Z_NEED_DICT if a preset dictionary is needed at this point (see
- inflateSetDictionary below), Z_DATA_ERROR if the input data was corrupted,
- Z_STREAM_ERROR if the stream structure was inconsistent (for example if
- next_in or next_out was NULL), Z_MEM_ERROR if there was not enough memory,
- Z_BUF_ERROR if no progress is possible or if there was not enough room in
- the output buffer when Z_FINISH is used. In the Z_DATA_ERROR case, the
- application may then call inflateSync to look for a good compression block.
- In the Z_NEED_DICT case, strm->adler is set to the Adler32 value of the
- dictionary chosen by the compressor.
-*/
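
   A corresponding sketch of a streaming decompression loop (read_more() and
   consume() are hypothetical callbacks; error handling is abbreviated):

     static int decompress_stream(z_streamp s)    /* assumes inflateInit() already returned Z_OK */
     {
         Byte out[4096];                           /* scratch output buffer */
         int err;
         do {
             if (s->avail_in == 0)
                 s->avail_in = read_more(&s->next_in);     /* hypothetical input source */
             s->next_out = out;
             s->avail_out = sizeof(out);
             err = inflate(s, Z_PARTIAL_FLUSH);            /* flush what is decodable so far */
             consume(out, sizeof(out) - s->avail_out);     /* hypothetical output sink */
         } while (err == Z_OK);
         return err == Z_STREAM_END ? Z_OK : err;          /* caller still does inflateEnd() */
     }
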
-
-
-extern int EXPORT inflateEnd OF((z_streamp strm));
-/*
- All dynamically allocated data structures for this stream are freed.
- This function discards any unprocessed input and does not flush any
- pending output.
-
- inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state
- was inconsistent. In the error case, msg may be set but then points to a
- static string (which must not be deallocated).
-*/
-
- /* Advanced functions */
-
-/*
- The following functions are needed only in some special applications.
-*/
-
-/*
-extern int EXPORT deflateInit2 OF((z_streamp strm,
- int level,
- int method,
- int windowBits,
- int memLevel,
- int strategy));
-
- This is another version of deflateInit with more compression options. The
- fields next_in, zalloc, zfree and opaque must be initialized before by
- the caller.
-
- The method parameter is the compression method. It must be Z_DEFLATED in
- this version of the library. (Method 9 will allow a 64K history buffer and
- partial block flushes.)
-
- The windowBits parameter is the base two logarithm of the window size
- (the size of the history buffer). It should be in the range 8..15 for this
- version of the library (the value 16 will be allowed for method 9). Larger
- values of this parameter result in better compression at the expense of
- memory usage. The default value is 15 if deflateInit is used instead.
-
- The memLevel parameter specifies how much memory should be allocated
- for the internal compression state. memLevel=1 uses minimum memory but
- is slow and reduces compression ratio; memLevel=9 uses maximum memory
- for optimal speed. The default value is 8. See zconf.h for total memory
- usage as a function of windowBits and memLevel.
-
- The strategy parameter is used to tune the compression algorithm. Use the
- value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a
- filter (or predictor), or Z_HUFFMAN_ONLY to force Huffman encoding only (no
- string match). Filtered data consists mostly of small values with a
- somewhat random distribution. In this case, the compression algorithm is
- tuned to compress them better. The effect of Z_FILTERED is to force more
- Huffman coding and less string matching; it is somewhat intermediate
- between Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. The strategy parameter only affects
- the compression ratio but not the correctness of the compressed output even
- if it is not set appropriately.
-
- If next_in is not null, the library will use this buffer to hold also
- some history information; the buffer must either hold the entire input
- data, or have at least 1<<(windowBits+1) bytes and be writable. If next_in
- is null, the library will allocate its own history buffer (and leave next_in
- null). next_out need not be provided here but must be provided by the
- application for the next call of deflate().
-
- If the history buffer is provided by the application, next_in must
- never be changed by the application since the compressor maintains
- information inside this buffer from call to call; the application
- must provide more input only by increasing avail_in. next_in is always
- reset by the library in this case.
-
- deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was
- not enough memory, Z_STREAM_ERROR if a parameter is invalid (such as
- an invalid method). msg is set to null if there is no error message.
- deflateInit2 does not perform any compression: this will be done by
- deflate().
-*/
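
   For instance, a hedged sketch of the advanced initialization (the particular
   level/windowBits/memLevel values are illustrative only):

     static int init_small_window(z_stream *s)
     {
         memset(s, 0, sizeof(*s));        /* next_in NULL: the library allocates its own history buffer */
         return deflateInit2(s, Z_BEST_COMPRESSION, Z_DEFLATED,
                             13,          /* windowBits: 8K window instead of the default 15 */
                             8,           /* memLevel: the documented default */
                             Z_DEFAULT_STRATEGY);
     }
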
-
-extern int EXPORT deflateSetDictionary OF((z_streamp strm,
- const Bytef *dictionary,
- uInt dictLength));
-/*
- Initializes the compression dictionary (history buffer) from the given
- byte sequence without producing any compressed output. This function must
- be called immediately after deflateInit or deflateInit2, before any call
- of deflate. The compressor and decompressor must use exactly the same
- dictionary (see inflateSetDictionary).
- The dictionary should consist of strings (byte sequences) that are likely
- to be encountered later in the data to be compressed, with the most commonly
- used strings preferably put towards the end of the dictionary. Using a
- dictionary is most useful when the data to be compressed is short and
- can be predicted with good accuracy; the data can then be compressed better
- than with the default empty dictionary. In this version of the library,
- only the last 32K bytes of the dictionary are used.
- Upon return of this function, strm->adler is set to the Adler32 value
- of the dictionary; the decompressor may later use this value to determine
- which dictionary has been used by the compressor. (The Adler32 value
- applies to the whole dictionary even if only a subset of the dictionary is
- actually used by the compressor.)
-
- deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a
- parameter is invalid (such as NULL dictionary) or the stream state
- is inconsistent (for example if deflate has already been called for this
- stream). deflateSetDictionary does not perform any compression: this will
- be done by deflate().
-*/
-
-extern int EXPORT deflateCopy OF((z_streamp dest,
- z_streamp source));
-/*
- Sets the destination stream as a complete copy of the source stream. If
- the source stream is using an application-supplied history buffer, a new
- buffer is allocated for the destination stream. The compressed output
- buffer is always application-supplied. It's the responsibility of the
- application to provide the correct values of next_out and avail_out for the
- next call of deflate.
-
- This function can be useful when several compression strategies will be
- tried, for example when there are several ways of pre-processing the input
- data with a filter. The streams that will be discarded should then be freed
- by calling deflateEnd. Note that deflateCopy duplicates the internal
- compression state which can be quite large, so this strategy is slow and
- can consume lots of memory.
-
- deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not
- enough memory, Z_STREAM_ERROR if the source stream state was inconsistent
- (such as zalloc being NULL). msg is left unchanged in both source and
- destination.
-*/
-
-extern int EXPORT deflateReset OF((z_streamp strm));
-/*
- This function is equivalent to deflateEnd followed by deflateInit,
- but does not free and reallocate all the internal compression state.
- The stream will keep the same compression level and any other attributes
- that may have been set by deflateInit2.
-
- deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
- stream state was inconsistent (such as zalloc or state being NULL).
-*/
-
-extern int EXPORT deflateParams OF((z_streamp strm, int level, int strategy));
-/*
- Dynamically update the compression level and compression strategy.
- This can be used to switch between compression and straight copy of
- the input data, or to switch to a different kind of input data requiring
- a different strategy. If the compression level is changed, the input
- available so far is compressed with the old level (and may be flushed);
- the new level will take effect only at the next call of deflate().
-
- Before the call of deflateParams, the stream state must be set as for
- a call of deflate(), since the currently available input may have to
- be compressed and flushed. In particular, strm->avail_out must be non-zero.
-
- deflateParams returns Z_OK if success, Z_STREAM_ERROR if the source
- stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR
- if strm->avail_out was zero.
-*/
-
-extern int EXPORT deflateOutputPending OF((z_streamp strm));
-/*
- Returns the number of bytes of output which are immediately
- available from the compressor (i.e. without any further input
- or flush).
-*/
-
-/*
-extern int EXPORT inflateInit2 OF((z_streamp strm,
- int windowBits));
-
- This is another version of inflateInit with more compression options. The
- fields next_out, zalloc, zfree and opaque must be initialized before by
- the caller.
-
- The windowBits parameter is the base two logarithm of the maximum window
- size (the size of the history buffer). It should be in the range 8..15 for
- this version of the library (the value 16 will be allowed soon). The
- default value is 15 if inflateInit is used instead. If a compressed stream
- with a larger window size is given as input, inflate() will return with
- the error code Z_DATA_ERROR instead of trying to allocate a larger window.
-
- If next_out is not null, the library will use this buffer for the history
- buffer; the buffer must either be large enough to hold the entire output
- data, or have at least 1<<windowBits bytes. If next_out is null, the
- library will allocate its own buffer (and leave next_out null). next_in
- need not be provided here but must be provided by the application for the
- next call of inflate().
-
- If the history buffer is provided by the application, next_out must
- never be changed by the application since the decompressor maintains
- history information inside this buffer from call to call; the application
- can only reset next_out to the beginning of the history buffer when
- avail_out is zero and all output has been consumed.
-
- inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was
- not enough memory, Z_STREAM_ERROR if a parameter is invalid (such as
- windowBits < 8). msg is set to null if there is no error message.
- inflateInit2 does not perform any decompression: this will be done by
- inflate().
-*/
-
-extern int EXPORT inflateSetDictionary OF((z_streamp strm,
- const Bytef *dictionary,
- uInt dictLength));
-/*
- Initializes the decompression dictionary (history buffer) from the given
- uncompressed byte sequence. This function must be called immediately after
- a call of inflate if this call returned Z_NEED_DICT. The dictionary chosen
- by the compressor can be determined from the Adler32 value returned by this
- call of inflate. The compressor and decompressor must use exactly the same
- dictionary (see deflateSetDictionary).
-
- inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a
- parameter is invalid (such as NULL dictionary) or the stream state is
- inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the
- expected one (incorrect Adler32 value). inflateSetDictionary does not
- perform any decompression: this will be done by subsequent calls of
- inflate().
-*/
-
-extern int EXPORT inflateSync OF((z_streamp strm));
-/*
- Skips invalid compressed data until the special marker (see deflate()
- above) can be found, or until all available input is skipped. No output
- is provided.
-
- inflateSync returns Z_OK if the special marker has been found, Z_BUF_ERROR
- if no more input was provided, Z_DATA_ERROR if no marker has been found,
- or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
- case, the application may save the current value of total_in, which
- indicates where valid compressed data was found. In the error case, the
- application may repeatedly call inflateSync, providing more input each time,
- until success or end of the input data.
-*/
-
-extern int EXPORT inflateReset OF((z_streamp strm));
-/*
- This function is equivalent to inflateEnd followed by inflateInit,
- but does not free and reallocate all the internal decompression state.
- The stream will keep attributes that may have been set by inflateInit2.
-
- inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
- stream state was inconsistent (such as zalloc or state being NULL).
-*/
-
-extern int inflateIncomp OF((z_stream *strm));
-/*
- This function adds the data at next_in (avail_in bytes) to the output
- history without performing any output. There must be no pending output,
- and the decompressor must be expecting to see the start of a block.
- Calling this function is equivalent to decompressing a stored block
- containing the data at next_in (except that the data is not output).
-*/
-
- /* utility functions */
-
-/*
- The following utility functions are implemented on top of the
- basic stream-oriented functions. To simplify the interface, some
- default options are assumed (compression level, window size,
- standard memory allocation functions). The source code of these
- utility functions can easily be modified if you need special options.
-*/
-
-extern int EXPORT compress OF((Bytef *dest, uLongf *destLen,
- const Bytef *source, uLong sourceLen));
-/*
- Compresses the source buffer into the destination buffer. sourceLen is
- the byte length of the source buffer. Upon entry, destLen is the total
- size of the destination buffer, which must be at least 0.1% larger than
- sourceLen plus 12 bytes. Upon exit, destLen is the actual size of the
- compressed buffer.
- This function can be used to compress a whole file at once if the
- input file is mmap'ed.
- compress returns Z_OK if success, Z_MEM_ERROR if there was not
- enough memory, Z_BUF_ERROR if there was not enough room in the output
- buffer.
-*/
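
   A small sketch of the sizing rule above (the wrapper name is hypothetical):

     static int compress_message(const Bytef *src, uLong src_len,
                                 Bytef *dest, uLong dest_room, uLong *dest_len)
     {
         /* dest_room should be at least src_len + src_len/1000 + 12, per the note above */
         *dest_len = dest_room;
         return compress(dest, dest_len, src, src_len);   /* Z_OK, Z_MEM_ERROR or Z_BUF_ERROR */
     }
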
-
-extern int EXPORT uncompress OF((Bytef *dest, uLongf *destLen,
- const Bytef *source, uLong sourceLen));
-/*
- Decompresses the source buffer into the destination buffer. sourceLen is
- the byte length of the source buffer. Upon entry, destLen is the total
- size of the destination buffer, which must be large enough to hold the
- entire uncompressed data. (The size of the uncompressed data must have
- been saved previously by the compressor and transmitted to the decompressor
- by some mechanism outside the scope of this compression library.)
- Upon exit, destLen is the actual size of the uncompressed data.
- This function can be used to decompress a whole file at once if the
- input file is mmap'ed.
-
- uncompress returns Z_OK if success, Z_MEM_ERROR if there was not
- enough memory, Z_BUF_ERROR if there was not enough room in the output
- buffer, or Z_DATA_ERROR if the input data was corrupted.
-*/
-
-
-typedef voidp gzFile;
-
-extern gzFile EXPORT gzopen OF((const char *path, const char *mode));
-/*
- Opens a gzip (.gz) file for reading or writing. The mode parameter
- is as in fopen ("rb" or "wb") but can also include a compression level
- ("wb9"). gzopen can be used to read a file which is not in gzip format;
- in this case gzread will directly read from the file without decompression.
- gzopen returns NULL if the file could not be opened or if there was
- insufficient memory to allocate the (de)compression state; errno
- can be checked to distinguish the two cases (if errno is zero, the
- zlib error is Z_MEM_ERROR).
-*/
-
-extern gzFile EXPORT gzdopen OF((int fd, const char *mode));
-/*
- gzdopen() associates a gzFile with the file descriptor fd. File
- descriptors are obtained from calls like open, dup, creat, pipe or
- fileno (if the file has been previously opened with fopen).
- The mode parameter is as in gzopen.
- The next call of gzclose on the returned gzFile will also close the
- file descriptor fd, just like fclose(fdopen(fd, mode)) closes the file
- descriptor fd. If you want to keep fd open, use gzdopen(dup(fd), mode).
- gzdopen returns NULL if there was insufficient memory to allocate
- the (de)compression state.
-*/
-
-extern int EXPORT gzread OF((gzFile file, voidp buf, unsigned len));
-/*
- Reads the given number of uncompressed bytes from the compressed file.
- If the input file was not in gzip format, gzread copies the given number
- of bytes into the buffer.
- gzread returns the number of uncompressed bytes actually read (0 for
- end of file, -1 for error). */
-
-extern int EXPORT gzwrite OF((gzFile file, const voidp buf, unsigned len));
-/*
- Writes the given number of uncompressed bytes into the compressed file.
- gzwrite returns the number of uncompressed bytes actually written
- (0 in case of error).
-*/
-
-extern int EXPORT gzflush OF((gzFile file, int flush));
-/*
- Flushes all pending output into the compressed file. The parameter
- flush is as in the deflate() function. The return value is the zlib
- error number (see function gzerror below). gzflush returns Z_OK if
- the flush parameter is Z_FINISH and all output could be flushed.
- gzflush should be called only when strictly necessary because it can
- degrade compression.
-*/
-
-extern int EXPORT gzclose OF((gzFile file));
-/*
- Flushes all pending output if necessary, closes the compressed file
- and deallocates all the (de)compression state. The return value is the zlib
- error number (see function gzerror below).
-*/
-
-extern const char * EXPORT gzerror OF((gzFile file, int *errnum));
-/*
- Returns the error message for the last error which occurred on the
- given compressed file. errnum is set to zlib error number. If an
- error occurred in the file system and not in the compression library,
- errnum is set to Z_ERRNO and the application may consult errno
- to get the exact error code.
-*/
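
   Tying the gz* calls together, a user-space style sketch (the helper, path and
   handle_chunk() are hypothetical; the in-kernel build of this library may not
   provide the gz* layer at all):

     static int dump_gz(const char *path)               /* hypothetical helper */
     {
         gzFile f = gzopen(path, "rb");
         char buf[4096];
         int n = 0;
         if (f == NULL)
             return Z_ERRNO;
         while ((n = gzread(f, buf, sizeof(buf))) > 0)
             handle_chunk(buf, n);                       /* hypothetical consumer */
         gzclose(f);
         return n < 0 ? Z_ERRNO : Z_OK;
     }
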
-
- /* checksum functions */
-
-/*
- These functions are not related to compression but are exported
- anyway because they might be useful in applications using the
- compression library.
-*/
-
-extern uLong EXPORT adler32 OF((uLong adler, const Bytef *buf, uInt len));
-
-/*
- Update a running Adler-32 checksum with the bytes buf[0..len-1] and
- return the updated checksum. If buf is NULL, this function returns
- the required initial value for the checksum.
- An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
- much faster. Usage example:
-
- uLong adler = adler32(0L, Z_NULL, 0);
-
- while (read_buffer(buffer, length) != EOF) {
- adler = adler32(adler, buffer, length);
- }
- if (adler != original_adler) error();
-*/
-
-extern uLong EXPORT crc32 OF((uLong crc, const Bytef *buf, uInt len));
-/*
- Update a running crc with the bytes buf[0..len-1] and return the updated
- crc. If buf is NULL, this function returns the required initial value
- for the crc. Pre- and post-conditioning (one's complement) is performed
- within this function so it shouldn't be done by the application.
- Usage example:
-
- uLong crc = crc32(0L, Z_NULL, 0);
-
- while (read_buffer(buffer, length) != EOF) {
- crc = crc32(crc, buffer, length);
- }
- if (crc != original_crc) error();
-*/
-
-
- /* various hacks, don't look :) */
-
-/* deflateInit and inflateInit are macros to allow checking the zlib version
- * and the compiler's view of z_stream:
- */
-extern int EXPORT deflateInit_ OF((z_streamp strm, int level,
- const char *version, int stream_size));
-extern int EXPORT inflateInit_ OF((z_streamp strm,
- const char *version, int stream_size));
-extern int EXPORT deflateInit2_ OF((z_streamp strm, int level, int method,
- int windowBits, int memLevel, int strategy,
- const char *version, int stream_size));
-extern int EXPORT inflateInit2_ OF((z_streamp strm, int windowBits,
- const char *version, int stream_size));
-#define deflateInit(strm, level) \
- deflateInit_((strm), (level), ZLIB_VERSION, sizeof(z_stream))
-#define inflateInit(strm) \
- inflateInit_((strm), ZLIB_VERSION, sizeof(z_stream))
-#define deflateInit2(strm, level, method, windowBits, memLevel, strategy) \
- deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\
- (strategy), ZLIB_VERSION, sizeof(z_stream))
-#define inflateInit2(strm, windowBits) \
- inflateInit2_((strm), (windowBits), ZLIB_VERSION, sizeof(z_stream))
-
-#if !defined(_Z_UTIL_H) && !defined(NO_DUMMY_DECL)
- struct internal_state {int dummy;}; /* hack for buggy compilers */
-#endif
-
-uLongf *get_crc_table OF((void)); /* can be used by asm versions of crc32() */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _ZLIB_H */
-/* --- zlib.h */
diff --git a/drivers/pnp/Config.help b/drivers/pnp/Config.help
index 8ec6d93f824d..c30b9d610618 100644
--- a/drivers/pnp/Config.help
+++ b/drivers/pnp/Config.help
@@ -27,3 +27,19 @@ CONFIG_ISAPNP
If unsure, say Y.
+CONFIG_PNPBIOS
+ Linux uses the PNPBIOS as defined in "Plug and Play BIOS
+ Specification Version 1.0A May 5, 1994" to autodetect built-in
+ mainboard resources (e.g. parallel port resources).
+
+ Other features (e.g. change resources, ESCD, event notification,
+ Docking station information, ISAPNP services) are not used.
+
+ Note: ACPI is expected to supersede PNPBIOS some day; for now the two
+ co-exist nicely.
+
+ See the latest pcmcia-cs (stand-alone package) for a nice "lspnp" tool,
+ or have a look at /proc/bus/pnp.
+
+ If unsure, say Y.
+
diff --git a/drivers/pnp/Config.in b/drivers/pnp/Config.in
index de0fa6cfa201..86e80955606e 100644
--- a/drivers/pnp/Config.in
+++ b/drivers/pnp/Config.in
@@ -8,4 +8,8 @@ tristate 'Plug and Play support' CONFIG_PNP
dep_tristate ' ISA Plug and Play support' CONFIG_ISAPNP $CONFIG_PNP
+if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then
+ dep_bool ' PNPBIOS support (EXPERIMENTAL)' CONFIG_PNPBIOS $CONFIG_PNP
+fi
+
endmenu
diff --git a/drivers/pnp/Makefile b/drivers/pnp/Makefile
index b6adf3ce0f46..890ca24c07fa 100644
--- a/drivers/pnp/Makefile
+++ b/drivers/pnp/Makefile
@@ -10,15 +10,22 @@
O_TARGET := pnp.o
-export-objs := isapnp.o
-list-multi := isa-pnp.o
+export-objs := isapnp.o pnpbios_core.o
+multi-objs := isa-pnp.o pnpbios.o
-proc-$(CONFIG_PROC_FS) = isapnp_proc.o
-isa-pnp-objs := isapnp.o quirks.o $(proc-y)
+isa-pnp-proc-$(CONFIG_PROC_FS) = isapnp_proc.o
+pnpbios-proc-$(CONFIG_PROC_FS) = pnpbios_proc.o
+
+isa-pnp-objs := isapnp.o quirks.o $(isa-pnp-proc-y)
+pnpbios-objs := pnpbios_core.o $(pnpbios-proc-y)
obj-$(CONFIG_ISAPNP) += isa-pnp.o
+obj-$(CONFIG_PNPBIOS) += pnpbios.o
include $(TOPDIR)/Rules.make
isa-pnp.o: $(isa-pnp-objs)
$(LD) $(LD_RFLAG) -r -o $@ $(isa-pnp-objs)
+
+pnpbios.o: $(pnpbios-objs)
+ $(LD) $(LD_RFLAG) -r -o $@ $(pnpbios-objs)
diff --git a/drivers/pnp/isapnp.c b/drivers/pnp/isapnp.c
index 250e8dc4fd75..32121ec13356 100644
--- a/drivers/pnp/isapnp.c
+++ b/drivers/pnp/isapnp.c
@@ -892,6 +892,7 @@ static int __init isapnp_create_device(struct pci_bus *card,
case _STAG_END:
if (size > 0)
isapnp_skip_bytes(size);
+ isapnp_config_prepare(dev);
return 1;
default:
printk(KERN_ERR "isapnp: unexpected or unknown tag type 0x%x for logical device %i (device %i), ignored\n", type, dev->devfn, card->number);
diff --git a/drivers/pnp/pnpbios_core.c b/drivers/pnp/pnpbios_core.c
new file mode 100644
index 000000000000..d60ec7444a80
--- /dev/null
+++ b/drivers/pnp/pnpbios_core.c
@@ -0,0 +1,1276 @@
+/*
+ * PnP BIOS services
+ *
+ * Originally (C) 1998 Christian Schmidt <schmidt@digadd.de>
+ * Modifications (c) 1998 Tom Lees <tom@lpsg.demon.co.uk>
+ * Minor reorganizations by David Hinds <dahinds@users.sourceforge.net>
+ * Modifications (c) 2001 by Thomas Hood <jdthood@mail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * References:
+ * Compaq Computer Corporation, Phoenix Technologies Ltd., Intel Corporation
+ * Plug and Play BIOS Specification, Version 1.0A, May 5, 1994
+ * Plug and Play BIOS Clarification Paper, October 6, 1994
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/kernel.h>
+#include <linux/pnpbios.h>
+#include <asm/page.h>
+#include <asm/system.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <asm/desc.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/kmod.h>
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <asm/system.h>
+
+
+/*
+ *
+ * PnP BIOS INTERFACE
+ *
+ */
+
+/* PnP BIOS signature: "$PnP" */
+#define PNP_SIGNATURE (('$' << 0) + ('P' << 8) + ('n' << 16) + ('P' << 24))
+
+#pragma pack(1)
+union pnp_bios_expansion_header {
+ struct {
+ u32 signature; /* "$PnP" */
+ u8 version; /* in BCD */
+ u8 length; /* length in bytes, currently 21h */
+ u16 control; /* system capabilities */
+ u8 checksum; /* all bytes must add up to 0 */
+
+ u32 eventflag; /* phys. address of the event flag */
+ u16 rmoffset; /* real mode entry point */
+ u16 rmcseg;
+ u16 pm16offset; /* 16 bit protected mode entry */
+ u32 pm16cseg;
+ u32 deviceID; /* EISA encoded system ID or 0 */
+ u16 rmdseg; /* real mode data segment */
+ u32 pm16dseg; /* 16 bit pm data segment base */
+ } fields;
+ char chars[0x21]; /* To calculate the checksum */
+};
+#pragma pack()
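
   The comment above requires the 0x21 header bytes to sum to zero; a hedged
   sketch of that check over the chars[] view (the helper name is hypothetical
   and may differ from what this file actually uses while probing):

     static int pnp_bios_header_checksum_ok(const union pnp_bios_expansion_header *h)
     {
         unsigned int i;
         u8 sum = 0;
         for (i = 0; i < sizeof(h->chars); i++)   /* 0x21 bytes, per the 'length' field above */
             sum += h->chars[i];
         return sum == 0;                         /* non-zero sum => corrupt or bogus header */
     }
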
+
+static struct {
+ u16 offset;
+ u16 segment;
+} pnp_bios_callpoint;
+
+static union pnp_bios_expansion_header * pnp_bios_hdr = NULL;
+
+/* The PnP BIOS entries in the GDT */
+#define PNP_GDT (0x0060)
+#define PNP_CS32 (PNP_GDT+0x00) /* segment for calling fn */
+#define PNP_CS16 (PNP_GDT+0x08) /* code segment for BIOS */
+#define PNP_DS (PNP_GDT+0x10) /* data segment for BIOS */
+#define PNP_TS1 (PNP_GDT+0x18) /* transfer data segment */
+#define PNP_TS2 (PNP_GDT+0x20) /* another data segment */
+
+/*
+ * These are some opcodes for a "static asmlinkage"
+ * As this code is *not* executed inside the linux kernel segment, but in an
+ * alias at offset 0, we need a far return that can not be compiled by
+ * default (please, prove me wrong! this is *really* ugly!)
+ * This is the only way to get the bios to return into the kernel code,
+ * because the bios code runs in 16 bit protected mode and therefore can only
+ * return to the caller if the call is within the first 64kB, and the linux
+ * kernel begins at offset 3GB...
+ */
+
+asmlinkage void pnp_bios_callfunc(void);
+
+__asm__(
+ ".text \n"
+ __ALIGN_STR "\n"
+ SYMBOL_NAME_STR(pnp_bios_callfunc) ":\n"
+ " pushl %edx \n"
+ " pushl %ecx \n"
+ " pushl %ebx \n"
+ " pushl %eax \n"
+ " lcallw " SYMBOL_NAME_STR(pnp_bios_callpoint) "\n"
+ " addl $16, %esp \n"
+ " lret \n"
+ ".previous \n"
+);
+
+#define Q_SET_SEL(selname, address, size) \
+set_base (gdt [(selname) >> 3], __va((u32)(address))); \
+set_limit (gdt [(selname) >> 3], size)
+
+#define Q2_SET_SEL(selname, address, size) \
+set_base (gdt [(selname) >> 3], (u32)(address)); \
+set_limit (gdt [(selname) >> 3], size)
+
+/*
+ * At some point we want to use this stack frame pointer to unwind
+ * after PnP BIOS oopses.
+ */
+
+u32 pnp_bios_fault_esp;
+u32 pnp_bios_fault_eip;
+u32 pnp_bios_is_utter_crap = 0;
+
+static spinlock_t pnp_bios_lock;
+
+static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
+ u16 arg4, u16 arg5, u16 arg6, u16 arg7)
+{
+ unsigned long flags;
+ u16 status;
+
+ /*
+ * PnP BIOSes are generally not terribly re-entrant.
+ * Also, don't rely on them to save everything correctly.
+ */
+ if(pnp_bios_is_utter_crap)
+ return PNP_FUNCTION_NOT_SUPPORTED;
+
+ /* On some boxes IRQ's during PnP BIOS calls are deadly. */
+ spin_lock_irqsave(&pnp_bios_lock, flags);
+ __cli();
+ __asm__ __volatile__(
+ "pushl %%ebp\n\t"
+ "pushl %%edi\n\t"
+ "pushl %%esi\n\t"
+ "pushl %%ds\n\t"
+ "pushl %%es\n\t"
+ "pushl %%fs\n\t"
+ "pushl %%gs\n\t"
+ "pushfl\n\t"
+ "movl %%esp, pnp_bios_fault_esp\n\t"
+ "movl $1f, pnp_bios_fault_eip\n\t"
+ "lcall %5,%6\n\t"
+ "1:popfl\n\t"
+ "popl %%gs\n\t"
+ "popl %%fs\n\t"
+ "popl %%es\n\t"
+ "popl %%ds\n\t"
+ "popl %%esi\n\t"
+ "popl %%edi\n\t"
+ "popl %%ebp\n\t"
+ : "=a" (status)
+ : "0" ((func) | (((u32)arg1) << 16)),
+ "b" ((arg2) | (((u32)arg3) << 16)),
+ "c" ((arg4) | (((u32)arg5) << 16)),
+ "d" ((arg6) | (((u32)arg7) << 16)),
+ "i" (PNP_CS32),
+ "i" (0)
+ : "memory"
+ );
+ spin_unlock_irqrestore(&pnp_bios_lock, flags);
+
+ /* If we get here and this is set then the PnP BIOS faulted on us. */
+ if(pnp_bios_is_utter_crap)
+ {
+ printk(KERN_ERR "PnPBIOS: Warning! Your PnP BIOS caused a fatal error. Attempting to continue.\n");
+ printk(KERN_ERR "PnPBIOS: You may need to reboot with the \"nobiospnp\" option to operate stably.\n");
+ printk(KERN_ERR "PnPBIOS: Check with your vendor for an updated BIOS\n");
+ }
+
+ return status;
+}
+
+
+/*
+ *
+ * UTILITY FUNCTIONS
+ *
+ */
+
+void *pnpbios_kmalloc(size_t size, int f)
+{
+ void *p = kmalloc( size, f );
+ if ( p == NULL )
+ printk(KERN_ERR "PnPBIOS: kmalloc() failed.\n");
+ return p;
+}
+
+/*
+ * Call this only after init time
+ */
+static int pnp_bios_present(void)
+{
+ return (pnp_bios_hdr != NULL);
+}
+
+/* Forward declaration */
+static void update_devlist( u8 nodenum, struct pnp_bios_node *data );
+
+
+/*
+ *
+ * PnP BIOS ACCESS FUNCTIONS
+ *
+ */
+
+#define PNP_GET_NUM_SYS_DEV_NODES 0x00
+#define PNP_GET_SYS_DEV_NODE 0x01
+#define PNP_SET_SYS_DEV_NODE 0x02
+#define PNP_GET_EVENT 0x03
+#define PNP_SEND_MESSAGE 0x04
+#define PNP_GET_DOCKING_STATION_INFORMATION 0x05
+#define PNP_SET_STATIC_ALLOCED_RES_INFO 0x09
+#define PNP_GET_STATIC_ALLOCED_RES_INFO 0x0a
+#define PNP_GET_APM_ID_TABLE 0x0b
+#define PNP_GET_PNP_ISA_CONFIG_STRUC 0x40
+#define PNP_GET_ESCD_INFO 0x41
+#define PNP_READ_ESCD 0x42
+#define PNP_WRITE_ESCD 0x43
+
+/*
+ * Call PnP BIOS with function 0x00, "get number of system device nodes"
+ */
+static int __pnp_bios_dev_node_info(struct pnp_dev_node_info *data)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, data, sizeof(struct pnp_dev_node_info));
+ status = call_pnp_bios(PNP_GET_NUM_SYS_DEV_NODES, 0, PNP_TS1, 2, PNP_TS1, PNP_DS, 0, 0);
+ data->no_nodes &= 0xff;
+ return status;
+}
+
+int pnp_bios_dev_node_info(struct pnp_dev_node_info *data)
+{
+ int status = __pnp_bios_dev_node_info( data );
+ if ( status )
+ printk(KERN_WARNING "PnPBIOS: dev_node_info: Unexpected status 0x%x\n", status);
+ return status;
+}
+
+/*
+ * Note that some PnP BIOSes (e.g., on Sony Vaio laptops) die a horrible
+ * death if they are asked to access the "current" configuration.
+ * Therefore, if it's a matter of indifference, it's better to call
+ * get_dev_node() and set_dev_node() with boot=1 rather than with boot=0.
+ */
+
+/*
+ * Call PnP BIOS with function 0x01, "get system device node"
+ * Input: *nodenum = desired node,
+ * boot = whether to get nonvolatile boot (!=0)
+ * or volatile current (0) config
+ * Output: *nodenum=next node or 0xff if no more nodes
+ */
+static int __pnp_bios_get_dev_node(u8 *nodenum, char boot, struct pnp_bios_node *data)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ if ( !boot & pnpbios_dont_use_current_config )
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, nodenum, sizeof(char));
+ Q2_SET_SEL(PNP_TS2, data, 64 * 1024);
+ status = call_pnp_bios(PNP_GET_SYS_DEV_NODE, 0, PNP_TS1, 0, PNP_TS2, boot ? 2 : 1, PNP_DS, 0);
+ return status;
+}
+
+int pnp_bios_get_dev_node(u8 *nodenum, char boot, struct pnp_bios_node *data)
+{
+ int status;
+ status = __pnp_bios_get_dev_node( nodenum, boot, data );
+ if ( status )
+ printk(KERN_WARNING "PnPBIOS: get_dev_node: Unexpected 0x%x\n", status);
+ return status;
+}
+
+
+/*
+ * Call PnP BIOS with function 0x02, "set system device node"
+ * Input: *nodenum = desired node,
+ * boot = whether to set nonvolatile boot (!=0)
+ * or volatile current (0) config
+ */
+static int __pnp_bios_set_dev_node(u8 nodenum, char boot, struct pnp_bios_node *data)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ if ( !boot & pnpbios_dont_use_current_config )
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, data, /* *((u16 *) data)*/ 65536);
+ status = call_pnp_bios(PNP_SET_SYS_DEV_NODE, nodenum, 0, PNP_TS1, boot ? 2 : 1, PNP_DS, 0, 0);
+ return status;
+}
+
+int pnp_bios_set_dev_node(u8 nodenum, char boot, struct pnp_bios_node *data)
+{
+ int status;
+ status = __pnp_bios_set_dev_node( nodenum, boot, data );
+ if ( status ) {
+ printk(KERN_WARNING "PnPBIOS: set_dev_node: Unexpected set_dev_node status 0x%x\n", status);
+ return status;
+ }
+ if ( !boot ) {
+ /* Update devlist */
+ u8 thisnodenum = nodenum;
+ status = __pnp_bios_get_dev_node( &nodenum, boot, data );
+ if ( status ) {
+ printk(KERN_WARNING "PnPBIOS: set_dev_node: Unexpected get_dev_node status 0x%x\n", status);
+ return status;
+ }
+ update_devlist( thisnodenum, data );
+ }
+ return status;
+}
+
+#if needed
+/*
+ * Call PnP BIOS with function 0x03, "get event"
+ */
+static int pnp_bios_get_event(u16 *event)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, event, sizeof(u16));
+ status = call_pnp_bios(PNP_GET_EVENT, 0, PNP_TS1, PNP_DS, 0, 0 ,0 ,0);
+ return status;
+}
+#endif
+
+#if needed
+/*
+ * Call PnP BIOS with function 0x04, "send message"
+ */
+static int pnp_bios_send_message(u16 message)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ status = call_pnp_bios(PNP_SEND_MESSAGE, message, PNP_DS, 0, 0, 0, 0, 0);
+ return status;
+}
+#endif
+
+#ifdef CONFIG_HOTPLUG
+/*
+ * Call PnP BIOS with function 0x05, "get docking station information"
+ */
+static int pnp_bios_dock_station_info(struct pnp_docking_station_info *data)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, data, sizeof(struct pnp_docking_station_info));
+ status = call_pnp_bios(PNP_GET_DOCKING_STATION_INFORMATION, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0);
+ return status;
+}
+#endif
+
+#if needed
+/*
+ * Call PnP BIOS with function 0x09, "set statically allocated resource
+ * information"
+ */
+static int pnp_bios_set_stat_res(char *info)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, info, *((u16 *) info));
+ status = call_pnp_bios(PNP_SET_STATIC_ALLOCED_RES_INFO, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0);
+ return status;
+}
+#endif
+
+#if needed
+/*
+ * Call PnP BIOS with function 0x0a, "get statically allocated resource
+ * information"
+ */
+static int pnp_bios_get_stat_res(char *info)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, info, 64 * 1024);
+ status = call_pnp_bios(PNP_GET_STATIC_ALLOCED_RES_INFO, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0);
+ return status;
+}
+#endif
+
+#if needed
+/*
+ * Call PnP BIOS with function 0x0b, "get APM id table"
+ */
+static int pnp_bios_apm_id_table(char *table, u16 *size)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, table, *size);
+ Q2_SET_SEL(PNP_TS2, size, sizeof(u16));
+ status = call_pnp_bios(PNP_GET_APM_ID_TABLE, 0, PNP_TS2, 0, PNP_TS1, PNP_DS, 0, 0);
+ return status;
+}
+#endif
+
+#if needed
+/*
+ * Call PnP BIOS with function 0x40, "get isa pnp configuration structure"
+ */
+static int pnp_bios_isapnp_config(struct pnp_isa_config_struc *data)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return PNP_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, data, sizeof(struct pnp_isa_config_struc));
+ status = call_pnp_bios(PNP_GET_PNP_ISA_CONFIG_STRUC, 0, PNP_TS1, PNP_DS, 0, 0, 0, 0);
+ return status;
+}
+#endif
+
+#if needed
+/*
+ * Call PnP BIOS with function 0x41, "get ESCD info"
+ */
+static int pnp_bios_escd_info(struct escd_info_struc *data)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return ESCD_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, data, sizeof(struct escd_info_struc));
+ status = call_pnp_bios(PNP_GET_ESCD_INFO, 0, PNP_TS1, 2, PNP_TS1, 4, PNP_TS1, PNP_DS);
+ return status;
+}
+#endif
+
+#if needed
+/*
+ * Call PnP BIOS function 0x42, "read ESCD"
+ * nvram_base is determined by calling escd_info
+ */
+static int pnp_bios_read_escd(char *data, u32 nvram_base)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return ESCD_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, data, 64 * 1024);
+ set_base(gdt[PNP_TS2 >> 3], nvram_base);
+ set_limit(gdt[PNP_TS2 >> 3], 64 * 1024);
+ status = call_pnp_bios(PNP_READ_ESCD, 0, PNP_TS1, PNP_TS2, PNP_DS, 0, 0, 0);
+ return status;
+}
+#endif
+
+#if needed
+/*
+ * Call PnP BIOS function 0x43, "write ESCD"
+ */
+static int pnp_bios_write_escd(char *data, u32 nvram_base)
+{
+ u16 status;
+ if (!pnp_bios_present ())
+ return ESCD_FUNCTION_NOT_SUPPORTED;
+ Q2_SET_SEL(PNP_TS1, data, 64 * 1024);
+ set_base(gdt[PNP_TS2 >> 3], nvram_base);
+ set_limit(gdt[PNP_TS2 >> 3], 64 * 1024);
+ status = call_pnp_bios(PNP_WRITE_ESCD, 0, PNP_TS1, PNP_TS2, PNP_DS, 0, 0, 0);
+ return status;
+}
+#endif
+
+
+/*
+ *
+ * DOCKING FUNCTIONS
+ *
+ */
+
+#ifdef CONFIG_HOTPLUG
+
+static int unloading = 0;
+static struct completion unload_sem;
+
+/*
+ * (Much of this belongs in a shared routine somewhere)
+ */
+
+static int pnp_dock_event(int dock, struct pnp_docking_station_info *info)
+{
+ char *argv [3], **envp, *buf, *scratch;
+ int i = 0, value;
+
+ if (!hotplug_path [0])
+ return -ENOENT;
+ if (!current->fs->root) {
+ return -EAGAIN;
+ }
+ if (!(envp = (char **) pnpbios_kmalloc (20 * sizeof (char *), GFP_KERNEL))) {
+ return -ENOMEM;
+ }
+ if (!(buf = pnpbios_kmalloc (256, GFP_KERNEL))) {
+ kfree (envp);
+ return -ENOMEM;
+ }
+
+ /* only one standardized param to hotplug command: type */
+ argv [0] = hotplug_path;
+ argv [1] = "dock";
+ argv [2] = 0;
+
+ /* minimal command environment */
+ envp [i++] = "HOME=/";
+ envp [i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
+
+#ifdef DEBUG
+ /* hint that policy agent should enter no-stdout debug mode */
+ envp [i++] = "DEBUG=kernel";
+#endif
+ /* extensible set of named bus-specific parameters,
+ * supporting multiple driver selection algorithms.
+ */
+ scratch = buf;
+
+ /* action: add, remove */
+ envp [i++] = scratch;
+ scratch += sprintf (scratch, "ACTION=%s", dock?"add":"remove") + 1;
+
+ /* Report the ident for the dock */
+ envp [i++] = scratch;
+ scratch += sprintf (scratch, "DOCK=%x/%x/%x",
+ info->location_id, info->serial, info->capabilities);
+ envp[i] = 0;
+
+ value = call_usermodehelper (argv [0], argv, envp);
+ kfree (buf);
+ kfree (envp);
+ return 0;
+}
+
+/*
+ * Poll the PnP docking at regular intervals
+ */
+static int pnp_dock_thread(void * unused)
+{
+ static struct pnp_docking_station_info now;
+ int docked = -1, d;
+ daemonize();
+ reparent_to_init();
+ strcpy(current->comm, "kpnpbios");
+ while(!unloading && !signal_pending(current))
+ {
+ int err;
+
+ /*
+ * Poll every 2 seconds
+ */
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ*2);
+ if(signal_pending(current))
+ break;
+
+ err = pnp_bios_dock_station_info(&now);
+
+ switch(err)
+ {
+ /*
+ * No dock to manage
+ */
+ case PNP_FUNCTION_NOT_SUPPORTED:
+ complete_and_exit(&unload_sem, 0);
+ case PNP_SYSTEM_NOT_DOCKED:
+ d = 0;
+ break;
+ case PNP_SUCCESS:
+ d = 1;
+ break;
+ default:
+ printk(KERN_WARNING "PnPBIOS: pnp_dock_thread: Unexpected status 0x%x returned by BIOS.\n", err);
+ continue;
+ }
+ if(d != docked)
+ {
+ if(pnp_dock_event(d, &now)==0)
+ {
+ docked = d;
+#if 0
+ printk(KERN_INFO "PnPBIOS: Docking station %stached.\n", docked?"at":"de");
+#endif
+ }
+ }
+ }
+ complete_and_exit(&unload_sem, 0);
+}
+
+#endif /* CONFIG_HOTPLUG */
+
+
+/*
+ *
+ * NODE DATA PARSING FUNCTIONS
+ *
+ */
+
+static void add_irqresource(struct pci_dev *dev, int irq)
+{
+ int i = 0;
+ while (!(dev->irq_resource[i].flags & IORESOURCE_UNSET) && i < DEVICE_COUNT_IRQ) i++;
+ if (i < DEVICE_COUNT_IRQ) {
+ dev->irq_resource[i].start = (unsigned long) irq;
+ dev->irq_resource[i].flags = IORESOURCE_IRQ; // Also clears _UNSET flag
+ }
+}
+
+static void add_dmaresource(struct pci_dev *dev, int dma)
+{
+ int i = 0;
+ while (!(dev->dma_resource[i].flags & IORESOURCE_UNSET) && i < DEVICE_COUNT_DMA) i++;
+ if (i < DEVICE_COUNT_DMA) {
+ dev->dma_resource[i].start = (unsigned long) dma;
+ dev->dma_resource[i].flags = IORESOURCE_DMA; // Also clears _UNSET flag
+ }
+}
+
+static void add_ioresource(struct pci_dev *dev, int io, int len)
+{
+ int i = 0;
+ while (!(dev->resource[i].flags & IORESOURCE_UNSET) && i < DEVICE_COUNT_RESOURCE) i++;
+ if (i < DEVICE_COUNT_RESOURCE) {
+ dev->resource[i].start = (unsigned long) io;
+ dev->resource[i].end = (unsigned long)(io + len - 1);
+ dev->resource[i].flags = IORESOURCE_IO; // Also clears _UNSET flag
+ }
+}
+
+static void add_memresource(struct pci_dev *dev, int mem, int len)
+{
+ int i = 0;
+ while (!(dev->resource[i].flags & IORESOURCE_UNSET) && i < DEVICE_COUNT_RESOURCE) i++;
+ if (i < DEVICE_COUNT_RESOURCE) {
+ dev->resource[i].start = (unsigned long) mem;
+ dev->resource[i].end = (unsigned long)(mem + len - 1);
+ dev->resource[i].flags = IORESOURCE_MEM; // Also clears _UNSET flag
+ }
+}
+
+static void node_resource_data_to_dev(struct pnp_bios_node *node, struct pci_dev *dev)
+{
+ unsigned char *p = node->data, *lastp=NULL;
+ int i;
+
+ /*
+ * First, set resource info to default values
+ */
+ for (i=0;i<DEVICE_COUNT_RESOURCE;i++) {
+ dev->resource[i].start = 0; // "disabled"
+ dev->resource[i].flags = IORESOURCE_UNSET;
+ }
+ for (i=0;i<DEVICE_COUNT_IRQ;i++) {
+ dev->irq_resource[i].start = (unsigned long)-1; // "disabled"
+ dev->irq_resource[i].flags = IORESOURCE_UNSET;
+ }
+ for (i=0;i<DEVICE_COUNT_DMA;i++) {
+ dev->dma_resource[i].start = (unsigned long)-1; // "disabled"
+ dev->dma_resource[i].flags = IORESOURCE_UNSET;
+ }
+
+ /*
+ * Fill in dev resource info
+ */
+ while ( (char *)p < ((char *)node->data + node->size )) {
+ if(p==lastp) break;
+
+ if( p[0] & 0x80 ) {// large item
+ switch (p[0] & 0x7f) {
+ case 0x01: // memory
+ {
+ int io = *(short *) &p[4];
+ int len = *(short *) &p[10];
+ add_memresource(dev, io, len);
+ break;
+ }
+ case 0x02: // device name
+ {
+ int len = *(short *) &p[1];
+ memcpy(dev->name, p + 3, len >= 80 ? 79 : len);
+ break;
+ }
+ case 0x05: // 32-bit memory
+ {
+ int io = *(int *) &p[4];
+ int len = *(int *) &p[16];
+ add_memresource(dev, io, len);
+ break;
+ }
+ case 0x06: // fixed location 32-bit memory
+ {
+ int io = *(int *) &p[4];
+ int len = *(int *) &p[8];
+ add_memresource(dev, io, len);
+ break;
+ }
+ } /* switch */
+ lastp = p+3;
+ p = p + p[1] + p[2]*256 + 3;
+ continue;
+ }
+ if ((p[0]>>3) == 0x0f) // end tag
+ break;
+ switch (p[0]>>3) {
+ case 0x04: // irq
+ {
+ int i, mask, irq = -1;
+ mask= p[1] + p[2]*256;
+ for (i=0;i<16;i++, mask=mask>>1)
+ if(mask & 0x01) irq=i;
+ add_irqresource(dev, irq);
+ break;
+ }
+ case 0x05: // dma
+ {
+ int i, mask, dma = -1;
+ mask = p[1];
+ for (i=0;i<8;i++, mask = mask>>1)
+ if(mask & 0x01) dma=i;
+ add_dmaresource(dev, dma);
+ break;
+ }
+ case 0x08: // io
+ {
+ int io= p[2] + p[3] *256;
+ int len = p[7];
+ add_ioresource(dev, io, len);
+ break;
+ }
+ case 0x09: // fixed location io
+ {
+ int io = p[1] + p[2] * 256;
+ int len = p[3];
+ add_ioresource(dev, io, len);
+ break;
+ }
+ } /* switch */
+ lastp=p+1;
+ p = p + (p[0] & 0x07) + 1;
+
+ } /* while */
+
+ return;
+}
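
   As a worked example of the small-item decoding above (illustrative bytes, not
   from a real node): the sequence 0x22 0x20 0x00 has p[0]>>3 == 0x04 and length
   p[0]&7 == 2, i.e. an IRQ descriptor whose mask p[1] + p[2]*256 == 0x0020; bit 5
   is the only bit set, so add_irqresource() records IRQ 5 and p advances by 3 bytes.
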
+
+
+/*
+ *
+ * DEVICE LIST MANAGEMENT FUNCTIONS
+ *
+ *
+ * Some of these are exported to give public access
+ *
+ * Question: Why maintain a device list when the PnP BIOS can
+ * list devices for us? Answer: Some PnP BIOSes can't report
+ * the current configuration, only the boot configuration.
+ * The boot configuration can be changed, so we need to keep
+ * a record of what the configuration was when we booted;
+ * presumably it continues to describe the current config.
+ * For those BIOSes that can change the current config, we
+ * keep the information in the devlist up to date.
+ *
+ * Note that it is currently assumed that the list does not
+ * grow or shrink in size after init time, and slot_name
+ * never changes. The list is protected by a spinlock.
+ */
+
+static LIST_HEAD(pnpbios_devices);
+
+static spinlock_t pnpbios_devices_lock;
+
+static int inline insert_device(struct pci_dev *dev)
+{
+
+ /*
+ * FIXME: Check for re-add of existing node;
+ * return -1 if node already present
+ */
+
+ /* We don't lock because we only do this at init time */
+ list_add_tail(&dev->global_list, &pnpbios_devices);
+
+ return 0;
+}
+
+#define HEX(id,a) hex[((id)>>a) & 15]
+#define CHAR(id,a) (0x40 + (((id)>>a) & 31))
+//
+static void inline pnpid32_to_pnpid(u32 id, char *str)
+{
+ const char *hex = "0123456789abcdef";
+
+ id = be32_to_cpu(id);
+ str[0] = CHAR(id, 26);
+ str[1] = CHAR(id, 21);
+ str[2] = CHAR(id,16);
+ str[3] = HEX(id, 12);
+ str[4] = HEX(id, 8);
+ str[5] = HEX(id, 4);
+ str[6] = HEX(id, 0);
+ str[7] = '\0';
+
+ return;
+}
+//
+#undef CHAR
+#undef HEX
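
   As a worked example of the bit layout above (my own arithmetic, not taken from
   the driver): the value 0x41D00C02, once be32_to_cpu() has been applied, splits
   into the 5-bit letters 0x10/0x0E/0x10 ("PNP") plus the hex nibbles 0, c, 0 and 2,
   so pnpid32_to_pnpid() produces the string "PNP0c02" for that EISA ID.
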
+
+/*
+ * Build a linked list of pci_devs in order of ascending node number
+ * Called only at init time.
+ */
+static void __init build_devlist(void)
+{
+ int i;
+ int nodenum;
+ int nodes_got = 0;
+ int devs = 0;
+ struct pnp_bios_node *node;
+ struct pnp_dev_node_info node_info;
+ struct pci_dev *dev;
+
+ if (!pnp_bios_present ())
+ return;
+
+ if (pnp_bios_dev_node_info(&node_info) != 0)
+ return;
+
+ node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL);
+ if (!node)
+ return;
+
+ for(i=0,nodenum=0; i<0xff && nodenum!=0xff; i++) {
+ int thisnodenum = nodenum;
+ /* For now we build the list from the "boot" config
+ * because asking for the "current" config causes
+ * some BIOSes to crash. */
+ if (pnp_bios_get_dev_node((u8 *)&nodenum, (char )1 , node)) {
+ printk(KERN_WARNING "PnPBIOS: PnP BIOS reported error on attempt to get dev node.\n");
+ break;
+ }
+ /* The BIOS returns with nodenum = the next node number */
+ if (nodenum < thisnodenum) {
+ printk(KERN_WARNING "PnPBIOS: Node number is out of sequence. Naughty BIOS!\n");
+ break;
+ }
+ nodes_got++;
+ dev = pnpbios_kmalloc(sizeof (struct pci_dev), GFP_KERNEL);
+ if (!dev)
+ break;
+ memset(dev,0,sizeof(struct pci_dev));
+ dev->devfn=thisnodenum;
+ memcpy(dev->name,"PNPBIOS",8);
+ pnpid32_to_pnpid(node->eisa_id,dev->slot_name);
+ node_resource_data_to_dev(node,dev);
+ if(insert_device(dev)<0)
+ kfree(dev);
+ else
+ devs++;
+ }
+ kfree(node);
+
+ printk(KERN_INFO "PnPBIOS: %i node%s reported by PnP BIOS; %i recorded by driver.\n",
+ nodes_got, nodes_got != 1 ? "s" : "", devs);
+}
+
+static struct pci_dev *find_device_by_nodenum( u8 nodenum )
+{
+ struct pci_dev *dev;
+
+ pnpbios_for_each_dev(dev) {
+ if(dev->devfn == nodenum)
+ return dev;
+ }
+
+ return NULL;
+}
+
+static void update_devlist( u8 nodenum, struct pnp_bios_node *data )
+{
+ unsigned long flags;
+ struct pci_dev *dev;
+
+ spin_lock_irqsave(&pnpbios_devices_lock, flags);
+ dev = find_device_by_nodenum( nodenum );
+ if ( dev ) {
+ node_resource_data_to_dev(data,dev);
+ }
+ spin_unlock_irqrestore(&pnpbios_devices_lock, flags);
+
+ return;
+}
+
+
+/*
+ *
+ * DRIVER REGISTRATION FUNCTIONS
+ *
+ *
+ * Exported to give public access
+ *
+ */
+
+static LIST_HEAD(pnpbios_drivers);
+
+static const struct pnpbios_device_id *
+match_device(const struct pnpbios_device_id *ids, const struct pci_dev *dev)
+{
+ while (*ids->id)
+ {
+ if(memcmp(ids->id, dev->slot_name, 7)==0)
+ return ids;
+ ids++;
+ }
+ return NULL;
+}
+
+static int announce_device(struct pnpbios_driver *drv, struct pci_dev *dev)
+{
+ const struct pnpbios_device_id *id;
+ struct pci_dev tmpdev;
+ int ret;
+
+ if (drv->id_table) {
+ id = match_device(drv->id_table, dev);
+ if (!id)
+ return 0;
+ } else
+ id = NULL;
+
+ memcpy( &tmpdev, dev, sizeof(struct pci_dev));
+ tmpdev.global_list.prev = NULL;
+ tmpdev.global_list.next = NULL;
+
+ dev_probe_lock();
+ /* Obviously, probe() should not call any pnpbios functions */
+ ret = drv->probe(&tmpdev, id);
+ dev_probe_unlock();
+ if (ret < 1)
+ return 0;
+
+ dev->driver = (void *)drv;
+
+ return 1;
+}
+
+/**
+ * pnpbios_register_driver - register a new pci driver
+ * @drv: the driver structure to register
+ *
+ * Adds the driver structure to the list of registered drivers
+ *
+ * For each device in the pnpbios device list that matches one of
+ * the ids in drv->id_table, calls the driver's "probe" function with
+ * arguments (1) a pointer to a *temporary* struct pci_dev containing
+ * resource info for the device, and (2) a pointer to the id string
+ * of the device. Expects the probe function to return 1 if the
+ * driver claims the device (otherwise 0) in which case, marks the
+ * device as having this driver.
+ *
+ * Returns the number of pci devices which were claimed by the driver
+ * during registration. The driver remains registered even if the
+ * return value is zero.
+ */
+int pnpbios_register_driver(struct pnpbios_driver *drv)
+{
+ struct pci_dev *dev;
+ unsigned long flags;
+ int count = 0;
+
+ list_add_tail(&drv->node, &pnpbios_drivers);
+ spin_lock_irqsave(&pnpbios_devices_lock, flags);
+ pnpbios_for_each_dev(dev) {
+ if (!pnpbios_dev_driver(dev))
+ count += announce_device(drv, dev);
+ }
+ spin_unlock_irqrestore(&pnpbios_devices_lock, flags);
+ return count;
+}
+
+EXPORT_SYMBOL(pnpbios_register_driver);
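
An illustrative sketch (not part of the diff): a minimal client of the registration interface documented above. Only the fields actually exercised in this file (id_table, probe, remove) are assumed; the "example_*" names and the device id are hypothetical.

	/* hypothetical client driver -- sketch only */
	static const struct pnpbios_device_id example_ids[] = {
		{ "PNP0511" },		/* example EISA-style id, as produced by pnpid32_to_pnpid() */
		{ "" }			/* empty id terminates the table (see match_device above) */
	};

	static int example_probe(struct pci_dev *dev, const struct pnpbios_device_id *id)
	{
		/* dev is a *temporary* copy; note its resources and return 1 to claim it */
		printk(KERN_INFO "example: claiming %s\n", dev->slot_name);
		return 1;
	}

	static struct pnpbios_driver example_driver = {
		id_table:	example_ids,
		probe:		example_probe,
	};

	/* in the client's init/exit code:
	 *	int claimed = pnpbios_register_driver(&example_driver);
	 *	...
	 *	pnpbios_unregister_driver(&example_driver);
	 */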
+
+/**
+ * pnpbios_unregister_driver - unregister a PnPBIOS driver
+ * @drv: the driver structure to unregister
+ *
+ * Deletes the driver structure from the list of registered PnPBIOS
+ * drivers, gives it a chance to clean up by calling its "remove"
+ * function for each device it was responsible for, and marks those
+ * devices as driverless.
+ */
+void pnpbios_unregister_driver(struct pnpbios_driver *drv)
+{
+ unsigned long flags;
+ struct pci_dev *dev;
+
+ list_del(&drv->node);
+ spin_lock_irqsave(&pnpbios_devices_lock, flags);
+ pnpbios_for_each_dev(dev) {
+ if (dev->driver == (void *)drv) {
+ if (drv->remove)
+ drv->remove(dev);
+ dev->driver = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&pnpbios_devices_lock, flags);
+}
+
+EXPORT_SYMBOL(pnpbios_unregister_driver);
+
+
+/*
+ *
+ * RESOURCE RESERVATION FUNCTIONS
+ *
+ *
+ * Used only at init time
+ *
+ */
+
+static void __init reserve_ioport_range(char *pnpid, int start, int end)
+{
+ struct resource *res;
+ char *regionid;
+
+ regionid = pnpbios_kmalloc(16, GFP_KERNEL);
+ if ( regionid == NULL )
+ return;
+ sprintf(regionid, "PnPBIOS %s", pnpid);
+ res = request_region(start,end-start+1,regionid);
+ if ( res == NULL )
+ kfree( regionid );
+ else
+ res->flags &= ~IORESOURCE_BUSY;
+ /*
+ * Failures at this point are usually harmless; PCI quirks, for
+ * example, also reserve regions they already know about, so we may
+ * well end up with double reservations.
+ */
+ printk(KERN_INFO
+ "PnPBIOS: %s: ioport range 0x%x-0x%x %s reserved.\n",
+ pnpid, start, end,
+ NULL != res ? "has been" : "could not be"
+ );
+
+ return;
+}
+
+static void __init reserve_resources_of_dev( struct pci_dev *dev )
+{
+ int i;
+
+ for (i=0;i<DEVICE_COUNT_RESOURCE;i++) {
+ if ( dev->resource[i].flags & IORESOURCE_UNSET )
+ /* end of resources */
+ break;
+ if (dev->resource[i].flags & IORESOURCE_IO) {
+ /* ioport */
+ if ( dev->resource[i].start == 0 )
+ /* disabled */
+ /* Do nothing */
+ continue;
+ if ( dev->resource[i].start < 0x100 )
+ /*
+ * Below 0x100 is only standard PC hardware
+ * (pics, kbd, timer, dma, ...)
+ * We should not get resource conflicts there,
+ * and the kernel reserves these anyway
+ * (see arch/i386/kernel/setup.c).
+ * So, do nothing
+ */
+ continue;
+ if ( dev->resource[i].end < dev->resource[i].start )
+ /* invalid endpoint */
+ /* Do nothing */
+ continue;
+ reserve_ioport_range(
+ dev->slot_name,
+ dev->resource[i].start,
+ dev->resource[i].end
+ );
+ } else if (dev->resource[i].flags & IORESOURCE_MEM) {
+ /* iomem */
+ /* For now do nothing */
+ continue;
+ } else {
+ /* Neither ioport nor iomem */
+ /* Do nothing */
+ continue;
+ }
+ }
+
+ return;
+}
+
+static void __init reserve_resources( void )
+{
+ struct pci_dev *dev;
+
+ pnpbios_for_each_dev(dev) {
+ if (
+ 0 != strcmp(dev->slot_name,"PNP0c01") && /* memory controller */
+ 0 != strcmp(dev->slot_name,"PNP0c02") /* system peripheral: other */
+ ) {
+ continue;
+ }
+ reserve_resources_of_dev(dev);
+ }
+
+ return;
+}
+
+
+/*
+ *
+ * INIT AND EXIT
+ *
+ */
+
+extern int is_sony_vaio_laptop;
+
+static int pnpbios_disabled; /* = 0 */
+static int dont_reserve_resources; /* = 0 */
+int pnpbios_dont_use_current_config; /* = 0 */
+
+#ifndef MODULE
+static int __init pnpbios_setup(char *str)
+{
+ int invert;
+
+ while ((str != NULL) && (*str != '\0')) {
+ if (strncmp(str, "off", 3) == 0)
+ pnpbios_disabled=1;
+ if (strncmp(str, "on", 2) == 0)
+ pnpbios_disabled=0;
+ invert = (strncmp(str, "no-", 3) == 0);
+ if (invert)
+ str += 3;
+ if (strncmp(str, "curr", 4) == 0)
+ pnpbios_dont_use_current_config = invert;
+ if (strncmp(str, "res", 3) == 0)
+ dont_reserve_resources = invert;
+ str = strchr(str, ',');
+ if (str != NULL)
+ str += strspn(str, ", \t");
+ }
+
+ return 1;
+}
+
+__setup("pnpbios=", pnpbios_setup);
+#endif
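
For reference, the option string parsed by pnpbios_setup() above is given on the kernel command line; a usage sketch (options are comma-separated):

	pnpbios=off		disable the PnPBIOS driver entirely
	pnpbios=on		force it on
	pnpbios=on,no-curr	run, but read only the "boot" config
				(sets pnpbios_dont_use_current_config)
	pnpbios=no-res		skip resource reservation at init time
				(sets dont_reserve_resources)

A "no-" prefix sets the corresponding "don't" flag; the bare option name ("curr", "res") clears it again.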
+
+subsys_initcall(pnpbios_init);
+
+void __init pnpbios_init(void)
+{
+ union pnp_bios_expansion_header *check;
+ u8 sum;
+ int i, length;
+
+ spin_lock_init(&pnp_bios_lock);
+ spin_lock_init(&pnpbios_devices_lock);
+
+ if(pnpbios_disabled) {
+ printk(KERN_INFO "PnPBIOS: Disabled.\n");
+ return;
+ }
+
+ if ( is_sony_vaio_laptop )
+ pnpbios_dont_use_current_config = 1;
+
+ /*
+ * Search the defined area (0xf0000-0xffff0) for a valid PnP BIOS
+ * structure and, if one is found, set up the selectors and
+ * entry points.
+ */
+ for (check = (union pnp_bios_expansion_header *) __va(0xf0000);
+ check < (union pnp_bios_expansion_header *) __va(0xffff0);
+ ((void *) (check)) += 16) {
+ if (check->fields.signature != PNP_SIGNATURE)
+ continue;
+ length = check->fields.length;
+ if (!length)
+ continue;
+ for (sum = 0, i = 0; i < length; i++)
+ sum += check->chars[i];
+ if (sum)
+ continue;
+ if (check->fields.version < 0x10) {
+ printk(KERN_WARNING "PnPBIOS: PnP BIOS version %d.%d is not supported.\n",
+ check->fields.version >> 4,
+ check->fields.version & 15);
+ continue;
+ }
+ printk(KERN_INFO "PnPBIOS: Found PnP BIOS installation structure at 0x%p.\n", check);
+ printk(KERN_INFO "PnPBIOS: PnP BIOS version %d.%d, entry 0x%x:0x%x, dseg 0x%x.\n",
+ check->fields.version >> 4, check->fields.version & 15,
+ check->fields.pm16cseg, check->fields.pm16offset,
+ check->fields.pm16dseg);
+ Q2_SET_SEL(PNP_CS32, &pnp_bios_callfunc, 64 * 1024);
+ Q_SET_SEL(PNP_CS16, check->fields.pm16cseg, 64 * 1024);
+ Q_SET_SEL(PNP_DS, check->fields.pm16dseg, 64 * 1024);
+ pnp_bios_callpoint.offset = check->fields.pm16offset;
+ pnp_bios_callpoint.segment = PNP_CS16;
+ pnp_bios_hdr = check;
+ break;
+ }
+ build_devlist();
+ if ( ! dont_reserve_resources )
+ reserve_resources();
+#ifdef CONFIG_PROC_FS
+ pnpbios_proc_init();
+#endif
+#ifdef CONFIG_HOTPLUG
+ init_completion(&unload_sem);
+ if(kernel_thread(pnp_dock_thread, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL)>0)
+ unloading = 0;
+#endif
+}
+
+#ifdef MODULE
+
+MODULE_LICENSE("GPL");
+
+/* We have to run it early and not as a module. */
+module_init(pnpbios_init);
+
+#ifdef CONFIG_HOTPLUG
+static void pnpbios_exit(void)
+{
+ /* free_resources() ought to go here */
+ /* pnpbios_proc_done() */
+ unloading = 1;
+ wait_for_completion(&unload_sem);
+}
+
+module_exit(pnpbios_exit);
+
+#endif
+#endif
diff --git a/drivers/pnp/pnpbios_proc.c b/drivers/pnp/pnpbios_proc.c
new file mode 100644
index 000000000000..efa39987dc92
--- /dev/null
+++ b/drivers/pnp/pnpbios_proc.c
@@ -0,0 +1,151 @@
+/*
+ * pnpbios_proc.c: /proc/bus/pnp interface for Plug and Play devices
+ *
+ * Written by David Hinds, dahinds@users.sourceforge.net
+ */
+
+//#include <pcmcia/config.h>
+#define __NO_VERSION__
+//#include <pcmcia/k_compat.h>
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/pnpbios.h>
+
+static struct proc_dir_entry *proc_pnp = NULL;
+static struct proc_dir_entry *proc_pnp_boot = NULL;
+static struct pnp_dev_node_info node_info;
+
+static int proc_read_devices(char *buf, char **start, off_t pos,
+ int count, int *eof, void *data)
+{
+ struct pnp_bios_node *node;
+ int i;
+ u8 nodenum;
+ char *p = buf;
+
+ if (pos != 0) {
+ *eof = 1;
+ return 0;
+ }
+ node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL);
+ if (!node) return -ENOMEM;
+ for (i=0,nodenum=0;i<0xff && nodenum!=0xff; i++) {
+ if ( pnp_bios_get_dev_node(&nodenum, 1, node) )
+ break;
+ p += sprintf(p, "%02x\t%08x\t%02x:%02x:%02x\t%04x\n",
+ node->handle, node->eisa_id,
+ node->type_code[0], node->type_code[1],
+ node->type_code[2], node->flags);
+ }
+ kfree(node);
+ return (p-buf);
+}
+
+static int proc_read_node(char *buf, char **start, off_t pos,
+ int count, int *eof, void *data)
+{
+ struct pnp_bios_node *node;
+ int boot = (long)data >> 8;
+ u8 nodenum = (long)data;
+ int len;
+
+ if (pos != 0) {
+ *eof = 1;
+ return 0;
+ }
+ node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL);
+ if (!node) return -ENOMEM;
+	if ( pnp_bios_get_dev_node(&nodenum, boot, node) ) {
+		kfree(node);	/* free the node buffer on the error path too */
+		return -EIO;
+	}
+ len = node->size - sizeof(struct pnp_bios_node);
+ memcpy(buf, node->data, len);
+ kfree(node);
+ return len;
+}
+
+static int proc_write_node(struct file *file, const char *buf,
+ unsigned long count, void *data)
+{
+ struct pnp_bios_node *node;
+ int boot = (long)data >> 8;
+ u8 nodenum = (long)data;
+
+ node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL);
+ if (!node) return -ENOMEM;
+	if ( pnp_bios_get_dev_node(&nodenum, boot, node) ) {
+		kfree(node);
+		return -EIO;
+	}
+	if (count != node->size - sizeof(struct pnp_bios_node)) {
+		kfree(node);
+		return -EINVAL;
+	}
+	memcpy(node->data, buf, count);
+	if (pnp_bios_set_dev_node(node->handle, boot, node) != 0) {
+		kfree(node);
+		return -EINVAL;
+	}
+ kfree(node);
+ return count;
+}
+
+/*
+ * When this is called, pnpbios functions are assumed to
+ * work and the pnpbios_dont_use_current_config flag
+ * should already have been set to the appropriate value
+ */
+void pnpbios_proc_init( void )
+{
+ struct pnp_bios_node *node;
+ struct proc_dir_entry *ent;
+ char name[3];
+ int i;
+ u8 nodenum;
+
+ if (pnp_bios_dev_node_info(&node_info) != 0) return;
+
+ proc_pnp = proc_mkdir("pnp", proc_bus);
+ if (!proc_pnp) return;
+ proc_pnp_boot = proc_mkdir("boot", proc_pnp);
+ if (!proc_pnp_boot) return;
+ create_proc_read_entry("devices", 0, proc_pnp, proc_read_devices, NULL);
+
+ node = pnpbios_kmalloc(node_info.max_node_size, GFP_KERNEL);
+ if (!node) return;
+ for (i=0,nodenum = 0; i<0xff && nodenum != 0xff; i++) {
+ if (pnp_bios_get_dev_node(&nodenum, 1, node) != 0)
+ break;
+ sprintf(name, "%02x", node->handle);
+ if ( !pnpbios_dont_use_current_config ) {
+ ent = create_proc_entry(name, 0, proc_pnp);
+ if (ent) {
+ ent->read_proc = proc_read_node;
+ ent->write_proc = proc_write_node;
+ ent->data = (void *)(long)(node->handle);
+ }
+ }
+ ent = create_proc_entry(name, 0, proc_pnp_boot);
+ if (ent) {
+ ent->read_proc = proc_read_node;
+ ent->write_proc = proc_write_node;
+ ent->data = (void *)(long)(node->handle+0x100);
+ }
+ }
+ kfree(node);
+}
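
For orientation, a sketch of the /proc tree created above (the per-node "current config" entries are omitted when pnpbios_dont_use_current_config is set):

	/proc/bus/pnp/devices		one summary line per node (see proc_read_devices)
	/proc/bus/pnp/<handle>		current config of that node, read/write
	/proc/bus/pnp/boot/<handle>	"boot" config of that node, read/write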
+
+void pnpbios_proc_done(void)
+{
+ int i;
+ char name[3];
+
+ if (!proc_pnp) return;
+
+ for (i=0; i<0xff; i++) {
+ sprintf(name, "%02x", i);
+ if ( !pnpbios_dont_use_current_config )
+ remove_proc_entry(name, proc_pnp);
+ remove_proc_entry(name, proc_pnp_boot);
+ }
+ remove_proc_entry("boot", proc_pnp);
+ remove_proc_entry("devices", proc_pnp);
+ remove_proc_entry("pnp", proc_bus);
+}
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index c8aa338eb9e7..5c26cb94f8c0 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1326,7 +1326,7 @@ static void tw_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
TW_Command *command_packet;
if (test_and_set_bit(TW_IN_INTR, &tw_dev->flags))
return;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(tw_dev->host->host_lock, flags);
if (tw_dev->tw_pci_dev->irq == irq) {
spin_lock(&tw_dev->tw_lock);
@@ -1475,7 +1475,7 @@ static void tw_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
}
spin_unlock(&tw_dev->tw_lock);
}
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
clear_bit(TW_IN_INTR, &tw_dev->flags);
} /* End tw_interrupt() */
@@ -1902,9 +1902,7 @@ int tw_scsi_detect(Scsi_Host_Template *tw_host)
return 0;
}
- spin_unlock_irq(&io_request_lock);
ret = tw_findcards(tw_host);
- spin_lock_irq(&io_request_lock);
return ret;
} /* End tw_scsi_detect() */
@@ -1929,9 +1927,9 @@ int tw_scsi_eh_abort(Scsi_Cmnd *SCpnt)
}
/* We have to let AEN requests through before the reset */
- spin_unlock_irq(&io_request_lock);
+ spin_unlock_irq(tw_dev->host->host_lock);
mdelay(TW_AEN_WAIT_TIME);
- spin_lock_irq(&io_request_lock);
+ spin_lock_irq(tw_dev->host->host_lock);
spin_lock(&tw_dev->tw_lock);
tw_dev->num_aborts++;
@@ -1993,9 +1991,9 @@ int tw_scsi_eh_reset(Scsi_Cmnd *SCpnt)
}
/* We have to let AEN requests through before the reset */
- spin_unlock_irq(&io_request_lock);
+ spin_unlock_irq(tw_dev->host->host_lock);
mdelay(TW_AEN_WAIT_TIME);
- spin_lock_irq(&io_request_lock);
+ spin_lock_irq(tw_dev->host->host_lock);
spin_lock(&tw_dev->tw_lock);
tw_dev->num_resets++;
diff --git a/drivers/scsi/53c7,8xx.c b/drivers/scsi/53c7,8xx.c
index 81217c47e929..2cd317cb8355 100644
--- a/drivers/scsi/53c7,8xx.c
+++ b/drivers/scsi/53c7,8xx.c
@@ -1108,9 +1108,9 @@ NCR53c7x0_init (struct Scsi_Host *host) {
if (!search) {
#ifdef __powerpc__
- if (request_irq(host->irq, do_NCR53c7x0_intr, SA_SHIRQ, "53c7,8xx", NULL))
+ if (request_irq(host->irq, do_NCR53c7x0_intr, SA_SHIRQ, "53c7,8xx", host))
#else
- if (request_irq(host->irq, do_NCR53c7x0_intr, SA_INTERRUPT, "53c7,8xx", NULL))
+ if (request_irq(host->irq, do_NCR53c7x0_intr, SA_INTERRUPT, "53c7,8xx", host))
#endif
{
@@ -4360,9 +4360,11 @@ static void
do_NCR53c7x0_intr(int irq, void *dev_id, struct pt_regs * regs) {
unsigned long flags;
- spin_lock_irqsave(&io_request_lock, flags);
+ struct Scsi_Host *dev = dev_id;
+
+ spin_lock_irqsave(dev->host_lock, flags);
NCR53c7x0_intr(irq, dev_id, regs);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
/*
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 6cebda8f4dc8..f4da7117c667 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -1508,18 +1508,7 @@ NCR_700_intr(int irq, void *dev_id, struct pt_regs *regs)
__u8 pun = 0xff, lun = 0xff;
unsigned long flags;
- /* Unfortunately, we have to take the io_request_lock here
- * rather than the host lock hostdata->lock because we're
- * looking to exclude queuecommand from messing with the
- * registers while we're processing the interrupt. Since
- * queuecommand is called holding io_request_lock, and we have
- * to take io_request_lock before we call the command
- * scsi_done, we would get a deadlock if we took
- * hostdata->lock here and in queuecommand (because the order
- * of locking in queuecommand: 1) io_request_lock then 2)
- * hostdata->lock would be the reverse of taking it in this
- * routine */
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(host->host_lock, flags);
if((istat = NCR_700_readb(host, ISTAT_REG))
& (SCSI_INT_PENDING | DMA_INT_PENDING)) {
__u32 dsps;
@@ -1764,7 +1753,7 @@ NCR_700_intr(int irq, void *dev_id, struct pt_regs *regs)
}
}
out_unlock:
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(host->host_lock, flags);
}
/* FIXME: Need to put some proc information in and plumb it
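
The hunks above and most of those that follow repeat one conversion: the global io_request_lock is replaced by the per-host lock, and the Scsi_Host is passed as dev_id to request_irq() so the interrupt wrapper can find that lock. A condensed sketch of the pattern, with hypothetical "example" names (illustrative only, not part of the diff):

	static void do_example_intr(int irq, void *dev_id, struct pt_regs *regs)
	{
		struct Scsi_Host *host = dev_id;	/* registered via request_irq(..., host) */
		unsigned long flags;

		spin_lock_irqsave(host->host_lock, flags);	/* per-host lock, not io_request_lock */
		example_chip_intr(irq, dev_id, regs);		/* hypothetical chip-level handler */
		spin_unlock_irqrestore(host->host_lock, flags);
	}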
diff --git a/drivers/scsi/AM53C974.c b/drivers/scsi/AM53C974.c
index 5696214c7e12..8381d13cb2a6 100644
--- a/drivers/scsi/AM53C974.c
+++ b/drivers/scsi/AM53C974.c
@@ -1021,10 +1021,11 @@ static void AM53C974_main(void)
static void do_AM53C974_intr(int irq, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock, flags);
+ struct Scsi_Host *dev = dev_id;
+
+ spin_lock_irqsave(dev->host_lock, flags);
AM53C974_intr(irq, dev_id, regs);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
/************************************************************************
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
index b0c7bd184f98..e415cbc9a67d 100644
--- a/drivers/scsi/BusLogic.h
+++ b/drivers/scsi/BusLogic.h
@@ -1531,7 +1531,7 @@ static inline
void BusLogic_AcquireHostAdapterLockIH(BusLogic_HostAdapter_T *HostAdapter,
ProcessorFlags_T *ProcessorFlags)
{
- spin_lock_irqsave(&HostAdapter->SCSI_Host->host_lock, *ProcessorFlags);
+ spin_lock_irqsave(HostAdapter->SCSI_Host->host_lock, *ProcessorFlags);
}
@@ -1544,7 +1544,7 @@ static inline
void BusLogic_ReleaseHostAdapterLockIH(BusLogic_HostAdapter_T *HostAdapter,
ProcessorFlags_T *ProcessorFlags)
{
- spin_unlock_irqrestore(&HostAdapter->SCSI_Host->host_lock, *ProcessorFlags);
+ spin_unlock_irqrestore(HostAdapter->SCSI_Host->host_lock, *ProcessorFlags);
}
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 4d007c240f3e..a40f558a0c3b 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -692,9 +692,9 @@ void NCR5380_timer_fn(unsigned long surplus_to_requirements)
}
restore_flags(flags);
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(instance->host_lock, flags);
run_main();
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(instance->host_lock, flags);
}
#endif /* def USLEEP */
@@ -745,7 +745,7 @@ static int __init NCR5380_probe_irq(struct Scsi_Host *instance, int possible)
NCR5380_setup(instance);
for (trying_irqs = i = 0, mask = 1; i < 16; ++i, mask <<= 1)
- if ((mask & possible) && (request_irq(i, &probe_intr, SA_INTERRUPT, "NCR-probe", NULL)
+ if ((mask & possible) && (request_irq(i, &probe_intr, SA_INTERRUPT, "NCR-probe", instance)
== 0))
trying_irqs |= mask;
@@ -1271,7 +1271,7 @@ static void NCR5380_main(void) {
* this should prevent any race conditions.
*/
- spin_unlock_irq(&io_request_lock);
+ spin_unlock_irq(instance->host_lock);
save_flags(flags);
@@ -1424,7 +1424,7 @@ static void NCR5380_main(void) {
break;
} /* for instance */
} while (!done);
- spin_lock_irq(&io_request_lock);
+ spin_lock_irq(instance->host_lock);
/* cli();*/
main_running = 0;
}
@@ -1521,10 +1521,10 @@ static void NCR5380_intr(int irq, void *dev_id, struct pt_regs *regs) {
{
unsigned long timeout = jiffies + NCR_TIMEOUT;
- spin_unlock_irq(&io_request_lock);
+ spin_unlock_irq(instance->host_lock);
while (NCR5380_read(BUS_AND_STATUS_REG) & BASR_ACK
&& time_before(jiffies, timeout));
- spin_lock_irq(&io_request_lock);
+ spin_lock_irq(instance->host_lock);
if (time_after_eq(jiffies, timeout) )
printk("scsi%d: timeout at NCR5380.c:%d\n",
@@ -1554,10 +1554,12 @@ static void NCR5380_intr(int irq, void *dev_id, struct pt_regs *regs) {
static void do_NCR5380_intr(int irq, void *dev_id, struct pt_regs *regs) {
unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock, flags);
+
+ struct Scsi_Host *dev = dev_id;
+
+ spin_lock_irqsave(dev->host_lock, flags);
NCR5380_intr(irq, dev_id, regs);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
#endif
@@ -1669,12 +1671,12 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
{
unsigned long timeout = jiffies + 2 * NCR_TIMEOUT;
- spin_unlock_irq(&io_request_lock);
+ spin_unlock_irq(instance->host_lock);
while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS)
&& time_before(jiffies,timeout));
- spin_lock_irq(&io_request_lock);
+ spin_lock_irq(instance->host_lock);
if (time_after_eq(jiffies,timeout)) {
printk("scsi: arbitration timeout at %d\n", __LINE__);
@@ -1834,10 +1836,10 @@ part2:
hostdata->selecting = 0; /* clear this pointer, because we passed the
waiting period */
#else
- spin_unlock_irq(&io_request_lock);
+ spin_unlock_irq(instance->host_lock);
while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) &
(SR_BSY | SR_IO)));
- spin_lock_irq(&io_request_lock);
+ spin_lock_irq(instance->host_lock);
#endif
if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) ==
(SR_SEL | SR_IO)) {
@@ -1905,9 +1907,9 @@ part2:
{
unsigned long timeout = jiffies + NCR_TIMEOUT;
- spin_unlock_irq(&io_request_lock);
+ spin_unlock_irq(instance->host_lock);
while (!(NCR5380_read(STATUS_REG) & SR_REQ) && time_before(jiffies, timeout));
- spin_lock_irq(&io_request_lock);
+ spin_lock_irq(instance->host_lock);
if (time_after_eq(jiffies, timeout)) {
printk("scsi%d: timeout at NCR5380.c:%d\n", instance->host_no, __LINE__);
diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c
index 9a23dbc35a05..1aa7cfc855f5 100644
--- a/drivers/scsi/NCR53C9x.c
+++ b/drivers/scsi/NCR53C9x.c
@@ -3546,9 +3546,10 @@ void esp_intr(int irq, void *dev_id, struct pt_regs *pregs)
struct NCR_ESP *esp;
unsigned long flags;
int again;
+ struct Scsi_Host *dev = dev_id;
/* Handle all ESP interrupts showing at this IRQ level. */
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
repeat:
again = 0;
for_each_esp(esp) {
@@ -3572,7 +3573,7 @@ repeat:
}
if(again)
goto repeat;
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
#else
/* For SMP we only service one ESP on the list at our IRQ level! */
@@ -3580,9 +3581,10 @@ void esp_intr(int irq, void *dev_id, struct pt_regs *pregs)
{
struct NCR_ESP *esp;
unsigned long flags;
-
+ struct Scsi_Host *dev = dev_id;
+
/* Handle all ESP interrupts showing at this IRQ level. */
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
for_each_esp(esp) {
if(((esp)->irq & 0xf) == irq) {
if(esp->dma_irq_p(esp)) {
@@ -3599,7 +3601,7 @@ void esp_intr(int irq, void *dev_id, struct pt_regs *pregs)
}
}
out:
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
#endif
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c
index 118acb2db1eb..bffa89fb0d43 100644
--- a/drivers/scsi/NCR53c406a.c
+++ b/drivers/scsi/NCR53c406a.c
@@ -537,7 +537,7 @@ NCR53c406a_detect(Scsi_Host_Template * tpnt){
DEB(printk("NCR53c406a: using port_base %x\n", port_base));
if(irq_level > 0) {
- if(request_irq(irq_level, do_NCR53c406a_intr, 0, "NCR53c406a", NULL)){
+ if(request_irq(irq_level, do_NCR53c406a_intr, 0, "NCR53c406a", shpnt)){
printk("NCR53c406a: unable to allocate IRQ %d\n", irq_level);
goto err_release;
}
@@ -780,10 +780,11 @@ NCR53c406a_biosparm(Scsi_Disk *disk, kdev_t dev, int* info_array){
static void
do_NCR53c406a_intr(int unused, void *dev_id, struct pt_regs *regs){
unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock, flags);
+ struct Scsi_Host * dev = dev_id;
+
+ spin_lock_irqsave(dev->host_lock, flags);
NCR53c406a_intr(0, dev_id, regs);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
static void
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index b9cac9c99da6..08cafe0f88b5 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -40,9 +40,9 @@ static void a2091_intr (int irq, void *dummy, struct pt_regs *fp)
continue;
if (status & ISTR_INTS) {
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(instance->host_lock, flags);
wd33c93_intr (instance);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(instance->host_lock, flags);
}
}
}
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index 1005da51785c..062f02df9ba6 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -30,14 +30,15 @@ static void a3000_intr (int irq, void *dummy, struct pt_regs *fp)
{
unsigned long flags;
unsigned int status = DMA(a3000_host)->ISTR;
-
+ struct Scsi_Host *dev = dummy;
+
if (!(status & ISTR_INT_P))
return;
if (status & ISTR_INTS)
{
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
wd33c93_intr (a3000_host);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
} else
printk("Non-serviced A3000 SCSI-interrupt? ISTR = %02x\n",
status);
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 361fa6b70fa6..6b75605b352a 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -5871,7 +5871,7 @@ advansys_queuecommand(Scsi_Cmnd *scp, void (*done)(Scsi_Cmnd *))
/* host_lock taken by mid-level prior to call but need to protect */
/* against own ISR */
- spin_lock_irqsave(&boardp->lock, flags);
+ spin_lock_irqsave(boardp->lock, flags);
/*
* Block new commands while handling a reset or abort request.
@@ -6413,10 +6413,10 @@ asc_scsi_done_list(Scsi_Cmnd *scp, int from_isr)
ASC_STATS(scp->host, done);
ASC_ASSERT(scp->scsi_done != NULL);
if (from_isr)
- spin_lock_irqsave(&scp->host->host_lock, flags);
+ spin_lock_irqsave(scp->host->host_lock, flags);
scp->scsi_done(scp);
if (from_isr)
- spin_unlock_irqrestore(&scp->host->host_lock, flags);
+ spin_unlock_irqrestore(scp->host->host_lock, flags);
scp = tscp;
}
ASC_DBG(2, "asc_scsi_done_list: done\n");
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 938019b6654f..774abe3d29b3 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -1349,9 +1349,9 @@ int aha152x_detect(Scsi_Host_Template * tpnt)
printk(KERN_INFO "aha152x%d: trying software interrupt, ", HOSTNO);
SETPORT(DMACNTRL0, SWINT|INTEN);
- spin_unlock_irq(&shpnt->host_lock);
+ spin_unlock_irq(shpnt->host_lock);
mdelay(1000);
- spin_lock_irq(&shpnt->host_lock);
+ spin_lock_irq(shpnt->host_lock);
free_irq(shpnt->irq, shpnt);
if (!HOSTDATA(shpnt)->swint) {
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 1b1414eae012..2198459e2bbc 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -425,9 +425,9 @@ static void do_aha1542_intr_handle(int irq, void *dev_id, struct pt_regs *regs)
if (!shost)
panic("Splunge!");
- spin_lock_irqsave(&shost->host_lock, flags);
+ spin_lock_irqsave(shost->host_lock, flags);
aha1542_intr_handle(shost, dev_id, regs);
- spin_unlock_irqrestore(&shost->host_lock, flags);
+ spin_unlock_irqrestore(shost->host_lock, flags);
}
/* A "high" level interrupt handler */
@@ -1475,9 +1475,9 @@ static int aha1542_bus_reset(Scsi_Cmnd * SCpnt)
* check for timeout, and if we are doing something like this
* we are pretty desperate anyways.
*/
- spin_unlock_irq(&SCpnt->host->host_lock);
+ spin_unlock_irq(SCpnt->host->host_lock);
scsi_sleep(4 * HZ);
- spin_lock_irq(&SCpnt->host->host_lock);
+ spin_lock_irq(SCpnt->host->host_lock);
WAIT(STATUS(SCpnt->host->io_port),
STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF);
@@ -1539,9 +1539,9 @@ static int aha1542_host_reset(Scsi_Cmnd * SCpnt)
* check for timeout, and if we are doing something like this
* we are pretty desperate anyways.
*/
- spin_unlock_irq(&SCpnt->host->host_lock);
+ spin_unlock_irq(SCpnt->host->host_lock);
scsi_sleep(4 * HZ);
- spin_lock_irq(&SCpnt->host->host_lock);
+ spin_lock_irq(SCpnt->host->host_lock);
WAIT(STATUS(SCpnt->host->io_port),
STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF);
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index 63805aec9c30..44c9d83eaec7 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -224,7 +224,7 @@ void aha1740_intr_handle(int irq, void *dev_id, struct pt_regs * regs)
if (!host)
panic("aha1740.c: Irq from unknown host!\n");
- spin_lock_irqsave(&host->host_lock, flags);
+ spin_lock_irqsave(host->host_lock, flags);
base = host->io_port;
number_serviced = 0;
@@ -299,7 +299,7 @@ void aha1740_intr_handle(int irq, void *dev_id, struct pt_regs * regs)
number_serviced++;
}
- spin_unlock_irqrestore(&host->host_lock, flags);
+ spin_unlock_irqrestore(host->host_lock, flags);
}
int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index aec2ad4c107e..7cf724157498 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -433,7 +433,7 @@ go_42:
/*
* Complete the command
*/
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(workrequ->host->host_lock, flags);
(*workrequ->scsi_done) (workrequ);
/*
@@ -441,7 +441,7 @@ go_42:
*/
dev->id[target_id].curr_req = 0;
dev->working--;
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(workrequ->host->host_lock, flags);
/*
* Take it back wide
*/
diff --git a/drivers/scsi/blz1230.c b/drivers/scsi/blz1230.c
index 4a3f28512258..68efebdfb9cf 100644
--- a/drivers/scsi/blz1230.c
+++ b/drivers/scsi/blz1230.c
@@ -137,7 +137,7 @@ int __init blz1230_esp_detect(Scsi_Host_Template *tpnt)
esp->irq = IRQ_AMIGA_PORTS;
esp->slot = board+REAL_BLZ1230_ESP_ADDR;
if (request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ,
- "Blizzard 1230 SCSI IV", esp_intr))
+ "Blizzard 1230 SCSI IV", esp->ehost))
goto err_out;
/* Figure out our scsi ID on the bus */
diff --git a/drivers/scsi/blz2060.c b/drivers/scsi/blz2060.c
index d67a2ddba942..355a82cf5e8c 100644
--- a/drivers/scsi/blz2060.c
+++ b/drivers/scsi/blz2060.c
@@ -115,7 +115,7 @@ int __init blz2060_esp_detect(Scsi_Host_Template *tpnt)
esp->irq = IRQ_AMIGA_PORTS;
request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ,
- "Blizzard 2060 SCSI", esp_intr);
+ "Blizzard 2060 SCSI", esp->ehost);
/* Figure out our scsi ID on the bus */
esp->scsi_id = 7;
diff --git a/drivers/scsi/cpqfcTSinit.c b/drivers/scsi/cpqfcTSinit.c
index aad2a1a429de..8dc265c07243 100644
--- a/drivers/scsi/cpqfcTSinit.c
+++ b/drivers/scsi/cpqfcTSinit.c
@@ -242,7 +242,7 @@ static void launch_FCworker_thread(struct Scsi_Host *HostAdapter)
cpqfcHBAdata->notify_wt = &sem;
/* must unlock before kernel_thread(), for it may cause a reschedule. */
- spin_unlock_irq(&HostAdapter->host_lock);
+ spin_unlock_irq(HostAdapter->host_lock);
kernel_thread((int (*)(void *))cpqfcTSWorkerThread,
(void *) HostAdapter, 0);
/*
@@ -250,7 +250,7 @@ static void launch_FCworker_thread(struct Scsi_Host *HostAdapter)
*/
down (&sem);
- spin_lock_irq(&HostAdapter->host_lock);
+ spin_lock_irq(HostAdapter->host_lock);
cpqfcHBAdata->notify_wt = NULL;
LEAVE("launch_FC_worker_thread");
@@ -407,7 +407,7 @@ int cpqfcTS_detect(Scsi_Host_Template *ScsiHostTemplate)
// start our kernel worker thread
- spin_lock_irq(&HostAdapter->host_lock);
+ spin_lock_irq(HostAdapter->host_lock);
launch_FCworker_thread(HostAdapter);
@@ -447,16 +447,16 @@ int cpqfcTS_detect(Scsi_Host_Template *ScsiHostTemplate)
unsigned long stop_time;
- spin_unlock_irq(&HostAdapter->host_lock);
+ spin_unlock_irq(HostAdapter->host_lock);
stop_time = jiffies + 4*HZ;
while ( time_before(jiffies, stop_time) )
schedule(); // (our worker task needs to run)
}
- spin_lock_irq(&HostAdapter->host_lock);
+ spin_lock_irq(HostAdapter->host_lock);
NumberOfAdapters++;
- spin_unlock_irq(&HostAdapter->host_lock);
+ spin_unlock_irq(HostAdapter->host_lock);
} // end of while()
}
@@ -1596,9 +1596,9 @@ int cpqfcTS_eh_device_reset(Scsi_Cmnd *Cmnd)
int retval;
Scsi_Device *SDpnt = Cmnd->device;
// printk(" ENTERING cpqfcTS_eh_device_reset() \n");
- spin_unlock_irq(&Cmnd->host->host_lock);
+ spin_unlock_irq(Cmnd->host->host_lock);
retval = cpqfcTS_TargetDeviceReset( SDpnt, 0);
- spin_lock_irq(&Cmnd->host->host_lock);
+ spin_lock_irq(Cmnd->host->host_lock);
return retval;
}
@@ -1653,7 +1653,7 @@ void cpqfcTS_intr_handler( int irq,
UCHAR IntPending;
ENTER("intr_handler");
- spin_lock_irqsave( &HostAdapter->host_lock, flags);
+ spin_lock_irqsave( HostAdapter->host_lock, flags);
// is this our INT?
IntPending = readb( cpqfcHBA->fcChip.Registers.INTPEND.address);
@@ -1702,7 +1702,7 @@ void cpqfcTS_intr_handler( int irq,
}
}
}
- spin_unlock_irqrestore( &HostAdapter->host_lock, flags);
+ spin_unlock_irqrestore( HostAdapter->host_lock, flags);
LEAVE("intr_handler");
}
diff --git a/drivers/scsi/cpqfcTSworker.c b/drivers/scsi/cpqfcTSworker.c
index 705f435b313b..e4ab3e5262fd 100644
--- a/drivers/scsi/cpqfcTSworker.c
+++ b/drivers/scsi/cpqfcTSworker.c
@@ -200,7 +200,7 @@ void cpqfcTSWorkerThread( void *host)
PCI_TRACE( 0x90)
// first, take the IO lock so the SCSI upper layers can't call
// into our _quecommand function (this also disables INTs)
- spin_lock_irqsave( &HostAdapter->host_lock, flags); // STOP _que function
+ spin_lock_irqsave( HostAdapter->host_lock, flags); // STOP _que function
PCI_TRACE( 0x90)
CPQ_SPINLOCK_HBA( cpqfcHBAdata)
@@ -214,7 +214,7 @@ void cpqfcTSWorkerThread( void *host)
PCI_TRACE( 0x90)
// release the IO lock (and re-enable interrupts)
- spin_unlock_irqrestore( &HostAdapter->host_lock, flags);
+ spin_unlock_irqrestore( HostAdapter->host_lock, flags);
// disable OUR HBA interrupt (keep them off as much as possible
// during error recovery)
@@ -3051,7 +3051,7 @@ void cpqfcTSheartbeat( unsigned long ptr )
goto Skip;
// STOP _que function
- spin_lock_irqsave( &cpqfcHBAdata->HostAdapter->host_lock, flags);
+ spin_lock_irqsave( cpqfcHBAdata->HostAdapter->host_lock, flags);
PCI_TRACE( 0xA8)
@@ -3059,7 +3059,7 @@ void cpqfcTSheartbeat( unsigned long ptr )
cpqfcHBAdata->BoardLock = &BoardLock; // stop Linux SCSI command queuing
// release the IO lock (and re-enable interrupts)
- spin_unlock_irqrestore( &cpqfcHBAdata->HostAdapter->host_lock, flags);
+ spin_unlock_irqrestore( cpqfcHBAdata->HostAdapter->host_lock, flags);
// Ensure no contention from _quecommand or Worker process
CPQ_SPINLOCK_HBA( cpqfcHBAdata)
diff --git a/drivers/scsi/cyberstorm.c b/drivers/scsi/cyberstorm.c
index 2fee344099dc..76f575edc601 100644
--- a/drivers/scsi/cyberstorm.c
+++ b/drivers/scsi/cyberstorm.c
@@ -136,7 +136,7 @@ int __init cyber_esp_detect(Scsi_Host_Template *tpnt)
esp->irq = IRQ_AMIGA_PORTS;
request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ,
- "CyberStorm SCSI", esp_intr);
+ "CyberStorm SCSI", esp->ehost);
/* Figure out our scsi ID on the bus */
/* The DMA cond flag contains a hardcoded jumper bit
* which can be used to select host number 6 or 7.
diff --git a/drivers/scsi/cyberstormII.c b/drivers/scsi/cyberstormII.c
index cfb0be918421..b44eb3ce32cd 100644
--- a/drivers/scsi/cyberstormII.c
+++ b/drivers/scsi/cyberstormII.c
@@ -130,7 +130,7 @@ int __init cyberII_esp_detect(Scsi_Host_Template *tpnt)
esp->irq = IRQ_AMIGA_PORTS;
request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ,
- "CyberStorm SCSI Mk II", esp_intr);
+ "CyberStorm SCSI Mk II", esp->ehost);
/* Figure out our scsi ID on the bus */
esp->scsi_id = 7;
diff --git a/drivers/scsi/dec_esp.c b/drivers/scsi/dec_esp.c
index 1106dab4ff2d..23eabb24f434 100644
--- a/drivers/scsi/dec_esp.c
+++ b/drivers/scsi/dec_esp.c
@@ -189,10 +189,10 @@ int dec_esp_detect(Scsi_Host_Template * tpnt)
esp_initialize(esp);
if (request_irq(esp->irq, esp_intr, SA_INTERRUPT,
- "NCR 53C94 SCSI", NULL))
+ "NCR 53C94 SCSI", esp->ehost))
goto err_dealloc;
if (request_irq(SCSI_DMA_INT, scsi_dma_int, SA_INTERRUPT,
- "JUNKIO SCSI DMA", NULL))
+ "JUNKIO SCSI DMA", esp->ehost))
goto err_free_irq;
}
@@ -253,7 +253,7 @@ int dec_esp_detect(Scsi_Host_Template * tpnt)
esp->dma_advance_sg = 0;
if (request_irq(esp->irq, esp_intr, SA_INTERRUPT,
- "PMAZ_AA", NULL)) {
+ "PMAZ_AA", esp->ehost)) {
esp_deallocate(esp);
release_tc_card(slot);
continue;
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index cdde0295cd3e..fc1c90a0ae4c 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -1158,12 +1158,12 @@ static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
timeout *= HZ;
if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irq(&pHba->host->host_lock);
+ spin_unlock_irq(pHba->host->host_lock);
if (!timeout)
schedule();
else
schedule_timeout(timeout*HZ);
- spin_lock_irq(&pHba->host->host_lock);
+ spin_lock_irq(pHba->host->host_lock);
}
wq_write_lock_irq(&adpt_wq_i2o_post.lock);
__remove_wait_queue(&adpt_wq_i2o_post, &wait);
@@ -1701,7 +1701,7 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32* arg)
}
do {
- spin_lock_irqsave(&pHba->host->host_lock, flags);
+ spin_lock_irqsave(pHba->host->host_lock, flags);
	// This state stops any new commands from entering the
// controller while processing the ioctl
// pHba->state |= DPTI_STATE_IOCTL;
@@ -1709,7 +1709,7 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32* arg)
// the queue empties and stops. We need a way to restart the queue
rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
// pHba->state &= ~DPTI_STATE_IOCTL;
- spin_unlock_irqrestore(&pHba->host->host_lock, flags);
+ spin_unlock_irqrestore(pHba->host->host_lock, flags);
} while(rcode == -ETIMEDOUT);
if(rcode){
@@ -1947,9 +1947,9 @@ static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
break;
}
case I2ORESETCMD:
- spin_lock_irqsave(&pHba->host->host_lock, flags);
+ spin_lock_irqsave(pHba->host->host_lock, flags);
adpt_hba_reset(pHba);
- spin_unlock_irqrestore(&pHba->host->host_lock, flags);
+ spin_unlock_irqrestore(pHba->host->host_lock, flags);
break;
case I2ORESCANCMD:
adpt_rescan(pHba);
@@ -1996,7 +1996,7 @@ static void adpt_isr(int irq, void *dev_id, struct pt_regs *regs)
printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
return;
}
- spin_lock_irqsave(&pHba->host->host_lock, flags);
+ spin_lock_irqsave(pHba->host->host_lock, flags);
while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
m = readl(pHba->reply_port);
if(m == EMPTY_QUEUE){
@@ -2061,7 +2061,7 @@ static void adpt_isr(int irq, void *dev_id, struct pt_regs *regs)
wmb();
rmb();
}
-out: spin_unlock_irqrestore(&pHba->host->host_lock, flags);
+out: spin_unlock_irqrestore(pHba->host->host_lock, flags);
}
static s32 adpt_scsi_to_i2o(adpt_hba* pHba, Scsi_Cmnd* cmd, struct adpt_device* d)
@@ -2334,13 +2334,13 @@ static s32 adpt_rescan(adpt_hba* pHba)
s32 rcode;
ulong flags;
- spin_lock_irqsave(&pHba->host->host_lock, flags);
+ spin_lock_irqsave(pHba->host->host_lock, flags);
if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
goto out;
if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
goto out;
rcode = 0;
-out: spin_unlock_irqrestore(&pHba->host->host_lock, flags);
+out: spin_unlock_irqrestore(pHba->host->host_lock, flags);
return rcode;
}
diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c
index b1f7abcff28c..b821b4c1de21 100644
--- a/drivers/scsi/dtc.c
+++ b/drivers/scsi/dtc.c
@@ -259,7 +259,7 @@ int __init dtc_detect(Scsi_Host_Template * tpnt){
/* With interrupts enabled, it will sometimes hang when doing heavy
* reads. So better not enable them until I finger it out. */
if (instance->irq != IRQ_NONE)
- if (request_irq(instance->irq, do_dtc_intr, SA_INTERRUPT, "dtc")) {
+ if (request_irq(instance->irq, do_dtc_intr, SA_INTERRUPT, "dtc", instance)) {
printk("scsi%d : IRQ%d not free, interrupts disabled\n",
instance->host_no, instance->irq);
instance->irq = IRQ_NONE;
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 519419267185..6b02eb9c97ec 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1712,10 +1712,10 @@ static inline int do_reset(Scsi_Cmnd *SCarg) {
HD(j)->in_reset = TRUE;
- spin_unlock_irq(&sh[j]->host_lock);
+ spin_unlock_irq(sh[j]->host_lock);
time = jiffies;
while ((jiffies - time) < (10 * HZ) && limit++ < 200000) udelay(100L);
- spin_lock_irq(&sh[j]->host_lock);
+ spin_lock_irq(sh[j]->host_lock);
printk("%s: reset, interrupts disabled, loops %d.\n", BN(j), limit);
@@ -2171,9 +2171,9 @@ static void do_interrupt_handler(int irq, void *shap, struct pt_regs *regs) {
/* Check if the interrupt must be processed by this handler */
if ((j = (unsigned int)((char *)shap - sha)) >= num_boards) return;
- spin_lock_irqsave(&sh[j]->host_lock, spin_flags);
+ spin_lock_irqsave(sh[j]->host_lock, spin_flags);
ihdlr(irq, j);
- spin_unlock_irqrestore(&sh[j]->host_lock, spin_flags);
+ spin_unlock_irqrestore(sh[j]->host_lock, spin_flags);
}
int eata2x_release(struct Scsi_Host *shpnt) {
diff --git a/drivers/scsi/eata_dma.c b/drivers/scsi/eata_dma.c
index 6133d6301b2a..ca72457de7af 100644
--- a/drivers/scsi/eata_dma.c
+++ b/drivers/scsi/eata_dma.c
@@ -229,10 +229,11 @@ void eata_int_handler(int, void *, struct pt_regs *);
void do_eata_int_handler(int irq, void *dev_id, struct pt_regs * regs)
{
unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock, flags);
+ struct Scsi_Host *dev = dev_id;
+
+ spin_lock_irqsave(dev->host_lock, flags);
eata_int_handler(irq, dev_id, regs);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
void eata_int_handler(int irq, void *dev_id, struct pt_regs * regs)
@@ -1503,7 +1504,7 @@ int eata_detect(Scsi_Host_Template * tpnt)
if (reg_IRQ[i] >= 1){ /* exchange the interrupt handler which */
free_irq(i, NULL); /* we used for probing with the real one */
request_irq(i, (void *)(do_eata_int_handler), SA_INTERRUPT|SA_SHIRQ,
- "eata_dma", NULL);
+ "eata_dma", first_HBA); /* Check it */
}
}
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 83b4234056c8..2860ec0e2edd 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -110,10 +110,11 @@ void eata_pio_int_handler(int irq, void *dev_id, struct pt_regs * regs);
void do_eata_pio_int_handler(int irq, void *dev_id, struct pt_regs * regs)
{
unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock, flags);
+ struct Scsi_Host *dev = dev_id;
+
+ spin_lock_irqsave(dev->host_lock, flags);
eata_pio_int_handler(irq, dev_id, regs);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
void eata_pio_int_handler(int irq, void *dev_id, struct pt_regs * regs)
@@ -704,36 +705,38 @@ int register_pio_HBA(long base, struct get_conf *gc, Scsi_Host_Template * tpnt)
return (FALSE);
}
+ request_region(base, 8, "eata_pio");
+
+ size = sizeof(hostdata) + (sizeof(struct eata_ccb) * ntohs(gc->queuesiz));
+
+ sh = scsi_register(tpnt, size);
+ if(sh == NULL)
+ {
+ release_region(base, 8);
+ return FALSE;
+ }
+
if (!reg_IRQ[gc->IRQ]) { /* Interrupt already registered ? */
if (!request_irq(gc->IRQ, do_eata_pio_int_handler, SA_INTERRUPT,
- "EATA-PIO", NULL)){
+ "EATA-PIO", sh)){
reg_IRQ[gc->IRQ]++;
if (!gc->IRQ_TR)
reg_IRQL[gc->IRQ] = TRUE; /* IRQ is edge triggered */
} else {
printk("Couldn't allocate IRQ %d, Sorry.\n", gc->IRQ);
+ release_region(base, 8);
return (FALSE);
}
} else { /* More than one HBA on this IRQ */
if (reg_IRQL[gc->IRQ] == TRUE) {
printk("Can't support more than one HBA on this IRQ,\n"
" if the IRQ is edge triggered. Sorry.\n");
+ release_region(base, 8);
return (FALSE);
} else
reg_IRQ[gc->IRQ]++;
}
- request_region(base, 8, "eata_pio");
-
- size = sizeof(hostdata) + (sizeof(struct eata_ccb) * ntohs(gc->queuesiz));
-
- sh = scsi_register(tpnt, size);
- if(sh == NULL)
- {
- release_region(base, 8);
- return FALSE;
- }
-
hd = SD(sh);
memset(hd->ccb, 0, (sizeof(struct eata_ccb) * ntohs(gc->queuesiz)));
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c
index b8dcd2ab89d6..37b3b448fc3f 100644
--- a/drivers/scsi/esp.c
+++ b/drivers/scsi/esp.c
@@ -813,7 +813,7 @@ static int __init esp_register_irq(struct esp *esp)
* sanely maintain.
*/
if (request_irq(esp->ehost->irq, esp_intr,
- SA_SHIRQ, "ESP SCSI", esp)) {
+ SA_SHIRQ, "ESP SCSI", esp->ehost)) {
printk("esp%d: Cannot acquire irq line\n",
esp->esp_id);
return -1;
diff --git a/drivers/scsi/fastlane.c b/drivers/scsi/fastlane.c
index 05c46cdf1ef1..35f9e8f424ac 100644
--- a/drivers/scsi/fastlane.c
+++ b/drivers/scsi/fastlane.c
@@ -167,7 +167,7 @@ int __init fastlane_esp_detect(Scsi_Host_Template *tpnt)
esp->irq = IRQ_AMIGA_PORTS;
esp->slot = board+FASTLANE_ESP_ADDR;
if (request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ,
- "Fastlane SCSI", esp_intr)) {
+ "Fastlane SCSI", esp->ehost)) {
printk(KERN_WARNING "Fastlane: Could not get IRQ%d, aborting.\n", IRQ_AMIGA_PORTS);
goto err_unmap;
}
diff --git a/drivers/scsi/fd_mcs.c b/drivers/scsi/fd_mcs.c
index 8332bf1f47c3..c41d92c3f08e 100644
--- a/drivers/scsi/fd_mcs.c
+++ b/drivers/scsi/fd_mcs.c
@@ -740,9 +740,9 @@ static void fd_mcs_intr( int irq, void *dev_id, struct pt_regs * regs )
#if EVERY_ACCESS
printk( " AFAIL " );
#endif
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(shpnt->host_lock, flags);
my_done( shpnt, DID_BUS_BUSY << 16 );
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
return;
}
current_SC->SCp.phase = in_selection;
@@ -766,9 +766,9 @@ static void fd_mcs_intr( int irq, void *dev_id, struct pt_regs * regs )
#if EVERY_ACCESS
printk( " SFAIL " );
#endif
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(shpnt->host_lock, flags);
my_done( shpnt, DID_NO_CONNECT << 16 );
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
return;
} else {
#if EVERY_ACCESS
@@ -1117,11 +1117,11 @@ static void fd_mcs_intr( int irq, void *dev_id, struct pt_regs * regs )
#if EVERY_ACCESS
printk( "BEFORE MY_DONE. . ." );
#endif
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(shpnt->host_lock, flags);
my_done( shpnt,
(current_SC->SCp.Status & 0xff)
| ((current_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16) );
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
#if EVERY_ACCESS
printk( "RETURNING.\n" );
#endif
@@ -1342,9 +1342,9 @@ int fd_mcs_abort( Scsi_Cmnd *SCpnt)
restore_flags( flags );
/* Aborts are not done well. . . */
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(shpnt->host_lock, flags);
my_done( shpnt, DID_ABORT << 16 );
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(shpnt->host_lock, flags);
return SCSI_ABORT_SUCCESS;
}
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index 61e3a19a6c10..f11acf771c1c 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -1266,9 +1266,9 @@ void do_fdomain_16x0_intr( int irq, void *dev_id, struct pt_regs * regs )
#if EVERY_ACCESS
printk( " AFAIL " );
#endif
- spin_lock_irqsave(&current_SC->host->host_lock, flags);
+ spin_lock_irqsave(current_SC->host->host_lock, flags);
my_done( DID_BUS_BUSY << 16 );
- spin_unlock_irqrestore(&current_SC->host->host_lock, flags);
+ spin_unlock_irqrestore(current_SC->host->host_lock, flags);
return;
}
current_SC->SCp.phase = in_selection;
@@ -1292,9 +1292,9 @@ void do_fdomain_16x0_intr( int irq, void *dev_id, struct pt_regs * regs )
#if EVERY_ACCESS
printk( " SFAIL " );
#endif
- spin_lock_irqsave(&current_SC->host->host_lock, flags);
+ spin_lock_irqsave(current_SC->host->host_lock, flags);
my_done( DID_NO_CONNECT << 16 );
- spin_unlock_irqrestore(&current_SC->host->host_lock, flags);
+ spin_unlock_irqrestore(current_SC->host->host_lock, flags);
return;
} else {
#if EVERY_ACCESS
@@ -1639,10 +1639,10 @@ void do_fdomain_16x0_intr( int irq, void *dev_id, struct pt_regs * regs )
#if EVERY_ACCESS
printk( "BEFORE MY_DONE. . ." );
#endif
- spin_lock_irqsave(&current_SC->host->host_lock, flags);
+ spin_lock_irqsave(current_SC->host->host_lock, flags);
my_done( (current_SC->SCp.Status & 0xff)
| ((current_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16) );
- spin_unlock_irqrestore(&current_SC->host->host_lock, flags);
+ spin_unlock_irqrestore(current_SC->host->host_lock, flags);
#if EVERY_ACCESS
printk( "RETURNING.\n" );
#endif
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 8040de54c488..28b8ea274682 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -629,19 +629,19 @@ static unchar gdth_direction_tab[0x100] = {
#define GDTH_LOCK_HA(ha,flags) spin_lock_irqsave(&(ha)->smp_lock,flags)
#define GDTH_UNLOCK_HA(ha,flags) spin_unlock_irqrestore(&(ha)->smp_lock,flags)
-#define GDTH_LOCK_SCSI_DONE(flags) spin_lock_irqsave(&io_request_lock,flags)
-#define GDTH_UNLOCK_SCSI_DONE(flags) spin_unlock_irqrestore(&io_request_lock,flags)
-#define GDTH_LOCK_SCSI_DOCMD() spin_lock_irq(&io_request_lock)
-#define GDTH_UNLOCK_SCSI_DOCMD() spin_unlock_irq(&io_request_lock)
+#define GDTH_LOCK_SCSI_DONE(dev, flags) spin_lock_irqsave(dev->host_lock,flags)
+#define GDTH_UNLOCK_SCSI_DONE(dev, flags) spin_unlock_irqrestore(dev->host_lock,flags)
+#define GDTH_LOCK_SCSI_DOCMD(dev) spin_lock_irq(dev->host_lock)
+#define GDTH_UNLOCK_SCSI_DOCMD(dev) spin_unlock_irq(dev->host_lock)
#else
#define GDTH_INIT_LOCK_HA(ha) do {} while (0)
#define GDTH_LOCK_HA(ha,flags) do {save_flags(flags); cli();} while (0)
#define GDTH_UNLOCK_HA(ha,flags) do {restore_flags(flags);} while (0)
-#define GDTH_LOCK_SCSI_DONE(flags) do {} while (0)
-#define GDTH_UNLOCK_SCSI_DONE(flags) do {} while (0)
-#define GDTH_LOCK_SCSI_DOCMD() do {} while (0)
-#define GDTH_UNLOCK_SCSI_DOCMD() do {} while (0)
+#define GDTH_LOCK_SCSI_DONE(dev, flags) do {} while (0)
+#define GDTH_UNLOCK_SCSI_DONE(dev, flags) do {} while (0)
+#define GDTH_LOCK_SCSI_DOCMD(dev) do {} while (0)
+#define GDTH_UNLOCK_SCSI_DOCMD(dev) do {} while (0)
#endif
/* LILO and modprobe/insmod parameters */
@@ -3339,9 +3339,9 @@ static void gdth_interrupt(int irq,struct pt_regs *regs)
if (rval == 2) {
gdth_putq(hanum,scp,scp->SCp.this_residual);
} else if (rval == 1) {
- GDTH_LOCK_SCSI_DONE(flags);
+ GDTH_LOCK_SCSI_DONE(scp->host, flags);
scp->scsi_done(scp);
- GDTH_UNLOCK_SCSI_DONE(flags);
+ GDTH_UNLOCK_SCSI_DONE(scp->host,flags);
}
gdth_next(hanum);
}
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index eae6f5c31c79..0367cc228db9 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -40,9 +40,9 @@ static void gvp11_intr (int irq, void *dummy, struct pt_regs *fp)
if (!(status & GVP11_DMAC_INT_PENDING))
continue;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(instance->host_lock, flags);
wd33c93_intr (instance);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(instance->host_lock, flags);
}
}
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 43127b530fe7..0153b018ecc9 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -41,8 +41,8 @@
/* current version of this driver-source: */
#define IBMMCA_SCSI_DRIVER_VERSION "4.0b"
-#define IBMLOCK spin_lock_irqsave(&io_request_lock, flags);
-#define IBMUNLOCK spin_unlock_irqrestore(&io_request_lock, flags);
+#define IBMLOCK(dev) spin_lock_irqsave(dev->host_lock, flags);
+#define IBMUNLOCK(dev) spin_unlock_irqrestore(dev->host_lock, flags);
/* driver configuration */
#define IM_MAX_HOSTS 8 /* maximum number of host adapters */
@@ -505,14 +505,14 @@ static void interrupt_handler (int irq, void *dev_id, struct pt_regs *regs)
Scsi_Cmnd *cmd;
int lastSCSI;
- IBMLOCK
+ IBMLOCK(dev_id)
/* search for one adapter-response on shared interrupt */
for (host_index=0;
hosts[host_index] && !(inb(IM_STAT_REG(host_index)) & IM_INTR_REQUEST);
host_index++);
/* return if some other device on this IRQ caused the interrupt */
if (!hosts[host_index]) {
- IBMUNLOCK
+ IBMUNLOCK(dev_id)
return;
}
@@ -521,15 +521,15 @@ static void interrupt_handler (int irq, void *dev_id, struct pt_regs *regs)
if ((reset_status(host_index) == IM_RESET_NOT_IN_PROGRESS_NO_INT)||
(reset_status(host_index) == IM_RESET_FINISHED_OK_NO_INT)) {
reset_status(host_index) = IM_RESET_NOT_IN_PROGRESS;
- IBMUNLOCK
+ IBMUNLOCK(dev_id)
return;
}
/*must wait for attention reg not busy, then send EOI to subsystem */
while (1) {
if (!(inb (IM_STAT_REG(host_index)) & IM_BUSY)) break;
- IBMUNLOCK /* cycle interrupt */
- IBMLOCK
+ IBMUNLOCK(dev_id) /* cycle interrupt */
+ IBMLOCK(dev_id)
}
ihost_index=host_index;
/*get command result and logical device */
@@ -539,7 +539,7 @@ static void interrupt_handler (int irq, void *dev_id, struct pt_regs *regs)
/* get the last_scsi_command here */
lastSCSI = last_scsi_command(ihost_index)[ldn];
outb (IM_EOI | ldn, IM_ATTN_REG(ihost_index));
- IBMUNLOCK
+ IBMUNLOCK(dev_id)
/*these should never happen (hw fails, or a local programming bug) */
if (!global_command_error_excuse) {
switch (cmd_result) {
@@ -731,14 +731,14 @@ static void issue_cmd (int host_index, unsigned long cmd_reg,
unsigned long flags;
/* must wait for attention reg not busy */
while (1) {
- IBMLOCK
+ IBMLOCK(hosts[host_index])
if (!(inb(IM_STAT_REG(host_index)) & IM_BUSY)) break;
- IBMUNLOCK
+ IBMUNLOCK(hosts[host_index])
}
/* write registers and enable system interrupts */
outl (cmd_reg, IM_CMD_REG(host_index));
outb (attn_reg, IM_ATTN_REG(host_index));
- IBMUNLOCK
+ IBMUNLOCK(hosts[host_index])
return;
}
@@ -1442,7 +1442,7 @@ static int ibmmca_getinfo (char *buf, int slot, void *dev)
unsigned int pos[8];
unsigned long flags;
- IBMLOCK
+ IBMLOCK(dev)
shpnt = dev; /* assign host-structure to local pointer */
len = 0; /* set filled text-buffer index to 0 */
/* get the _special contents of the hostdata structure */
@@ -1496,7 +1496,7 @@ static int ibmmca_getinfo (char *buf, int slot, void *dev)
while ( len % sizeof( int ) != ( sizeof ( int ) - 1 ) )
len += sprintf (buf+len, " ");
len += sprintf (buf+len, "\n");
- IBMUNLOCK
+ IBMUNLOCK(shpnt)
return len;
}
@@ -2192,7 +2192,7 @@ int ibmmca_abort (Scsi_Cmnd * cmd)
#ifdef IM_DEBUG_PROBE
printk("IBM MCA SCSI: Abort subroutine called...\n");
#endif
- IBMLOCK
+ IBMLOCK(cmd->host)
shpnt = cmd->host;
/* search for the right hostadapter */
for (host_index = 0; hosts[host_index] && hosts[host_index]->host_no != shpnt->host_no; host_index++);
@@ -2201,7 +2201,7 @@ int ibmmca_abort (Scsi_Cmnd * cmd)
cmd->result = DID_NO_CONNECT << 16;
if (cmd->scsi_done) (cmd->scsi_done) (cmd);
shpnt = cmd->host;
- IBMUNLOCK
+ IBMUNLOCK(shpnt)
#ifdef IM_DEBUG_PROBE
printk("IBM MCA SCSI: Abort adapter selection failed!\n");
#endif
@@ -2224,7 +2224,7 @@ int ibmmca_abort (Scsi_Cmnd * cmd)
/*if cmd for this ldn has already finished, no need to abort */
if (!ld(host_index)[ldn].cmd) {
- IBMUNLOCK
+ IBMUNLOCK(shpnt)
return SCSI_ABORT_NOT_RUNNING;
}
@@ -2244,13 +2244,13 @@ int ibmmca_abort (Scsi_Cmnd * cmd)
while (1) {
if (!(inb (IM_STAT_REG(host_index)) & IM_BUSY))
break;
- IBMUNLOCK
- IBMLOCK
+ IBMUNLOCK(shpnt)
+ IBMLOCK(shpnt)
}
/* write registers and enable system interrupts */
outl (imm_command, IM_CMD_REG(host_index));
outb (IM_IMM_CMD | ldn, IM_ATTN_REG(host_index));
- IBMUNLOCK
+ IBMUNLOCK(shpnt)
#ifdef IM_DEBUG_PROBE
printk("IBM MCA SCSI: Abort queued to adapter...\n");
#endif
@@ -2262,21 +2262,21 @@ int ibmmca_abort (Scsi_Cmnd * cmd)
/*if abort went well, call saved done, then return success or error */
if (cmd->result == (DID_ABORT << 16)) {
- IBMLOCK
+ IBMLOCK(shpnt)
cmd->result |= DID_ABORT << 16;
if (cmd->scsi_done) (cmd->scsi_done) (cmd);
ld(host_index)[ldn].cmd = NULL;
- IBMUNLOCK
+ IBMUNLOCK(shpnt)
#ifdef IM_DEBUG_PROBE
printk("IBM MCA SCSI: Abort finished with success.\n");
#endif
return SCSI_ABORT_SUCCESS;
} else {
- IBMLOCK
+ IBMLOCK(shpnt)
cmd->result |= DID_NO_CONNECT << 16;
if (cmd->scsi_done) (cmd->scsi_done) (cmd);
ld(host_index)[ldn].cmd = NULL;
- IBMUNLOCK
+ IBMUNLOCK(shpnt)
#ifdef IM_DEBUG_PROBE
printk("IBM MCA SCSI: Abort failed.\n");
#endif
@@ -2297,7 +2297,7 @@ int ibmmca_reset (Scsi_Cmnd * cmd, unsigned int reset_flags)
printk("IBM MCA SCSI: Reset called with NULL-command!\n");
return(SCSI_RESET_SNOOZE);
}
- IBMLOCK
+ IBMLOCK(cmd->host)
ticks = IM_RESET_DELAY*HZ;
shpnt = cmd->host;
/* search for the right hostadapter */
@@ -2308,7 +2308,7 @@ int ibmmca_reset (Scsi_Cmnd * cmd, unsigned int reset_flags)
if (local_checking_phase_flag(host_index)) {
printk("IBM MCA SCSI: unable to reset while checking devices.\n");
- IBMUNLOCK
+ IBMUNLOCK(shpnt)
return SCSI_RESET_SNOOZE;
}
@@ -2324,8 +2324,8 @@ int ibmmca_reset (Scsi_Cmnd * cmd, unsigned int reset_flags)
while (1) {
if (!(inb (IM_STAT_REG(host_index)) & IM_BUSY))
break;
- IBMUNLOCK
- IBMLOCK
+ IBMUNLOCK(shpnt)
+ IBMLOCK(shpnt)
}
/*write registers and enable system interrupts */
outl (imm_command, IM_CMD_REG(host_index));
@@ -2342,7 +2342,7 @@ int ibmmca_reset (Scsi_Cmnd * cmd, unsigned int reset_flags)
printk("IBM MCA SCSI: reset did not complete within %d seconds.\n",
IM_RESET_DELAY);
reset_status(host_index) = IM_RESET_FINISHED_FAIL;
- IBMUNLOCK
+ IBMUNLOCK(shpnt)
return SCSI_RESET_ERROR;
}
@@ -2360,13 +2360,13 @@ int ibmmca_reset (Scsi_Cmnd * cmd, unsigned int reset_flags)
/* if reset failed, just return an error */
if (reset_status(host_index) == IM_RESET_FINISHED_FAIL) {
printk("IBM MCA SCSI: reset failed.\n");
- IBMUNLOCK
+ IBMUNLOCK(shpnt)
return SCSI_RESET_ERROR;
}
/* so reset finished ok - call outstanding done's, and return success */
printk ("IBM MCA SCSI: Reset successfully completed.\n");
- IBMUNLOCK
+ IBMUNLOCK(shpnt)
for (i = 0; i < MAX_LOG_DEV; i++) {
cmd_aid = ld(host_index)[i].cmd;
if (cmd_aid && cmd_aid->scsi_done) {
@@ -2454,8 +2454,8 @@ int ibmmca_proc_info (char *buffer, char **start, off_t offset, int length,
unsigned long flags;
int max_pun;
- IBMLOCK
for (i = 0; hosts[i] && hosts[i]->host_no != hostno; i++);
+ IBMLOCK(hosts[i]) /* Check it */
shpnt = hosts[i];
host_index = i;
if (!shpnt) {
@@ -2537,7 +2537,7 @@ int ibmmca_proc_info (char *buffer, char **start, off_t offset, int length,
*start = buffer + offset;
len -= offset;
if (len > length) len = length;
- IBMUNLOCK
+ IBMUNLOCK(shpnt)
return len;
}
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 933d02eb8ecc..525544d8a5a6 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -940,10 +940,10 @@ static void imm_interrupt(void *data)
if (cmd->SCp.phase > 0)
imm_pb_release(cmd->host->unique_id);
- spin_lock_irqsave(&host->host_lock, flags);
+ spin_lock_irqsave(host->host_lock, flags);
tmp->cur_cmd = 0;
cmd->scsi_done(cmd);
- spin_unlock_irqrestore(&host->host_lock, flags);
+ spin_unlock_irqrestore(host->host_lock, flags);
return;
}
diff --git a/drivers/scsi/in2000.h b/drivers/scsi/in2000.h
index ac72f6a29806..4f12b8fd5e94 100644
--- a/drivers/scsi/in2000.h
+++ b/drivers/scsi/in2000.h
@@ -393,8 +393,8 @@ struct IN2000_hostdata {
# define in2000__INITFUNC(function) __initfunc(function)
# define in2000__INIT __init
# define in2000__INITDATA __initdata
-# define CLISPIN_LOCK(host,flags) spin_lock_irqsave(&host->host_lock, flags)
-# define CLISPIN_UNLOCK(host,flags) spin_unlock_irqrestore(&host->host_lock, \
+# define CLISPIN_LOCK(host,flags) spin_lock_irqsave(host->host_lock, flags)
+# define CLISPIN_UNLOCK(host,flags) spin_unlock_irqrestore(host->host_lock, \
flags)
int in2000_detect(Scsi_Host_Template *) in2000__INIT;
diff --git a/drivers/scsi/ini9100u.c b/drivers/scsi/ini9100u.c
index 17ac962a89bd..1fba128f8e5b 100644
--- a/drivers/scsi/ini9100u.c
+++ b/drivers/scsi/ini9100u.c
@@ -699,113 +699,121 @@ static void i91uSCBPost(BYTE * pHcb, BYTE * pScb)
static void i91u_intr0(int irqno, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
+ struct Scsi_Host *dev = dev_id;
+
if (tul_hcs[0].HCS_Intr != irqno)
return;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
tul_isr(&tul_hcs[0]);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
static void i91u_intr1(int irqno, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
+ struct Scsi_Host *dev = dev_id;
+
if (tul_hcs[1].HCS_Intr != irqno)
return;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
tul_isr(&tul_hcs[1]);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
static void i91u_intr2(int irqno, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
+ struct Scsi_Host *dev = dev_id;
+
if (tul_hcs[2].HCS_Intr != irqno)
return;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
tul_isr(&tul_hcs[2]);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
static void i91u_intr3(int irqno, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
+ struct Scsi_Host *dev = dev_id;
+
if (tul_hcs[3].HCS_Intr != irqno)
return;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
tul_isr(&tul_hcs[3]);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
static void i91u_intr4(int irqno, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
+ struct Scsi_Host *dev = dev_id;
+
if (tul_hcs[4].HCS_Intr != irqno)
return;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
tul_isr(&tul_hcs[4]);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
static void i91u_intr5(int irqno, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
+ struct Scsi_Host *dev = dev_id;
+
if (tul_hcs[5].HCS_Intr != irqno)
return;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
tul_isr(&tul_hcs[5]);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
static void i91u_intr6(int irqno, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
+ struct Scsi_Host *dev = dev_id;
+
if (tul_hcs[6].HCS_Intr != irqno)
return;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
tul_isr(&tul_hcs[6]);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
static void i91u_intr7(int irqno, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
+ struct Scsi_Host *dev = dev_id;
+
if (tul_hcs[7].HCS_Intr != irqno)
return;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
tul_isr(&tul_hcs[7]);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
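The eight i91u_intrN wrappers above all follow the same recipe: recover the Scsi_Host from dev_id and take its host_lock instead of the global io_request_lock. Stripped of the per-adapter details, the pattern looks like the following sketch (example_intr and example_isr are illustrative names, not symbols from the patch):

/* Generic shape of the converted interrupt wrappers. */
static void example_intr(int irqno, void *dev_id, struct pt_regs *regs)
{
        struct Scsi_Host *dev = dev_id;         /* registered via request_irq() */
        unsigned long flags;

        spin_lock_irqsave(dev->host_lock, flags);
        example_isr(dev);                       /* per-adapter service routine */
        spin_unlock_irqrestore(dev->host_lock, flags);
}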
/*
diff --git a/drivers/scsi/inia100.c b/drivers/scsi/inia100.c
index 47cf2b4dfb1f..82ffd13ac384 100644
--- a/drivers/scsi/inia100.c
+++ b/drivers/scsi/inia100.c
@@ -717,19 +717,19 @@ int inia100_biosparam(Scsi_Disk * disk, kdev_t dev, int *info_array)
}
-static void subIntr(ORC_HCS * pHCB, int irqno)
+static void subIntr(ORC_HCS * pHCB, int irqno, struct Scsi_Host *dev)
{
unsigned long flags;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
if (pHCB->HCS_Intr != irqno) {
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
return;
}
orc_interrupt(pHCB);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
/*
@@ -737,42 +737,42 @@ static void subIntr(ORC_HCS * pHCB, int irqno)
*/
static void inia100_intr0(int irqno, void *dev_id, struct pt_regs *regs)
{
- subIntr(&orc_hcs[0], irqno);
+ subIntr(&orc_hcs[0], irqno, dev_id);
}
static void inia100_intr1(int irqno, void *dev_id, struct pt_regs *regs)
{
- subIntr(&orc_hcs[1], irqno);
+ subIntr(&orc_hcs[1], irqno, dev_id);
}
static void inia100_intr2(int irqno, void *dev_id, struct pt_regs *regs)
{
- subIntr(&orc_hcs[2], irqno);
+ subIntr(&orc_hcs[2], irqno, dev_id);
}
static void inia100_intr3(int irqno, void *dev_id, struct pt_regs *regs)
{
- subIntr(&orc_hcs[3], irqno);
+ subIntr(&orc_hcs[3], irqno, dev_id);
}
static void inia100_intr4(int irqno, void *dev_id, struct pt_regs *regs)
{
- subIntr(&orc_hcs[4], irqno);
+ subIntr(&orc_hcs[4], irqno, dev_id);
}
static void inia100_intr5(int irqno, void *dev_id, struct pt_regs *regs)
{
- subIntr(&orc_hcs[5], irqno);
+ subIntr(&orc_hcs[5], irqno, dev_id);
}
static void inia100_intr6(int irqno, void *dev_id, struct pt_regs *regs)
{
- subIntr(&orc_hcs[6], irqno);
+ subIntr(&orc_hcs[6], irqno, dev_id);
}
static void inia100_intr7(int irqno, void *dev_id, struct pt_regs *regs)
{
- subIntr(&orc_hcs[7], irqno);
+ subIntr(&orc_hcs[7], irqno, dev_id);
}
/*
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index a929633a2a07..258af5f19516 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -1769,13 +1769,13 @@ ips_queue(Scsi_Cmnd *SC, void (*done) (Scsi_Cmnd *)) {
char *kern_area;
u_int32_t datasize;
- spin_unlock_irq(&SC->host->host_lock);
+ spin_unlock_irq(SC->host->host_lock);
/* wait for the command to finish */
down(&ha->ioctl_sem);
/* reobtain the lock */
- spin_lock_irq(&SC->host->host_lock);
+ spin_lock_irq(SC->host->host_lock);
/* command finished -- copy back */
user_area = *((char **) &SC->cmnd[4]);
@@ -1916,24 +1916,24 @@ do_ipsintr(int irq, void *dev_id, struct pt_regs *regs) {
METHOD_TRACE("do_ipsintr", 2);
- spin_lock_irqsave(&host->host_lock, cpu_flags);
+ spin_lock_irqsave(host->host_lock, cpu_flags);
if (test_and_set_bit(IPS_IN_INTR, &ha->flags)) {
- spin_unlock_irqrestore(&host->host_lock, cpu_flags);
+ spin_unlock_irqrestore(host->host_lock, cpu_flags);
return ;
}
if (!ha) {
clear_bit(IPS_IN_INTR, &ha->flags);
- spin_unlock_irqrestore(&host->host_lock, cpu_flags);
+ spin_unlock_irqrestore(host->host_lock, cpu_flags);
return;
}
if (!ha->active) {
clear_bit(IPS_IN_INTR, &ha->flags);
- spin_unlock_irqrestore(&host->host_lock, cpu_flags);
+ spin_unlock_irqrestore(host->host_lock, cpu_flags);
return;
}
@@ -1942,7 +1942,7 @@ do_ipsintr(int irq, void *dev_id, struct pt_regs *regs) {
clear_bit(IPS_IN_INTR, &ha->flags);
- spin_unlock_irqrestore(&host->host_lock, cpu_flags);
+ spin_unlock_irqrestore(host->host_lock, cpu_flags);
/* start the next command */
ips_next(ha, IPS_INTR_ON);
@@ -2487,7 +2487,7 @@ ips_make_passthru(ips_ha_t *ha, Scsi_Cmnd *SC, ips_scb_t *scb, int intr) {
task.data = (void *) &flash_data;
/* Unlock the per-board lock */
- spin_unlock_irq(&SC->host->host_lock);
+ spin_unlock_irq(SC->host->host_lock);
queue_task(&task, &tq_immediate);
mark_bh(IMMEDIATE_BH);
@@ -2496,7 +2496,7 @@ ips_make_passthru(ips_ha_t *ha, Scsi_Cmnd *SC, ips_scb_t *scb, int intr) {
down(&ha->flash_ioctl_sem);
/* Obtain the per-board lock */
- spin_lock_irq(&SC->host->host_lock);
+ spin_lock_irq(SC->host->host_lock);
return (flash_data.retcode);
}
@@ -2604,7 +2604,7 @@ ips_make_passthru(ips_ha_t *ha, Scsi_Cmnd *SC, ips_scb_t *scb, int intr) {
task.data = (void *) &flash_data;
/* Unlock the per-board lock */
- spin_unlock_irq(&SC->host->host_lock);
+ spin_unlock_irq(SC->host->host_lock);
queue_task(&task, &tq_immediate);
mark_bh(IMMEDIATE_BH);
@@ -2613,7 +2613,7 @@ ips_make_passthru(ips_ha_t *ha, Scsi_Cmnd *SC, ips_scb_t *scb, int intr) {
down(&ha->flash_ioctl_sem);
/* Obtain the per-board lock */
- spin_lock_irq(&SC->host->host_lock);
+ spin_lock_irq(SC->host->host_lock);
return (flash_data.retcode);
}
@@ -3585,7 +3585,7 @@ ips_next(ips_ha_t *ha, int intr) {
* this command won't time out
*/
if (intr == IPS_INTR_ON) {
- spin_lock_irqsave(&host->host_lock, cpu_flags2);
+ spin_lock_irqsave(host->host_lock, cpu_flags2);
intr_status = IPS_INTR_IORL;
} else {
intr_status = intr;
@@ -3610,7 +3610,7 @@ ips_next(ips_ha_t *ha, int intr) {
}
if (intr == IPS_INTR_ON)
- spin_unlock_irqrestore(&host->host_lock, cpu_flags2);
+ spin_unlock_irqrestore(host->host_lock, cpu_flags2);
#ifndef NO_IPS_CMDLINE
/*
@@ -6625,7 +6625,7 @@ ips_wait(ips_ha_t *ha, int time, int intr) {
* We were called under the HA lock so we can assume that interrupts
* are masked.
*/
- spin_lock(&host->host_lock);
+ spin_lock(host->host_lock);
while (test_and_set_bit(IPS_IN_INTR, &ha->flags))
udelay(1000);
@@ -6634,7 +6634,7 @@ ips_wait(ips_ha_t *ha, int time, int intr) {
clear_bit(IPS_IN_INTR, &ha->flags);
- spin_unlock(&host->host_lock);
+ spin_unlock(host->host_lock);
}
udelay(1000); /* 1 millisecond */

diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 3eee69724bc7..cca7a81e94c3 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -120,7 +120,7 @@ int jazz_esp_detect(Scsi_Host_Template *tpnt)
esp->irq = JAZZ_SCSI_IRQ;
request_irq(JAZZ_SCSI_IRQ, esp_intr, SA_INTERRUPT, "JAZZ SCSI",
- NULL);
+ esp->ehost);
/*
* FIXME, look if the scsi id is available from NVRAM
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index d57ce9d6814a..068b426b692f 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -292,10 +292,11 @@ static void
do_mac53c94_interrupt(int irq, void *dev_id, struct pt_regs *ptregs)
{
unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock, flags);
+ struct Scsi_Host *dev = ((struct fsc_state *) dev_id)->current_req->host;
+
+ spin_lock_irqsave(dev->host_lock, flags);
mac53c94_interrupt(irq, dev_id, ptregs);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
static void
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 739cbddc97a4..e94f828cd8b6 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -417,9 +417,9 @@ int mac_esp_detect(Scsi_Host_Template * tpnt)
esp->irq = IRQ_MAC_SCSI;
- request_irq(IRQ_MAC_SCSI, esp_intr, 0, "Mac ESP SCSI", esp);
+ request_irq(IRQ_MAC_SCSI, esp_intr, 0, "Mac ESP SCSI", esp->ehost);
#if 0 /* conflicts with IOP ADB */
- request_irq(IRQ_MAC_SCSIDRQ, fake_drq, 0, "Mac ESP DRQ", esp);
+ request_irq(IRQ_MAC_SCSIDRQ, fake_drq, 0, "Mac ESP DRQ", esp->ehost);
#endif
if (macintosh_config->scsi_type == MAC_SCSI_QUADRA) {
@@ -433,7 +433,7 @@ int mac_esp_detect(Scsi_Host_Template * tpnt)
esp->irq = IRQ_MAC_SCSIDRQ;
#if 0 /* conflicts with IOP ADB */
- request_irq(IRQ_MAC_SCSIDRQ, esp_intr, 0, "Mac ESP SCSI 2", esp);
+ request_irq(IRQ_MAC_SCSIDRQ, esp_intr, 0, "Mac ESP SCSI 2", esp->ehost);
#endif
esp->cfreq = 25000000;
diff --git a/drivers/scsi/mca_53c9x.c b/drivers/scsi/mca_53c9x.c
index 1ceb651fce39..d6d0d39da3ec 100644
--- a/drivers/scsi/mca_53c9x.c
+++ b/drivers/scsi/mca_53c9x.c
@@ -153,7 +153,7 @@ int mca_esp_detect(Scsi_Host_Template *tpnt)
esp->slot = slot;
if (request_irq(esp->irq, esp_intr, 0,
- "NCR 53c9x SCSI", esp_intr))
+ "NCR 53c9x SCSI", esp->ehost))
{
printk("Unable to request IRQ %d.\n", esp->irq);
esp_deallocate(esp);
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index ddd0bcd0a2c3..05aa504ff3d8 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -586,10 +586,10 @@ MODULE_LICENSE ("GPL");
#define DRIVER_LOCK(p)
#define DRIVER_UNLOCK(p)
#define IO_LOCK_T unsigned long io_flags = 0
-#define IO_LOCK(host) spin_lock_irqsave(&(host)->host_lock,io_flags)
-#define IO_UNLOCK(host) spin_unlock_irqrestore(&(host)->host_lock,io_flags)
-#define IO_LOCK_IRQ(host) spin_lock_irq(&(host)->host_lock)
-#define IO_UNLOCK_IRQ(host) spin_unlock_irq(&(host)->host_lock)
+#define IO_LOCK(host) spin_lock_irqsave(host->host_lock,io_flags)
+#define IO_UNLOCK(host) spin_unlock_irqrestore(host->host_lock,io_flags)
+#define IO_LOCK_IRQ(host) spin_lock_irq(host->host_lock)
+#define IO_UNLOCK_IRQ(host) spin_unlock_irq(host->host_lock)
#define queue_task_irq(a,b) queue_task(a,b)
#define queue_task_irq_off(a,b) queue_task(a,b)
@@ -614,8 +614,8 @@ MODULE_DESCRIPTION ("LSI Logic MegaRAID driver");
#define DRIVER_LOCK(p)
#define DRIVER_UNLOCK(p)
#define IO_LOCK_T unsigned long io_flags = 0
-#define IO_LOCK(host) spin_lock_irqsave(&io_request_lock,io_flags);
-#define IO_UNLOCK(host) spin_unlock_irqrestore(&io_request_lock,io_flags);
+#define IO_LOCK(host) spin_lock_irqsave(host->host_lock,io_flags);
+#define IO_UNLOCK(host) spin_unlock_irqrestore(host->host_lock,io_flags);
#define pci_free_consistent(a,b,c,d)
#define pci_unmap_single(a,b,c,d)
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index a8d363eb9fd7..48792431fd27 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -795,15 +795,15 @@ finish_cmds(void *data)
unsigned long flags;
for (;;) {
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(ms->host->host_lock, flags);
cmd = ms->completed_q;
if (cmd == NULL) {
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(ms->host->host_lock, flags);
break;
}
ms->completed_q = (Scsi_Cmnd *) cmd->host_scribble;
(*cmd->scsi_done)(cmd);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(ms->host->host_lock, flags);
}
}
#endif /* MESH_NEW_STYLE_EH */
@@ -1458,10 +1458,11 @@ static void
do_mesh_interrupt(int irq, void *dev_id, struct pt_regs *ptregs)
{
unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock, flags);
+ struct Scsi_Host *dev = ((struct mesh_state *)dev_id)->host;
+
+ spin_lock_irqsave(dev->host_lock, flags);
mesh_interrupt(irq, dev_id, ptregs);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
static void handle_error(struct mesh_state *ms)
diff --git a/drivers/scsi/oktagon_esp.c b/drivers/scsi/oktagon_esp.c
index e77705b26147..61d7032e05d0 100644
--- a/drivers/scsi/oktagon_esp.c
+++ b/drivers/scsi/oktagon_esp.c
@@ -198,7 +198,7 @@ int oktagon_esp_detect(Scsi_Host_Template *tpnt)
esp->irq = IRQ_AMIGA_PORTS;
request_irq(IRQ_AMIGA_PORTS, esp_intr, SA_SHIRQ,
- "BSC Oktagon SCSI", esp_intr);
+ "BSC Oktagon SCSI", esp->ehost);
/* Figure out our scsi ID on the bus */
esp->scsi_id = 7;
diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c
index 83e1e5934bfa..a0d3ae9c70f3 100644
--- a/drivers/scsi/pas16.c
+++ b/drivers/scsi/pas16.c
@@ -451,7 +451,7 @@ int __init pas16_detect(Scsi_Host_Template * tpnt)
instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS);
if (instance->irq != IRQ_NONE)
- if (request_irq(instance->irq, do_pas16_intr, SA_INTERRUPT, "pas16", NULL)) {
+ if (request_irq(instance->irq, do_pas16_intr, SA_INTERRUPT, "pas16", instance)) {
printk("scsi%d : IRQ%d not free, interrupts disabled\n",
instance->host_no, instance->irq);
instance->irq = IRQ_NONE;
diff --git a/drivers/scsi/pci2000.c b/drivers/scsi/pci2000.c
index 2c9b1d44d559..0e86e7362f90 100644
--- a/drivers/scsi/pci2000.c
+++ b/drivers/scsi/pci2000.c
@@ -296,7 +296,7 @@ static void Irq_Handler (int irq, void *dev_id, struct pt_regs *regs)
goto out;
}
- spin_lock_irqsave(&shost->host_lock, flags);
+ spin_lock_irqsave(shost->host_lock, flags);
padapter = HOSTDATA(shost);
tag0 = tag & 0x7F; // mask off the error bit
diff --git a/drivers/scsi/pci2220i.c b/drivers/scsi/pci2220i.c
index f96194aaeaf0..27607cd040d1 100644
--- a/drivers/scsi/pci2220i.c
+++ b/drivers/scsi/pci2220i.c
@@ -1162,7 +1162,7 @@ static void TimerExpiry (unsigned long data)
* Disable interrupts, if they aren't already disabled and acquire
* the I/O spinlock.
*/
- spin_lock_irqsave (&host->host_lock, flags);
+ spin_lock_irqsave (host->host_lock, flags);
DEB (printk ("\nPCI2220I: Timeout expired "));
if ( padapter->failinprog )
@@ -1296,7 +1296,7 @@ timerExpiryDone:;
* which will enable interrupts if and only if they were
* enabled on entry.
*/
- spin_unlock_irqrestore (&host->host_lock, flags);
+ spin_unlock_irqrestore (host->host_lock, flags);
}
/****************************************************************
* Name: SetReconstruct :LOCAL
@@ -1344,7 +1344,7 @@ static void ReconTimerExpiry (unsigned long data)
* Disable interrupts, if they aren't already disabled and acquire
* the I/O spinlock.
*/
- spin_lock_irqsave(&host->host_lock, flags);
+ spin_lock_irqsave(host->host_lock, flags);
if ( padapter->SCpnt )
goto reconTimerExpiry;
@@ -1568,7 +1568,7 @@ reconTimerExpiry:;
* which will enable interrupts if and only if they were
* enabled on entry.
*/
- spin_unlock_irqrestore(&host->host_lock, flags);
+ spin_unlock_irqrestore(host->host_lock, flags);
}
/****************************************************************
* Name: Irq_Handler :LOCAL
@@ -1617,7 +1617,7 @@ static void Irq_Handler (int irq, void *dev_id, struct pt_regs *regs)
goto out;
}
- spin_lock_irqsave(&shost->host_lock, flags);
+ spin_lock_irqsave(shost->host_lock, flags);
padapter = HOSTDATA(shost);
pdev = padapter->pdev;
SCpnt = padapter->SCpnt;
@@ -2020,7 +2020,7 @@ static void Irq_Handler (int irq, void *dev_id, struct pt_regs *regs)
OpDone (padapter, zl);
irq_return:
- spin_unlock_irqrestore(&shost->host_lock, flags);
+ spin_unlock_irqrestore(shost->host_lock, flags);
out:;
}
/****************************************************************
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 862b18219f90..27fe3de505b5 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -119,7 +119,6 @@ int ppa_detect(Scsi_Host_Template * host)
* unlock to allow the lowlevel parport driver to probe
* the irqs
*/
- spin_unlock_irq(&io_request_lock);
pb = parport_enumerate();
printk("ppa: Version %s\n", PPA_VERSION);
@@ -128,7 +127,6 @@ int ppa_detect(Scsi_Host_Template * host)
if (!pb) {
printk("ppa: parport reports no devices.\n");
- spin_lock_irq(&io_request_lock);
return 0;
}
retry_entry:
@@ -154,7 +152,7 @@ int ppa_detect(Scsi_Host_Template * host)
"pardevice is owning the port for too longtime!\n",
i);
parport_unregister_device(ppa_hosts[i].dev);
- spin_lock_irq(&io_request_lock);
+ spin_lock_irq(ppa_hosts[i].cur_cmd->host->host_lock);
return 0;
}
}
@@ -223,13 +221,13 @@ int ppa_detect(Scsi_Host_Template * host)
printk(" supported by the imm (ZIP Plus) driver. If the\n");
printk(" cable is marked with \"AutoDetect\", this is what has\n");
printk(" happened.\n");
- spin_lock_irq(&io_request_lock);
+ spin_lock_irq(hreg->host_lock);
return 0;
}
try_again = 1;
goto retry_entry;
} else {
- spin_lock_irq(&io_request_lock);
+ spin_lock_irq(hreg->host_lock);
return 1; /* return number of hosts detected */
}
}
@@ -847,9 +845,9 @@ static void ppa_interrupt(void *data)
tmp->cur_cmd = 0;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(cmd->host->host_lock, flags);
cmd->scsi_done(cmd);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(cmd->host->host_lock, flags);
return;
}
diff --git a/drivers/scsi/psi240i.c b/drivers/scsi/psi240i.c
index f4110a0f3783..8ea40aab26cc 100644
--- a/drivers/scsi/psi240i.c
+++ b/drivers/scsi/psi240i.c
@@ -370,10 +370,11 @@ irqerror:;
static void do_Irq_Handler (int irq, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock, flags);
+ struct Scsi_Host *dev = dev_id;
+
+ spin_lock_irqsave(dev->host_lock, flags);
Irq_Handler(irq, dev_id, regs);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
/****************************************************************
* Name: Psi240i_QueueCommand
@@ -602,7 +603,7 @@ int Psi240i_Detect (Scsi_Host_Template *tpnt)
save_flags (flags);
cli ();
- if ( request_irq (chipConfig.irq, do_Irq_Handler, 0, "psi240i", NULL) )
+ if ( request_irq (chipConfig.irq, do_Irq_Handler, 0, "psi240i", pshost) )
{
printk ("Unable to allocate IRQ for PSI-240I controller.\n");
restore_flags (flags);
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index fd37c410efa9..d1ceb1d5193d 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1523,11 +1523,11 @@ void qla1280_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
return;
}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,1,95)
- spin_lock_irqsave(&io_request_lock, cpu_flags);
+ spin_lock_irqsave(ha->host->host_lock, cpu_flags);
if(test_and_set_bit(QLA1280_IN_ISR_BIT, &ha->flags))
{
COMTRACE('X')
- spin_unlock_irqrestore(&io_request_lock, cpu_flags);
+ spin_unlock_irqrestore(ha->host->host_lock, cpu_flags);
return;
}
ha->isr_count++;
@@ -1548,7 +1548,7 @@ void qla1280_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
qla1280_done(ha, (srb_t **)&ha->done_q_first, (srb_t **)&ha->done_q_last);
clear_bit(QLA1280_IN_ISR_BIT, &ha->flags);
- spin_unlock_irqrestore(&io_request_lock, cpu_flags);
+ spin_unlock_irqrestore(ha->host->host_lock, cpu_flags);
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2,1,95) */
if( test_bit(QLA1280_IN_ISR_BIT, (int *)&ha->flags) )
@@ -1619,7 +1619,7 @@ static void qla1280_do_dpc(void *p)
COMTRACE('p')
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,95)
- spin_lock_irqsave(&io_request_lock, cpu_flags);
+ spin_lock_irqsave(ha->host->host_lock, cpu_flags);
#endif
if (ha->flags.isp_abort_needed)
qla1280_abort_isp(ha);
@@ -1631,7 +1631,7 @@ static void qla1280_do_dpc(void *p)
qla1280_done(ha, (srb_t **)&ha->done_q_first, (srb_t **)&ha->done_q_last);
ha->flags.dpc_sched = FALSE;
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,1,95)
- spin_unlock_irqrestore(&io_request_lock, cpu_flags);
+ spin_unlock_irqrestore(ha->host->host_lock, cpu_flags);
#endif
}
diff --git a/drivers/scsi/qlogicfas.c b/drivers/scsi/qlogicfas.c
index df017785096c..8a0276a319bc 100644
--- a/drivers/scsi/qlogicfas.c
+++ b/drivers/scsi/qlogicfas.c
@@ -469,9 +469,9 @@ static void do_ql_ihandl(int irq, void *dev_id, struct pt_regs * regs)
unsigned long flags;
struct Scsi_Host *host = dev_id;
- spin_lock_irqsave(&host->host_lock, flags);
+ spin_lock_irqsave(host->host_lock, flags);
ql_ihandl(irq, dev_id, regs);
- spin_unlock_irqrestore(&host->host_lock, flags);
+ spin_unlock_irqrestore(host->host_lock, flags);
}
#endif
diff --git a/drivers/scsi/seagate.c b/drivers/scsi/seagate.c
index 2cf059b14403..761dfc562709 100644
--- a/drivers/scsi/seagate.c
+++ b/drivers/scsi/seagate.c
@@ -503,7 +503,7 @@ int __init seagate_st0x_detect (Scsi_Host_Template * tpnt)
hostno = instance->host_no;
if (request_irq (irq, do_seagate_reconnect_intr, SA_INTERRUPT,
(controller_type == SEAGATE) ? "seagate" : "tmc-8xx",
- NULL)) {
+ instance)) {
printk ("scsi%d : unable to allocate IRQ%d\n", hostno, irq);
return 0;
}
@@ -629,10 +629,11 @@ static int should_reconnect = 0;
static void do_seagate_reconnect_intr (int irq, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
- spin_lock_irqsave (&io_request_lock, flags);
+ struct Scsi_Host *dev = dev_id;
+
+ spin_lock_irqsave (dev->host_lock, flags);
seagate_reconnect_intr (irq, dev_id, regs);
- spin_unlock_irqrestore (&io_request_lock, flags);
+ spin_unlock_irqrestore (dev->host_lock, flags);
}
static void seagate_reconnect_intr (int irq, void *dev_id, struct pt_regs *regs)
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index a7b441d38447..aaffa09a5bf6 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -66,10 +66,11 @@ static inline unsigned long read_wd33c93_count(wd33c93_regs *regp)
static void sgiwd93_intr(int irq, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock, flags);
+ struct Scsi_Host *dev = dev_id;
+
+ spin_lock_irqsave(dev->host_lock, flags);
wd33c93_intr((struct Scsi_Host *) dev_id);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
#undef DEBUG_DMA
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index b4eeaeb2e816..e3be22e06095 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -907,9 +907,9 @@ do_sim710_intr_handle(int irq, void *dev_id, struct pt_regs *regs)
struct Scsi_Host *host = dev_id;
unsigned long flags;
- spin_lock_irqsave(&host->host_lock, flags);
+ spin_lock_irqsave(host->host_lock, flags);
sim710_intr_handle(irq, host, regs);
- spin_unlock_irqrestore(&host->host_lock, flags);
+ spin_unlock_irqrestore(host->host_lock, flags);
}
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index d6bad78edb00..7afe0407e245 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -112,7 +112,7 @@ int sun3x_esp_detect(Scsi_Host_Template *tpnt)
esp->irq = 2;
if (request_irq(esp->irq, esp_intr, SA_INTERRUPT,
- "SUN3X SCSI", NULL)) {
+ "SUN3X SCSI", esp->ehost)) {
esp_deallocate(esp);
return 0;
}
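For this to work, the dev_id passed to request_irq() can no longer be NULL: it has to be the Scsi_Host (or something that leads to it), which is what the jazz_esp, mac_esp, mca_53c9x, pas16, psi240i, seagate and sun3x_esp hunks change. A sketch of the registration side, with an illustrative driver name and error path:

/* Sketch: the last argument is what matters -- it becomes dev_id in the
 * interrupt wrapper, which then finds the per-host lock through it. */
if (request_irq(esp->irq, esp_intr, SA_INTERRUPT, "exampledrv", esp->ehost)) {
        printk("exampledrv: unable to allocate IRQ %d\n", esp->irq);
        esp_deallocate(esp);
        return 0;
}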
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index 07b487d8383b..2ad532933c49 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -337,6 +337,7 @@ static __inline__ unsigned int sym53c416_write(int base, unsigned char *buffer,
static void sym53c416_intr_handle(int irq, void *dev_id, struct pt_regs *regs)
{
+ struct Scsi_Host *dev = dev_id;
int base = 0;
int i;
unsigned long flags = 0;
@@ -359,11 +360,11 @@ static void sym53c416_intr_handle(int irq, void *dev_id, struct pt_regs *regs)
}
/* Now we have the base address and we can start handling the interrupt */
- spin_lock_irqsave(&io_request_lock,flags);
+ spin_lock_irqsave(dev->host_lock,flags);
status_reg = inb(base + STATUS_REG);
pio_int_reg = inb(base + PIO_INT_REG);
int_reg = inb(base + INT_REG);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
/* First, we handle error conditions */
if(int_reg & SCI) /* SCSI Reset */
@@ -371,9 +372,9 @@ static void sym53c416_intr_handle(int irq, void *dev_id, struct pt_regs *regs)
printk(KERN_DEBUG "sym53c416: Reset received\n");
current_command->SCp.phase = idle;
current_command->result = DID_RESET << 16;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
current_command->scsi_done(current_command);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
return;
}
if(int_reg & ILCMD) /* Illegal Command */
@@ -381,9 +382,9 @@ static void sym53c416_intr_handle(int irq, void *dev_id, struct pt_regs *regs)
printk(KERN_WARNING "sym53c416: Illegal Command: 0x%02x.\n", inb(base + COMMAND_REG));
current_command->SCp.phase = idle;
current_command->result = DID_ERROR << 16;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
current_command->scsi_done(current_command);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
return;
}
if(status_reg & GE) /* Gross Error */
@@ -391,9 +392,9 @@ static void sym53c416_intr_handle(int irq, void *dev_id, struct pt_regs *regs)
printk(KERN_WARNING "sym53c416: Controller reports gross error.\n");
current_command->SCp.phase = idle;
current_command->result = DID_ERROR << 16;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
current_command->scsi_done(current_command);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
return;
}
if(status_reg & PE) /* Parity Error */
@@ -401,9 +402,9 @@ static void sym53c416_intr_handle(int irq, void *dev_id, struct pt_regs *regs)
printk(KERN_WARNING "sym53c416:SCSI parity error.\n");
current_command->SCp.phase = idle;
current_command->result = DID_PARITY << 16;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
current_command->scsi_done(current_command);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
return;
}
if(pio_int_reg & (CE | OUE))
@@ -411,9 +412,9 @@ static void sym53c416_intr_handle(int irq, void *dev_id, struct pt_regs *regs)
printk(KERN_WARNING "sym53c416: PIO interrupt error.\n");
current_command->SCp.phase = idle;
current_command->result = DID_ERROR << 16;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
current_command->scsi_done(current_command);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
return;
}
if(int_reg & DIS) /* Disconnect */
@@ -423,9 +424,9 @@ static void sym53c416_intr_handle(int irq, void *dev_id, struct pt_regs *regs)
else
current_command->result = (current_command->SCp.Status & 0xFF) | ((current_command->SCp.Message & 0xFF) << 8) | (DID_OK << 16);
current_command->SCp.phase = idle;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(dev->host_lock, flags);
current_command->scsi_done(current_command);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
return;
}
/* Now we handle SCSI phases */
@@ -719,7 +720,7 @@ int sym53c416_detect(Scsi_Host_Template *tpnt)
cli();
/* FIXME: Request_irq with CLI is not safe */
/* Request for specified IRQ */
- if(request_irq(hosts[i].irq, sym53c416_intr_handle, 0, ID, NULL))
+ if(request_irq(hosts[i].irq, sym53c416_intr_handle, 0, ID, shpnt))
{
restore_flags(flags);
printk(KERN_ERR "sym53c416: Unable to assign IRQ %d\n", hosts[i].irq);
diff --git a/drivers/scsi/sym53c8xx_comm.h b/drivers/scsi/sym53c8xx_comm.h
index 9fde4ab6b910..319c5681d9d3 100644
--- a/drivers/scsi/sym53c8xx_comm.h
+++ b/drivers/scsi/sym53c8xx_comm.h
@@ -439,9 +439,9 @@ spinlock_t DRIVER_SMP_LOCK = SPIN_LOCK_UNLOCKED;
#define NCR_UNLOCK_NCB(np, flags) spin_unlock_irqrestore(&np->smp_lock, flags)
#define NCR_LOCK_SCSI_DONE(host, flags) \
- spin_lock_irqsave(&(host)->host_lock, flags)
+ spin_lock_irqsave((host)->host_lock, flags)
#define NCR_UNLOCK_SCSI_DONE(host, flags) \
- spin_unlock_irqrestore(&((host)->host_lock), flags)
+ spin_unlock_irqrestore(((host)->host_lock), flags)
#else
diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c
index 7119128d71ed..7a029d925f99 100644
--- a/drivers/scsi/t128.c
+++ b/drivers/scsi/t128.c
@@ -248,7 +248,7 @@ int __init t128_detect(Scsi_Host_Template * tpnt){
instance->irq = NCR5380_probe_irq(instance, T128_IRQS);
if (instance->irq != IRQ_NONE)
- if (request_irq(instance->irq, do_t128_intr, SA_INTERRUPT, "t128", NULL)) {
+ if (request_irq(instance->irq, do_t128_intr, SA_INTERRUPT, "t128", instance)) {
printk("scsi%d : IRQ%d not free, interrupts disabled\n",
instance->host_no, instance->irq);
instance->irq = IRQ_NONE;
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index eef872ae6a77..24ab22011388 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -304,8 +304,8 @@ MODULE_DEVICE_TABLE(pci, tmscsim_pci_tbl);
# define DC390_IFLAGS unsigned long iflags;
# define DC390_DFLAGS unsigned long dflags;
-# define DC390_LOCK_IO spin_lock_irqsave (&io_request_lock, iflags)
-# define DC390_UNLOCK_IO spin_unlock_irqrestore (&io_request_lock, iflags)
+# define DC390_LOCK_IO(dev) spin_lock_irqsave (((struct Scsi_Host *)dev)->host_lock, iflags)
+# define DC390_UNLOCK_IO(dev) spin_unlock_irqrestore (((struct Scsi_Host *)dev)->host_lock, iflags)
# define DC390_LOCK_DRV spin_lock_irqsave (&dc390_drvlock, dflags)
# define DC390_UNLOCK_DRV spin_unlock_irqrestore (&dc390_drvlock, dflags)
@@ -331,8 +331,8 @@ MODULE_DEVICE_TABLE(pci, tmscsim_pci_tbl);
# define DC390_AFLAGS unsigned long aflags;
# define DC390_IFLAGS
# define DC390_DFLAGS unsigned long dflags;
-# define DC390_LOCK_IO /* spin_lock_irqsave (&io_request_lock, iflags) */
-# define DC390_UNLOCK_IO /* spin_unlock_irqrestore (&io_request_lock, iflags) */
+# define DC390_LOCK_IO(dev) /* spin_lock_irqsave (&io_request_lock, iflags) */
+# define DC390_UNLOCK_IO(dev) /* spin_unlock_irqrestore (&io_request_lock, iflags) */
# define DC390_LOCK_DRV spin_lock_irqsave (&dc390_drvlock, dflags)
# define DC390_UNLOCK_DRV spin_unlock_irqrestore (&dc390_drvlock, dflags)
# define DC390_LOCK_DRV_NI spin_lock (&dc390_drvlock)
@@ -349,8 +349,8 @@ MODULE_DEVICE_TABLE(pci, tmscsim_pci_tbl);
# define DC390_IFLAGS unsigned long iflags;
# define DC390_DFLAGS unsigned long dflags;
spinlock_t dc390_drvlock = SPIN_LOCK_UNLOCKED;
-# define DC390_LOCK_IO spin_lock_irqsave (&io_request_lock, iflags)
-# define DC390_UNLOCK_IO spin_unlock_irqrestore (&io_request_lock, iflags)
+# define DC390_LOCK_IO(dev) spin_lock_irqsave (((struct Scsi_Host *)dev)->host_lock, iflags)
+# define DC390_UNLOCK_IO(dev) spin_unlock_irqrestore (((struct Scsi_Host *)dev)->host_lock, iflags)
# define DC390_LOCK_DRV spin_lock_irqsave (&dc390_drvlock, dflags)
# define DC390_UNLOCK_DRV spin_unlock_irqrestore (&dc390_drvlock, dflags)
# define DC390_LOCK_DRV_NI spin_lock (&dc390_drvlock)
@@ -1074,11 +1074,11 @@ void DC390_waiting_timed_out (unsigned long ptr)
DC390_IFLAGS
DC390_AFLAGS
DEBUG0(printk ("DC390: Debug: Waiting queue woken up by timer!\n");)
- DC390_LOCK_IO;
+ DC390_LOCK_IO(pACB->pScsiHost);
DC390_LOCK_ACB;
dc390_Waiting_process (pACB);
DC390_UNLOCK_ACB;
- DC390_UNLOCK_IO;
+ DC390_UNLOCK_IO(pACB->pScsiHost);
}
/***********************************************************************
@@ -2558,7 +2558,7 @@ int dc390_set_info (char *buffer, int length, PACB pACB)
DC390_AFLAGS
pos[length] = 0;
- DC390_LOCK_IO;
+ DC390_LOCK_IO(pACB->pScsiHost);
DC390_LOCK_ACB;
/* UPPERCASE */
/* Don't use kernel toupper, because of 2.0.x bug: ctmp unexported */
@@ -2726,7 +2726,7 @@ int dc390_set_info (char *buffer, int length, PACB pACB)
DC390_UNLOCK_ACB;
if (needs_inquiry)
{ dc390_updateDCB (pACB, pDCB); dc390_inquiry (pACB, pDCB); };
- DC390_UNLOCK_IO;
+ DC390_UNLOCK_IO(pACB->pScsiHost);
return (length);
einv2:
@@ -2734,7 +2734,7 @@ int dc390_set_info (char *buffer, int length, PACB pACB)
einv:
/* spin_unlock (strtok_lock); */
DC390_UNLOCK_ACB;
- DC390_UNLOCK_IO;
+ DC390_UNLOCK_IO(pACB->pScsiHost);
printk (KERN_WARNING "DC390: parse error near \"%s\"\n", (pos? pos: "NULL"));
return (-EINVAL);
@@ -2744,7 +2744,7 @@ int dc390_set_info (char *buffer, int length, PACB pACB)
printk (KERN_WARNING "DC390: Driver reset requested!\n");
DC390_UNLOCK_ACB;
DC390_reset (&cmd, 0);
- DC390_UNLOCK_IO;
+ DC390_UNLOCK_IO(pACB->pScsiHost);
};
return (length);
@@ -2752,7 +2752,7 @@ int dc390_set_info (char *buffer, int length, PACB pACB)
{
dc390_dumpinfo (pACB, 0, 0);
DC390_UNLOCK_ACB;
- DC390_UNLOCK_IO;
+ DC390_UNLOCK_IO(pACB->pScsiHost);
}
return (length);
@@ -2766,7 +2766,7 @@ int dc390_set_info (char *buffer, int length, PACB pACB)
dev, pDCB->TargetID, pDCB->TargetLUN);
DC390_UNLOCK_ACB;
dc390_inquiry (pACB, pDCB);
- DC390_UNLOCK_IO;
+ DC390_UNLOCK_IO(pACB->pScsiHost);
};
return (length);
@@ -2781,7 +2781,7 @@ int dc390_set_info (char *buffer, int length, PACB pACB)
/* TO DO: We should make sure no pending commands are left */
dc390_remove_dev (pACB, pDCB);
DC390_UNLOCK_ACB;
- DC390_UNLOCK_IO;
+ DC390_UNLOCK_IO(pACB->pScsiHost);
};
return (length);
@@ -2796,7 +2796,7 @@ int dc390_set_info (char *buffer, int length, PACB pACB)
dc390_initDCB (pACB, &pDCB, id, lun);
DC390_UNLOCK_ACB;
dc390_inquiry (pACB, pDCB);
- DC390_UNLOCK_IO;
+ DC390_UNLOCK_IO(pACB->pScsiHost);
};
return (length);
@@ -2812,7 +2812,7 @@ int dc390_set_info (char *buffer, int length, PACB pACB)
DC390_UNLOCK_ACB;
dc390_sendstart (pACB, pDCB);
dc390_inquiry (pACB, pDCB);
- DC390_UNLOCK_IO;
+ DC390_UNLOCK_IO(pACB->pScsiHost);
};
return (length);
@@ -2820,7 +2820,7 @@ int dc390_set_info (char *buffer, int length, PACB pACB)
printk (KERN_WARNING "DC390: Ignore cmnd to illegal Dev(Idx) %i. Valid range: 0 - %i.\n",
dev, pACB->DCBCnt - 1);
DC390_UNLOCK_ACB;
- DC390_UNLOCK_IO;
+ DC390_UNLOCK_IO(pACB->pScsiHost);
return (-EINVAL);
@@ -3041,7 +3041,7 @@ int DC390_release (struct Scsi_Host *host)
DC390_AFLAGS DC390_IFLAGS
PACB pACB = (PACB)(host->hostdata);
- DC390_LOCK_IO;
+ DC390_LOCK_IO(host);
DC390_LOCK_ACB;
/* TO DO: We should check for outstanding commands first. */
@@ -3056,7 +3056,7 @@ int DC390_release (struct Scsi_Host *host)
release_region(host->io_port,host->n_io_port);
dc390_freeDCBs (host);
DC390_UNLOCK_ACB;
- DC390_UNLOCK_IO;
+ DC390_UNLOCK_IO(host);
return( 1 );
}
#endif /* def MODULE */
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 2f7f1cab84b9..945939bcdfbb 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1364,10 +1364,10 @@ static inline int do_reset(Scsi_Cmnd *SCarg) {
HD(j)->in_reset = TRUE;
- spin_unlock_irq(&sh[j]->host_lock);
+ spin_unlock_irq(sh[j]->host_lock);
time = jiffies;
while ((jiffies - time) < (10 * HZ) && limit++ < 200000) udelay(100L);
- spin_lock_irq(&sh[j]->host_lock);
+ spin_lock_irq(sh[j]->host_lock);
printk("%s: reset, interrupts disabled, loops %d.\n", BN(j), limit);
@@ -1822,9 +1822,9 @@ static void do_interrupt_handler(int irq, void *shap, struct pt_regs *regs) {
/* Check if the interrupt must be processed by this handler */
if ((j = (unsigned int)((char *)shap - sha)) >= num_boards) return;
- spin_lock_irqsave(&sh[j]->host_lock, spin_flags);
+ spin_lock_irqsave(sh[j]->host_lock, spin_flags);
ihdlr(irq, j);
- spin_unlock_irqrestore(&sh[j]->host_lock, spin_flags);
+ spin_unlock_irqrestore(sh[j]->host_lock, spin_flags);
}
int u14_34f_release(struct Scsi_Host *shpnt) {
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index a0104d6fe2c2..64f088ef9a44 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -500,7 +500,11 @@ static int ultrastor_14f_detect(Scsi_Host_Template * tpnt)
config.mscp_free = ~0;
#endif
- if (request_irq(config.interrupt, do_ultrastor_interrupt, 0, "Ultrastor", NULL)) {
+ /*
+ * Brrr, &config.mscp[0].SCint->host) it is something magical....
+ * XXX and FIXME
+ */
+ if (request_irq(config.interrupt, do_ultrastor_interrupt, 0, "Ultrastor", &config.mscp[0].SCint->host)) {
printk("Unable to allocate IRQ%u for UltraStor controller.\n",
config.interrupt);
return FALSE;
@@ -570,12 +574,7 @@ static int ultrastor_24f_detect(Scsi_Host_Template * tpnt)
printk("U24F: invalid IRQ\n");
return FALSE;
}
- if (request_irq(config.interrupt, do_ultrastor_interrupt, 0, "Ultrastor", NULL))
- {
- printk("Unable to allocate IRQ%u for UltraStor controller.\n",
- config.interrupt);
- return FALSE;
- }
+
/* BIOS addr set */
/* base port set */
config.port_address = addr;
@@ -605,6 +604,13 @@ static int ultrastor_24f_detect(Scsi_Host_Template * tpnt)
free_irq(config.interrupt, do_ultrastor_interrupt);
return FALSE;
}
+
+ if (request_irq(config.interrupt, do_ultrastor_interrupt, 0, "Ultrastor", shpnt))
+ {
+ printk("Unable to allocate IRQ%u for UltraStor controller.\n",
+ config.interrupt);
+ return FALSE;
+ }
shpnt->irq = config.interrupt;
shpnt->dma_channel = config.dma_channel;
@@ -1159,10 +1165,11 @@ static void ultrastor_interrupt(int irq, void *dev_id, struct pt_regs *regs)
static void do_ultrastor_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
unsigned long flags;
-
- spin_lock_irqsave(&io_request_lock, flags);
+ struct Scsi_Host *dev = dev_id;
+
+ spin_lock_irqsave(dev->host_lock, flags);
ultrastor_interrupt(irq, dev_id, regs);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(dev->host_lock, flags);
}
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index 40454961d3d5..d3a84ab0f6ad 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -866,18 +866,18 @@ static inline Scb *alloc_scbs(struct Scsi_Host *host, int needed)
save_flags (flags);
cli ();
while (busy) { /* someone else is allocating */
- spin_unlock_irq(&host->host_lock);
+ spin_unlock_irq(host->host_lock);
for (now = jiffies; now == jiffies; ); /* wait a jiffy */
- spin_lock_irq(&host->host_lock);
+ spin_lock_irq(host->host_lock);
}
busy = 1; /* not busy now; it's our turn */
while (freescbs < needed) {
timeout = jiffies + WAITnexttimeout;
do {
- spin_unlock_irq(&host->host_lock);
+ spin_unlock_irq(host->host_lock);
for (now = jiffies; now == jiffies; ); /* wait a jiffy */
- spin_lock_irq(&host->host_lock);
+ spin_lock_irq(host->host_lock);
} while (freescbs < needed && time_before_eq(jiffies, timeout));
/*
* If we get here with enough free Scbs, we can take them.
@@ -1144,9 +1144,9 @@ void do_wd7000_intr_handle (int irq, void *dev_id, struct pt_regs *regs)
unsigned long flags;
struct Scsi_Host *host = dev_id;
- spin_lock_irqsave(&host->host_lock, flags);
+ spin_lock_irqsave(host->host_lock, flags);
wd7000_intr_handle(irq, dev_id, regs);
- spin_unlock_irqrestore(&host->host_lock, flags);
+ spin_unlock_irqrestore(host->host_lock, flags);
}
diff --git a/drivers/sound/i810_audio.c b/drivers/sound/i810_audio.c
index 78b062c67627..ed70efba7aa6 100644
--- a/drivers/sound/i810_audio.c
+++ b/drivers/sound/i810_audio.c
@@ -14,7 +14,7 @@
* Analog Devices (A major AC97 codec maker)
* Intel Corp (you've probably heard of them already)
*
- * AC97 clues and assistance provided by
+ * AC97 clues and assistance provided by
* Analog Devices
* Zach 'Fufu' Brown
* Jeff Garzik
@@ -63,6 +63,8 @@
* This is available via the 'ftsodell=1' option.
*
* If you need to force a specific rate set the clocking= option
+ *
+ * This driver is cursed. (Ben LaHaise)
*/
#include <linux/module.h>
@@ -102,15 +104,22 @@
#ifndef PCI_DEVICE_ID_INTEL_440MX
#define PCI_DEVICE_ID_INTEL_440MX 0x7195
#endif
+#ifndef PCI_DEVICE_ID_SI_7012
+#define PCI_DEVICE_ID_SI_7012 0x7012
+#endif
+#ifndef PCI_DEVICE_ID_NVIDIA_MCP1_AUDIO
+#define PCI_DEVICE_ID_NVIDIA_MCP1_AUDIO 0x01b1
+#endif
static int ftsodell=0;
static int strict_clocking=0;
-static unsigned int clocking=48000;
+static unsigned int clocking=0;
static int spdif_locked=0;
//#define DEBUG
//#define DEBUG2
//#define DEBUG_INTERRUPTS
+//#define DEBUG_MMAP
#define ADC_RUNNING 1
#define DAC_RUNNING 2
@@ -197,7 +206,7 @@ enum {
#define INT_MASK (INT_SEC|INT_PRI|INT_MC|INT_PO|INT_PI|INT_MO|INT_NI|INT_GPI)
-#define DRIVER_VERSION "0.04"
+#define DRIVER_VERSION "0.21"
/* magic numbers to protect our data structures */
#define I810_CARD_MAGIC 0x5072696E /* "Prin" */
@@ -220,7 +229,9 @@ enum {
ICH82901AB,
INTEL440MX,
INTELICH2,
- INTELICH3
+ INTELICH3,
+ SI7012,
+ NVIDIA_NFORCE
};
static char * card_names[] = {
@@ -228,7 +239,9 @@ static char * card_names[] = {
"Intel ICH 82901AB",
"Intel 440MX",
"Intel ICH2",
- "Intel ICH3"
+ "Intel ICH3",
+ "SiS 7012",
+ "NVIDIA nForce Audio"
};
static struct pci_device_id i810_pci_tbl [] __initdata = {
@@ -242,6 +255,10 @@ static struct pci_device_id i810_pci_tbl [] __initdata = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, INTELICH2},
{PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH3,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, INTELICH3},
+ {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7012,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, SI7012},
+ {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_MCP1_AUDIO,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, NVIDIA_NFORCE},
{0,}
};
@@ -357,39 +374,17 @@ struct i810_card {
struct i810_channel *(*alloc_rec_pcm_channel)(struct i810_card *);
struct i810_channel *(*alloc_rec_mic_channel)(struct i810_card *);
void (*free_pcm_channel)(struct i810_card *, int chan);
+
+ /* We have a *very* long init time possibly, so use this to block */
+ /* attempts to open our devices before we are ready (stops oops'es) */
+ int initializing;
};
static struct i810_card *devs = NULL;
static int i810_open_mixdev(struct inode *inode, struct file *file);
-static int i810_ioctl_mixdev(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg);
-
-static inline unsigned ld2(unsigned int x)
-{
- unsigned r = 0;
-
- if (x >= 0x10000) {
- x >>= 16;
- r += 16;
- }
- if (x >= 0x100) {
- x >>= 8;
- r += 8;
- }
- if (x >= 0x10) {
- x >>= 4;
- r += 4;
- }
- if (x >= 4) {
- x >>= 2;
- r += 2;
- }
- if (x >= 2)
- r++;
- return r;
-}
-
+static int i810_ioctl_mixdev(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg);
static u16 i810_ac97_get(struct ac97_codec *dev, u8 reg);
static void i810_ac97_set(struct ac97_codec *dev, u8 reg, u16 data);
@@ -606,9 +601,8 @@ static unsigned int i810_set_dac_rate(struct i810_state * state, unsigned int ra
rate = 8000;
dmabuf->rate = (rate * 48000)/clocking;
}
-
- new_rate = ac97_set_dac_rate(codec, rate);
+ new_rate=ac97_set_dac_rate(codec, rate);
if(new_rate != rate) {
dmabuf->rate = (new_rate * 48000)/clocking;
}
@@ -666,47 +660,49 @@ static unsigned int i810_set_adc_rate(struct i810_state * state, unsigned int ra
static inline unsigned i810_get_dma_addr(struct i810_state *state, int rec)
{
struct dmabuf *dmabuf = &state->dmabuf;
- unsigned int civ, offset;
- struct i810_channel *c;
+ unsigned int civ, offset, port, port_picb, bytes = 2;
if (!dmabuf->enable)
return 0;
+
if (rec)
- c = dmabuf->read_channel;
+ port = state->card->iobase + dmabuf->read_channel->port;
else
- c = dmabuf->write_channel;
+ port = state->card->iobase + dmabuf->write_channel->port;
+
+ if(state->card->pci_id == PCI_DEVICE_ID_SI_7012) {
+ port_picb = port + OFF_SR;
+ bytes = 1;
+ } else
+ port_picb = port + OFF_PICB;
+
do {
- civ = inb(state->card->iobase+c->port+OFF_CIV);
- offset = (civ + 1) * dmabuf->fragsize -
- 2 * inw(state->card->iobase+c->port+OFF_PICB);
- /* CIV changed before we read PICB (very seldom) ?
- * then PICB was rubbish, so try again */
- } while (civ != inb(state->card->iobase+c->port+OFF_CIV));
+ civ = inb(port+OFF_CIV) & 31;
+ offset = inw(port_picb);
+ /* Must have a delay here! */
+ if(offset == 0)
+ udelay(1);
+ /* Reread both registers and make sure that that total
+ * offset from the first reading to the second is 0.
+ * There is an issue with SiS hardware where it will count
+ * picb down to 0, then update civ to the next value,
+ * then set the new picb to fragsize bytes. We can catch
+ * it between the civ update and the picb update, making
+ * it look as though we are 1 fragsize ahead of where we
+ * are. The next time we get the address though, it will
+ * be back in the right place, and we will suddenly think
+ * we just went forward dmasize - fragsize bytes, causing
+ * totally stupid *huge* dma overrun messages. We are
+ * assuming that the 1us delay is more than long enough
+ * that we won't have to worry about the chip still being
+ * out of sync with reality ;-)
+ */
+ } while (civ != (inb(port+OFF_CIV) & 31) || offset != inw(port_picb));
- return offset;
+ return (((civ + 1) * dmabuf->fragsize - (bytes * offset))
+ % dmabuf->dmasize);
}
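With the SiS 7012 quirk folded in, the position returned above is ((civ + 1) * fragsize - bytes * picb) % dmasize, where bytes is 2 on the Intel parts (PICB counts 16-bit samples) and 1 on the SiS 7012 (its counter, read from the SR offset, counts bytes). A worked example with illustrative numbers, not taken from real hardware:

/*   fragsize = 4096, dmasize = 16 * 4096 = 65536, civ = 3, picb = 512, bytes = 2
 *   offset = ((3 + 1) * 4096 - 2 * 512) % 65536
 *          = (16384 - 1024) % 65536
 *          = 15360
 * i.e. the engine is 3072 bytes into descriptor 3 (which spans 12288..16383),
 * with 1024 bytes of that fragment still to play. */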
-//static void resync_dma_ptrs(struct i810_state *state, int rec)
-//{
-// struct dmabuf *dmabuf = &state->dmabuf;
-// struct i810_channel *c;
-// int offset;
-//
-// if(rec) {
-// c = dmabuf->read_channel;
-// } else {
-// c = dmabuf->write_channel;
-// }
-// if(c==NULL)
-// return;
-// offset = inb(state->card->iobase+c->port+OFF_CIV);
-// if(offset == inb(state->card->iobase+c->port+OFF_LVI))
-// offset++;
-// offset *= dmabuf->fragsize;
-//
-// dmabuf->hwptr=dmabuf->swptr = offset;
-//}
-
/* Stop recording (lock held) */
static inline void __stop_adc(struct i810_state *state)
{
@@ -718,7 +714,10 @@ static inline void __stop_adc(struct i810_state *state)
// wait for the card to acknowledge shutdown
while( inb(card->iobase + PI_CR) != 0 ) ;
// now clear any latent interrupt bits (like the halt bit)
- outb( inb(card->iobase + PI_SR), card->iobase + PI_SR );
+ if(card->pci_id == PCI_DEVICE_ID_SI_7012)
+ outb( inb(card->iobase + PI_PICB), card->iobase + PI_PICB );
+ else
+ outb( inb(card->iobase + PI_SR), card->iobase + PI_SR );
outl( inl(card->iobase + GLOB_STA) & INT_PI, card->iobase + GLOB_STA);
}
@@ -732,21 +731,27 @@ static void stop_adc(struct i810_state *state)
spin_unlock_irqrestore(&card->lock, flags);
}
-static void start_adc(struct i810_state *state)
+static inline void __start_adc(struct i810_state *state)
{
struct dmabuf *dmabuf = &state->dmabuf;
- struct i810_card *card = state->card;
- unsigned long flags;
if (dmabuf->count < dmabuf->dmasize && dmabuf->ready && !dmabuf->enable &&
(dmabuf->trigger & PCM_ENABLE_INPUT)) {
- spin_lock_irqsave(&card->lock, flags);
dmabuf->enable |= ADC_RUNNING;
- outb((1<<4) | (1<<2) | 1, card->iobase + PI_CR);
- spin_unlock_irqrestore(&card->lock, flags);
+ outb((1<<4) | (1<<2) | 1, state->card->iobase + PI_CR);
}
}
+static void start_adc(struct i810_state *state)
+{
+ struct i810_card *card = state->card;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+ __start_adc(state);
+ spin_unlock_irqrestore(&card->lock, flags);
+}
+
/* stop playback (lock held) */
static inline void __stop_dac(struct i810_state *state)
{
@@ -758,7 +763,10 @@ static inline void __stop_dac(struct i810_state *state)
// wait for the card to acknowledge shutdown
while( inb(card->iobase + PO_CR) != 0 ) ;
// now clear any latent interrupt bits (like the halt bit)
- outb( inb(card->iobase + PO_SR), card->iobase + PO_SR );
+ if(card->pci_id == PCI_DEVICE_ID_SI_7012)
+ outb( inb(card->iobase + PO_PICB), card->iobase + PO_PICB );
+ else
+ outb( inb(card->iobase + PO_SR), card->iobase + PO_SR );
outl( inl(card->iobase + GLOB_STA) & INT_PO, card->iobase + GLOB_STA);
}
@@ -772,20 +780,25 @@ static void stop_dac(struct i810_state *state)
spin_unlock_irqrestore(&card->lock, flags);
}
-static void start_dac(struct i810_state *state)
+static inline void __start_dac(struct i810_state *state)
{
struct dmabuf *dmabuf = &state->dmabuf;
- struct i810_card *card = state->card;
- unsigned long flags;
if (dmabuf->count > 0 && dmabuf->ready && !dmabuf->enable &&
(dmabuf->trigger & PCM_ENABLE_OUTPUT)) {
- spin_lock_irqsave(&card->lock, flags);
dmabuf->enable |= DAC_RUNNING;
- outb((1<<4) | (1<<2) | 1, card->iobase + PO_CR);
- spin_unlock_irqrestore(&card->lock, flags);
+ outb((1<<4) | (1<<2) | 1, state->card->iobase + PO_CR);
}
}
+static void start_dac(struct i810_state *state)
+{
+ struct i810_card *card = state->card;
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->lock, flags);
+ __start_dac(state);
+ spin_unlock_irqrestore(&card->lock, flags);
+}
#define DMABUF_DEFAULTORDER (16-PAGE_SHIFT)
#define DMABUF_MINORDER 1
@@ -805,6 +818,8 @@ static int alloc_dmabuf(struct i810_state *state)
dmabuf->ossfragsize = (PAGE_SIZE<<DMABUF_DEFAULTORDER)/dmabuf->ossmaxfrags;
size = dmabuf->ossfragsize * dmabuf->ossmaxfrags;
+ if(dmabuf->rawbuf && (PAGE_SIZE << dmabuf->buforder) == size)
+ return 0;
/* alloc enough to satisfy the oss params */
for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--) {
if ( (PAGE_SIZE<<order) > size )
@@ -873,14 +888,19 @@ static int prog_dmabuf(struct i810_state *state, unsigned rec)
dmabuf->swptr = dmabuf->hwptr = 0;
spin_unlock_irqrestore(&state->card->lock, flags);
- /* allocate DMA buffer if not allocated yet */
- if (dmabuf->rawbuf)
- dealloc_dmabuf(state);
+ /* allocate DMA buffer, let alloc_dmabuf determine if we are already
+ * allocated well enough or if we should replace the current buffer
+ * (assuming one is already allocated, if it isn't, then allocate it).
+ */
if ((ret = alloc_dmabuf(state)))
return ret;
/* FIXME: figure out all this OSS fragment stuff */
/* I did, it now does what it should according to the OSS API. DL */
+ /* We may not have realloced our dmabuf, but the fragment size to
+ * fragment number ratio may have changed, so go ahead and reprogram
+ * things
+ */
dmabuf->dmasize = PAGE_SIZE << dmabuf->buforder;
dmabuf->numfrag = SG_LEN;
dmabuf->fragsize = dmabuf->dmasize/dmabuf->numfrag;
@@ -922,6 +942,8 @@ static int prog_dmabuf(struct i810_state *state, unsigned rec)
sg->busaddr=virt_to_bus(dmabuf->rawbuf+dmabuf->fragsize*i);
// the card will always be doing 16bit stereo
sg->control=dmabuf->fragsamples;
+ if(state->card->pci_id == PCI_DEVICE_ID_SI_7012)
+ sg->control <<= 1;
sg->control|=CON_BUFPAD;
// set us up to get IOC interrupts as often as needed to
// satisfy numfrag requirements, no more
@@ -935,7 +957,6 @@ static int prog_dmabuf(struct i810_state *state, unsigned rec)
outl(virt_to_bus(&c->sg[0]), state->card->iobase+c->port+OFF_BDBAR);
outb(0, state->card->iobase+c->port+OFF_CIV);
outb(0, state->card->iobase+c->port+OFF_LVI);
- dmabuf->count = 0;
spin_unlock_irqrestore(&state->card->lock, flags);
@@ -969,31 +990,34 @@ static void __i810_update_lvi(struct i810_state *state, int rec)
else
port += dmabuf->write_channel->port;
- if(dmabuf->mapped) {
- if(rec)
- dmabuf->swptr = (dmabuf->hwptr + dmabuf->dmasize
- - dmabuf->count) % dmabuf->dmasize;
- else
- dmabuf->swptr = (dmabuf->hwptr + dmabuf->count)
- % dmabuf->dmasize;
- }
- /*
- * two special cases, count == 0 on write
- * means no data, and count == dmasize
- * means no data on read, handle appropriately
+ /* if we are currently stopped, then our CIV is actually set to our
+ * *last* sg segment and we are ready to wrap to the next. However,
+ * if we set our LVI to the last sg segment, then it won't wrap to
+ * the next sg segment, it won't even get a start. So, instead, when
+ * we are stopped, we set both the LVI value and also we increment
+ * the CIV value to the next sg segment to be played so that when
+ * we call start_{dac,adc}, things will operate properly
*/
- if(!rec && dmabuf->count == 0) {
- outb(inb(port+OFF_CIV),port+OFF_LVI);
- return;
- }
- if(rec && dmabuf->count == dmabuf->dmasize) {
- outb(inb(port+OFF_CIV),port+OFF_LVI);
- return;
+ if (!dmabuf->enable && dmabuf->ready) {
+ if(rec && dmabuf->count < dmabuf->dmasize &&
+ (dmabuf->trigger & PCM_ENABLE_INPUT))
+ {
+ outb((inb(port+OFF_CIV)+1)&31, port+OFF_LVI);
+ __start_adc(state);
+ while( !(inb(port + OFF_CR) & ((1<<4) | (1<<2))) ) ;
+ } else if (!rec && dmabuf->count &&
+ (dmabuf->trigger & PCM_ENABLE_OUTPUT))
+ {
+ outb((inb(port+OFF_CIV)+1)&31, port+OFF_LVI);
+ __start_dac(state);
+ while( !(inb(port + OFF_CR) & ((1<<4) | (1<<2))) ) ;
+ }
}
+
/* swptr - 1 is the tail of our transfer */
x = (dmabuf->dmasize + dmabuf->swptr - 1) % dmabuf->dmasize;
x /= dmabuf->fragsize;
- outb(x&31, port+OFF_LVI);
+ outb(x, port+OFF_LVI);
}
static void i810_update_lvi(struct i810_state *state, int rec)
@@ -1020,7 +1044,9 @@ static void i810_update_ptr(struct i810_state *state)
/* update hardware pointer */
hwptr = i810_get_dma_addr(state, 1);
diff = (dmabuf->dmasize + hwptr - dmabuf->hwptr) % dmabuf->dmasize;
-// printk("HWP %d,%d,%d\n", hwptr, dmabuf->hwptr, diff);
+#if defined(DEBUG_INTERRUPTS) || defined(DEBUG_MMAP)
+ printk("ADC HWP %d,%d,%d\n", hwptr, dmabuf->hwptr, diff);
+#endif
dmabuf->hwptr = hwptr;
dmabuf->total_bytes += diff;
dmabuf->count += diff;
@@ -1029,8 +1055,8 @@ static void i810_update_ptr(struct i810_state *state)
/* this is normal for the end of a read */
/* only give an error if we went past the */
/* last valid sg entry */
- if(inb(state->card->iobase + PI_CIV) !=
- inb(state->card->iobase + PI_LVI)) {
+ if((inb(state->card->iobase + PI_CIV) & 31) !=
+ (inb(state->card->iobase + PI_LVI) & 31)) {
printk(KERN_WARNING "i810_audio: DMA overrun on read\n");
dmabuf->error++;
}
@@ -1043,7 +1069,9 @@ static void i810_update_ptr(struct i810_state *state)
/* update hardware pointer */
hwptr = i810_get_dma_addr(state, 0);
diff = (dmabuf->dmasize + hwptr - dmabuf->hwptr) % dmabuf->dmasize;
-// printk("HWP %d,%d,%d\n", hwptr, dmabuf->hwptr, diff);
+#if defined(DEBUG_INTERRUPTS) || defined(DEBUG_MMAP)
+ printk("DAC HWP %d,%d,%d\n", hwptr, dmabuf->hwptr, diff);
+#endif
dmabuf->hwptr = hwptr;
dmabuf->total_bytes += diff;
dmabuf->count -= diff;
@@ -1052,13 +1080,13 @@ static void i810_update_ptr(struct i810_state *state)
/* this is normal for the end of a write */
/* only give an error if we went past the */
/* last valid sg entry */
- if(inb(state->card->iobase + PO_CIV) !=
- inb(state->card->iobase + PO_LVI)) {
+ if((inb(state->card->iobase + PO_CIV) & 31) !=
+ (inb(state->card->iobase + PO_LVI) & 31)) {
printk(KERN_WARNING "i810_audio: DMA overrun on write\n");
printk("i810_audio: CIV %d, LVI %d, hwptr %x, "
"count %d\n",
- inb(state->card->iobase + PO_CIV),
- inb(state->card->iobase + PO_LVI),
+ inb(state->card->iobase + PO_CIV) & 31,
+ inb(state->card->iobase + PO_LVI) & 31,
dmabuf->hwptr, dmabuf->count);
dmabuf->error++;
}
@@ -1068,7 +1096,43 @@ static void i810_update_ptr(struct i810_state *state)
}
}
-static int drain_dac(struct i810_state *state, int nonblock)
+static inline int i810_get_free_write_space(struct i810_state *state)
+{
+ struct dmabuf *dmabuf = &state->dmabuf;
+ int free;
+
+ i810_update_ptr(state);
+ // catch underruns during playback
+ if (dmabuf->count < 0) {
+ dmabuf->count = 0;
+ dmabuf->swptr = dmabuf->hwptr;
+ }
+ free = dmabuf->dmasize - dmabuf->count;
+ free -= (dmabuf->hwptr % dmabuf->fragsize);
+ if(free < 0)
+ return(0);
+ return(free);
+}
+
+static inline int i810_get_available_read_data(struct i810_state *state)
+{
+ struct dmabuf *dmabuf = &state->dmabuf;
+ int avail;
+
+ i810_update_ptr(state);
+ // catch overruns during record
+ if (dmabuf->count > dmabuf->dmasize) {
+ dmabuf->count = dmabuf->dmasize;
+ dmabuf->swptr = dmabuf->hwptr;
+ }
+ avail = dmabuf->count;
+ avail -= (dmabuf->hwptr % dmabuf->fragsize);
+ if(avail < 0)
+ return(0);
+ return(avail);
+}
+
+static int drain_dac(struct i810_state *state, int signals_allowed)
{
DECLARE_WAITQUEUE(wait, current);
struct dmabuf *dmabuf = &state->dmabuf;
@@ -1078,12 +1142,12 @@ static int drain_dac(struct i810_state *state, int nonblock)
if (!dmabuf->ready)
return 0;
-
+ if(dmabuf->mapped) {
+ stop_dac(state);
+ return 0;
+ }
add_wait_queue(&dmabuf->wait, &wait);
for (;;) {
- /* It seems that we have to set the current state to TASK_INTERRUPTIBLE
- every time to make the process really go to sleep */
- set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irqsave(&state->card->lock, flags);
i810_update_ptr(state);
@@ -1093,33 +1157,46 @@ static int drain_dac(struct i810_state *state, int nonblock)
if (count <= 0)
break;
- if (signal_pending(current))
- break;
-
- i810_update_lvi(state,0);
- if (dmabuf->enable != DAC_RUNNING)
- start_dac(state);
-
- if (nonblock) {
- remove_wait_queue(&dmabuf->wait, &wait);
- set_current_state(TASK_RUNNING);
- return -EBUSY;
+ /*
+ * This will make sure that our LVI is correct, that our
+ * pointer is updated, and that the DAC is running. We
+ * have to force the setting of dmabuf->trigger to avoid
+ * any possible deadlocks.
+ */
+ if(!dmabuf->enable) {
+ dmabuf->trigger = PCM_ENABLE_OUTPUT;
+ i810_update_lvi(state,0);
}
+ if (signal_pending(current) && signals_allowed) {
+ break;
+ }
- tmo = (dmabuf->dmasize * HZ) / dmabuf->rate;
- tmo >>= 1;
- if (!schedule_timeout(tmo ? tmo : 1) && tmo){
+ /* It seems that we have to set the current state to
+ * TASK_INTERRUPTIBLE every time to make the process
+ * really go to sleep. This also has to be *after* the
+ * update_ptr() call because update_ptr is likely to
+ * do a wake_up() which will unset this before we ever
+ * try to sleep, resulting in a tight loop in this code
+ * instead of actually sleeping and waiting for an
+ * interrupt to wake us up!
+ */
+ set_current_state(TASK_INTERRUPTIBLE);
+ /*
+ * set the timeout to significantly longer than it *should*
+ * take for the DAC to drain the DMA buffer
+ */
+ tmo = (count * HZ) / (dmabuf->rate);
+ if (!schedule_timeout(tmo >= 2 ? tmo : 2)){
printk(KERN_ERR "i810_audio: drain_dac, dma timeout?\n");
+ count = 0;
break;
}
}
- stop_dac(state);
- synchronize_irq();
- remove_wait_queue(&dmabuf->wait, &wait);
set_current_state(TASK_RUNNING);
- if (signal_pending(current))
+ remove_wait_queue(&dmabuf->wait, &wait);
+ if(count > 0 && signal_pending(current) && signals_allowed)
return -ERESTARTSYS;
-
+ stop_dac(state);
return 0;
}
@@ -1143,52 +1220,72 @@ static void i810_channel_interrupt(struct i810_card *card)
if(!state->dmabuf.ready)
continue;
dmabuf = &state->dmabuf;
- if(dmabuf->enable & DAC_RUNNING)
+ if(dmabuf->enable & DAC_RUNNING) {
c=dmabuf->write_channel;
- else if(dmabuf->enable & ADC_RUNNING)
+ } else if(dmabuf->enable & ADC_RUNNING) {
c=dmabuf->read_channel;
- else /* This can occur going from R/W to close */
+ } else /* This can occur going from R/W to close */
continue;
port+=c->port;
-
- status = inw(port + OFF_SR);
+
+ if(card->pci_id == PCI_DEVICE_ID_SI_7012)
+ status = inw(port + OFF_PICB);
+ else
+ status = inw(port + OFF_SR);
+
#ifdef DEBUG_INTERRUPTS
printk("NUM %d PORT %X IRQ ( ST%d ", c->num, c->port, status);
#endif
if(status & DMA_INT_COMPLETE)
{
+ /* only wake_up() waiters if this interrupt signals
+ * that we are beyond a userfragsize of data free or
+ * available, and i810_update_ptr() does that for
+ * us
+ */
i810_update_ptr(state);
#ifdef DEBUG_INTERRUPTS
printk("COMP %d ", dmabuf->hwptr /
dmabuf->fragsize);
#endif
}
- if(status & DMA_INT_LVI)
+ if(status & (DMA_INT_LVI | DMA_INT_DCH))
{
+ /* wake_up() unconditionally on LVI and DCH */
i810_update_ptr(state);
wake_up(&dmabuf->wait);
#ifdef DEBUG_INTERRUPTS
- printk("LVI ");
+ if(status & DMA_INT_LVI)
+ printk("LVI ");
+ if(status & DMA_INT_DCH)
+ printk("DCH -");
#endif
- }
- if(status & DMA_INT_DCH)
- {
- i810_update_ptr(state);
if(dmabuf->enable & DAC_RUNNING)
count = dmabuf->count;
else
count = dmabuf->dmasize - dmabuf->count;
if(count > 0) {
outb(inb(port+OFF_CR) | 1, port+OFF_CR);
+#ifdef DEBUG_INTERRUPTS
+ printk(" CONTINUE ");
+#endif
} else {
+ if (dmabuf->enable & DAC_RUNNING)
+ __stop_dac(state);
+ if (dmabuf->enable & ADC_RUNNING)
+ __stop_adc(state);
+ dmabuf->enable = 0;
wake_up(&dmabuf->wait);
#ifdef DEBUG_INTERRUPTS
- printk("DCH - STOP ");
+ printk(" STOP ");
#endif
}
}
- outw(status & DMA_INT_MASK, port + OFF_SR);
+ if(card->pci_id == PCI_DEVICE_ID_SI_7012)
+ outw(status & DMA_INT_MASK, port + OFF_PICB);
+ else
+ outw(status & DMA_INT_MASK, port + OFF_SR);
}
#ifdef DEBUG_INTERRUPTS
printk(")\n");
@@ -1254,15 +1351,14 @@ static ssize_t i810_read(struct file *file, char *buffer, size_t count, loff_t *
return ret;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
- dmabuf->trigger &= ~PCM_ENABLE_OUTPUT;
ret = 0;
add_wait_queue(&dmabuf->wait, &waita);
while (count > 0) {
+ set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irqsave(&card->lock, flags);
if (PM_SUSPENDED(card)) {
spin_unlock_irqrestore(&card->lock, flags);
- set_current_state(TASK_INTERRUPTIBLE);
schedule();
if (signal_pending(current)) {
if (!ret) ret = -EAGAIN;
@@ -1271,10 +1367,7 @@ static ssize_t i810_read(struct file *file, char *buffer, size_t count, loff_t *
continue;
}
swptr = dmabuf->swptr;
- if (dmabuf->count > dmabuf->dmasize) {
- dmabuf->count = dmabuf->dmasize;
- }
- cnt = dmabuf->count - dmabuf->fragsize;
+ cnt = i810_get_available_read_data(state);
// this is to make the copy_to_user simpler below
if(cnt > (dmabuf->dmasize - swptr))
cnt = dmabuf->dmasize - swptr;
@@ -1282,20 +1375,39 @@ static ssize_t i810_read(struct file *file, char *buffer, size_t count, loff_t *
if (cnt > count)
cnt = count;
+ /* Lop off the last two bits to force the code to always
+ * read in full samples. This keeps software that sets
+ * O_NONBLOCK but doesn't check the return value of the
+ * read call from getting things out of state where they
+ * think a full 4 byte sample was read when really only
+ * a portion was, resulting in odd sound and stereo
+ * hysteresis.
+ */
+ cnt &= ~0x3;
if (cnt <= 0) {
unsigned long tmo;
- if(!dmabuf->enable) {
- dmabuf->trigger |= PCM_ENABLE_INPUT;
- start_adc(state);
- }
+ /*
+ * Don't let us deadlock. The ADC won't start if
+ * dmabuf->trigger isn't set. A call to SETTRIGGER
+ * could have turned it off after we set it to on
+ * previously.
+ */
+ dmabuf->trigger = PCM_ENABLE_INPUT;
+ /*
+ * This does three things. Updates LVI to be correct,
+ * makes sure the ADC is running, and updates the
+ * hwptr.
+ */
i810_update_lvi(state,1);
if (file->f_flags & O_NONBLOCK) {
if (!ret) ret = -EAGAIN;
- return ret;
+ goto done;
}
- /* This isnt strictly right for the 810 but it'll do */
- tmo = (dmabuf->dmasize * HZ) / (dmabuf->rate * 2);
- tmo >>= 1;
+ /* Set the timeout to how long it would take to fill
+ * two of our buffers. If we haven't been woken up
+ * by then, we know something is wrong.
+ */
+ tmo = (dmabuf->dmasize * HZ * 2) / (dmabuf->rate * 4);
/* There are two situations when sleep_on_timeout returns, one is when
the interrupt is serviced correctly and the process is waked up by
ISR ON TIME. Another is when timeout is expired, which means that
@@ -1303,7 +1415,7 @@ static ssize_t i810_read(struct file *file, char *buffer, size_t count, loff_t *
is TOO LATE for the process to be scheduled to run (scheduler latency)
which results in a (potential) buffer overrun. And worse, there is
NOTHING we can do to prevent it. */
- if (!interruptible_sleep_on_timeout(&dmabuf->wait, tmo)) {
+ if (!schedule_timeout(tmo >= 2 ? tmo : 2)) {
#ifdef DEBUG
printk(KERN_ERR "i810_audio: recording schedule timeout, "
"dmasz %u fragsz %u count %i hwptr %u swptr %u\n",
@@ -1315,7 +1427,7 @@ static ssize_t i810_read(struct file *file, char *buffer, size_t count, loff_t *
}
if (signal_pending(current)) {
ret = ret ? ret : -ERESTARTSYS;
- return ret;
+ goto done;
}
continue;
}
@@ -1341,10 +1453,8 @@ static ssize_t i810_read(struct file *file, char *buffer, size_t count, loff_t *
buffer += cnt;
ret += cnt;
}
- i810_update_lvi(state,1);
- if(!(dmabuf->enable & ADC_RUNNING))
- start_adc(state);
done:
+ i810_update_lvi(state,1);
set_current_state(TASK_RUNNING);
remove_wait_queue(&dmabuf->wait, &waita);
@@ -1384,15 +1494,14 @@ static ssize_t i810_write(struct file *file, const char *buffer, size_t count, l
return ret;
if (!access_ok(VERIFY_READ, buffer, count))
return -EFAULT;
- dmabuf->trigger &= ~PCM_ENABLE_INPUT;
ret = 0;
add_wait_queue(&dmabuf->wait, &waita);
while (count > 0) {
+ set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irqsave(&state->card->lock, flags);
if (PM_SUSPENDED(card)) {
spin_unlock_irqrestore(&card->lock, flags);
- set_current_state(TASK_INTERRUPTIBLE);
schedule();
if (signal_pending(current)) {
if (!ret) ret = -EAGAIN;
@@ -1402,11 +1511,12 @@ static ssize_t i810_write(struct file *file, const char *buffer, size_t count, l
}
swptr = dmabuf->swptr;
- if (dmabuf->count < 0) {
- dmabuf->count = 0;
- }
- cnt = dmabuf->dmasize - dmabuf->fragsize - dmabuf->count;
- // this is to make the copy_from_user simpler below
+ cnt = i810_get_free_write_space(state);
+ /* Bound the maximum size to how much we can copy to the
+ * dma buffer before we hit the end. If we have more to
+ * copy, it will get done in a second pass of this
+ * loop starting from the beginning of the buffer.
+ */
if(cnt > (dmabuf->dmasize - swptr))
cnt = dmabuf->dmasize - swptr;
spin_unlock_irqrestore(&state->card->lock, flags);
@@ -1416,25 +1526,30 @@ static ssize_t i810_write(struct file *file, const char *buffer, size_t count, l
#endif
if (cnt > count)
cnt = count;
+ /* Lop off the last two bits to force the code to always
+ * write in full samples. This keeps software that sets
+ * O_NONBLOCK but doesn't check the return value of the
+ * write call from getting things out of state where they
+ * think a full 4 byte sample was written when really only
+ * a portion was, resulting in odd sound and stereo
+ * hysteresis.
+ */
+ cnt &= ~0x3;
if (cnt <= 0) {
unsigned long tmo;
// There is data waiting to be played
- if(!dmabuf->enable && dmabuf->count) {
- /* force the starting incase SETTRIGGER has been used */
- /* to stop it, otherwise this is a deadlock situation */
- dmabuf->trigger |= PCM_ENABLE_OUTPUT;
- start_dac(state);
- }
- // Update the LVI pointer in case we have already
- // written data in this syscall and are just waiting
- // on the tail bit of data
+ /*
+ * Force the trigger setting since we would
+ * deadlock with it set any other way
+ */
+ dmabuf->trigger = PCM_ENABLE_OUTPUT;
i810_update_lvi(state,0);
if (file->f_flags & O_NONBLOCK) {
if (!ret) ret = -EAGAIN;
goto ret;
}
/* Not strictly correct but works */
- tmo = (dmabuf->dmasize * HZ) / (dmabuf->rate * 4);
+ tmo = (dmabuf->dmasize * HZ * 2) / (dmabuf->rate * 4);
/* There are two situations when sleep_on_timeout returns, one is when
the interrupt is serviced correctly and the process is waked up by
ISR ON TIME. Another is when timeout is expired, which means that
@@ -1442,7 +1557,7 @@ static ssize_t i810_write(struct file *file, const char *buffer, size_t count, l
is TOO LATE for the process to be scheduled to run (scheduler latency)
which results in a (potential) buffer underrun. And worse, there is
NOTHING we can do to prevent it. */
- if (!interruptible_sleep_on_timeout(&dmabuf->wait, tmo)) {
+ if (!schedule_timeout(tmo >= 2 ? tmo : 2)) {
#ifdef DEBUG
printk(KERN_ERR "i810_audio: playback schedule timeout, "
"dmasz %u fragsz %u count %i hwptr %u swptr %u\n",
@@ -1484,10 +1599,8 @@ static ssize_t i810_write(struct file *file, const char *buffer, size_t count, l
x = dmabuf->fragsize - (swptr % dmabuf->fragsize);
memset(dmabuf->rawbuf + swptr, '\0', x);
}
+ret:
i810_update_lvi(state,0);
- if (!dmabuf->enable && dmabuf->count >= dmabuf->userfragsize)
- start_dac(state);
- ret:
set_current_state(TASK_RUNNING);
remove_wait_queue(&dmabuf->wait, &waita);
@@ -1506,22 +1619,19 @@ static unsigned int i810_poll(struct file *file, struct poll_table_struct *wait)
return 0;
poll_wait(file, &dmabuf->wait, wait);
spin_lock_irqsave(&state->card->lock, flags);
- i810_update_ptr(state);
- if (file->f_mode & FMODE_READ && dmabuf->enable & ADC_RUNNING) {
- if (dmabuf->count >= (signed)dmabuf->fragsize)
+ if (dmabuf->enable & ADC_RUNNING ||
+ dmabuf->trigger & PCM_ENABLE_INPUT) {
+ if (i810_get_available_read_data(state) >=
+ (signed)dmabuf->userfragsize)
mask |= POLLIN | POLLRDNORM;
}
- if (file->f_mode & FMODE_WRITE && dmabuf->enable & DAC_RUNNING) {
- if (dmabuf->mapped) {
- if (dmabuf->count >= (signed)dmabuf->fragsize)
- mask |= POLLOUT | POLLWRNORM;
- } else {
- if ((signed)dmabuf->dmasize >= dmabuf->count + (signed)dmabuf->fragsize)
- mask |= POLLOUT | POLLWRNORM;
- }
+ if (dmabuf->enable & DAC_RUNNING ||
+ dmabuf->trigger & PCM_ENABLE_OUTPUT) {
+ if (i810_get_free_write_space(state) >=
+ (signed)dmabuf->userfragsize)
+ mask |= POLLOUT | POLLWRNORM;
}
spin_unlock_irqrestore(&state->card->lock, flags);
-
return mask;
}
@@ -1559,16 +1669,13 @@ static int i810_mmap(struct file *file, struct vm_area_struct *vma)
if (size > (PAGE_SIZE << dmabuf->buforder))
goto out;
ret = -EAGAIN;
- if (remap_page_range(vma, vma->vm_start, virt_to_phys(dmabuf->rawbuf),
+ if (remap_page_range(vma->vm_start, virt_to_phys(dmabuf->rawbuf),
size, vma->vm_page_prot))
goto out;
dmabuf->mapped = 1;
- if(vma->vm_flags & VM_WRITE)
- dmabuf->count = dmabuf->dmasize;
- else
- dmabuf->count = 0;
+ dmabuf->trigger = 0;
ret = 0;
-#ifdef DEBUG
+#ifdef DEBUG_MMAP
printk("i810_audio: mmap'ed %ld bytes of data space\n", size);
#endif
out:
@@ -1579,16 +1686,15 @@ out:
static int i810_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
struct i810_state *state = (struct i810_state *)file->private_data;
+ struct i810_channel *c = NULL;
struct dmabuf *dmabuf = &state->dmabuf;
unsigned long flags;
audio_buf_info abinfo;
count_info cinfo;
unsigned int i_glob_cnt;
- int val = 0, mapped, ret;
+ int val = 0, ret;
struct ac97_codec *codec = state->card->ac97_codec[0];
- mapped = ((file->f_mode & FMODE_WRITE) && dmabuf->mapped) ||
- ((file->f_mode & FMODE_READ) && dmabuf->mapped);
#ifdef DEBUG
printk("i810_audio: i810_ioctl, arg=0x%x, cmd=", arg ? *(int *)arg : 0);
#endif
@@ -1605,13 +1711,23 @@ static int i810_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
#ifdef DEBUG
printk("SNDCTL_DSP_RESET\n");
#endif
- /* FIXME: spin_lock ? */
+ spin_lock_irqsave(&state->card->lock, flags);
if (dmabuf->enable == DAC_RUNNING) {
- stop_dac(state);
+ c = dmabuf->write_channel;
+ __stop_dac(state);
}
if (dmabuf->enable == ADC_RUNNING) {
- stop_adc(state);
+ c = dmabuf->read_channel;
+ __stop_adc(state);
}
+ if (c != NULL) {
+ outb(2, state->card->iobase+c->port+OFF_CR); /* reset DMA machine */
+ outl(virt_to_bus(&c->sg[0]), state->card->iobase+c->port+OFF_BDBAR);
+ outb(0, state->card->iobase+c->port+OFF_CIV);
+ outb(0, state->card->iobase+c->port+OFF_LVI);
+ }
+
+ spin_unlock_irqrestore(&state->card->lock, flags);
synchronize_irq();
dmabuf->ready = 0;
dmabuf->swptr = dmabuf->hwptr = 0;
@@ -1624,10 +1740,9 @@ static int i810_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
#endif
if (dmabuf->enable != DAC_RUNNING || file->f_flags & O_NONBLOCK)
return 0;
- drain_dac(state, 0);
- dmabuf->ready = 0;
- dmabuf->swptr = dmabuf->hwptr = 0;
- dmabuf->count = dmabuf->total_bytes = 0;
+ if((val = drain_dac(state, 1)))
+ return val;
+ dmabuf->total_bytes = 0;
return 0;
case SNDCTL_DSP_SPEED: /* set smaple rate */
@@ -1678,9 +1793,6 @@ static int i810_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
#ifdef DEBUG
printk("SNDCTL_DSP_STEREO\n");
#endif
- if (get_user(val, (int *)arg))
- return -EFAULT;
-
if (dmabuf->enable & DAC_RUNNING) {
stop_dac(state);
}
@@ -1713,18 +1825,7 @@ static int i810_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
#ifdef DEBUG
printk("SNDCTL_DSP_SETFMT\n");
#endif
- if (get_user(val, (int *)arg))
- return -EFAULT;
-
- switch ( val ) {
- case AFMT_S16_LE:
- break;
- case AFMT_QUERY:
- default:
- val = AFMT_S16_LE;
- break;
- }
- return put_user(val, (int *)arg);
+ return put_user(AFMT_S16_LE, (int *)arg);
case SNDCTL_DSP_CHANNELS:
#ifdef DEBUG
@@ -1824,22 +1925,47 @@ static int i810_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
dmabuf->ossfragsize = 1<<(val & 0xffff);
dmabuf->ossmaxfrags = (val >> 16) & 0xffff;
- if (dmabuf->ossmaxfrags <= 4)
- dmabuf->ossmaxfrags = 4;
- else if (dmabuf->ossmaxfrags <= 8)
- dmabuf->ossmaxfrags = 8;
- else if (dmabuf->ossmaxfrags <= 16)
- dmabuf->ossmaxfrags = 16;
- else
- dmabuf->ossmaxfrags = 32;
+ if (!dmabuf->ossfragsize || !dmabuf->ossmaxfrags)
+ return -EINVAL;
+ /*
+ * Bound the frag size into our allowed range of 256 - 4096
+ */
+ if (dmabuf->ossfragsize < 256)
+ dmabuf->ossfragsize = 256;
+ else if (dmabuf->ossfragsize > 4096)
+ dmabuf->ossfragsize = 4096;
+ /*
+ * The numfrags could be something reasonable, or it could
+ * be 0xffff meaning "Give me as much as possible". So,
+ * we check the numfrags * fragsize doesn't exceed our
+ * 64k buffer limit, nor is it less than our 8k minimum.
+ * If it fails either one of these checks, then adjust the
+ * number of fragments, not the size of them. It's OK if
+ * our number of fragments doesn't equal 32 or anything
+ * like our hardware based number now since we are using
+ * a different frag count for the hardware. Before we get
+ * into this though, bound the maxfrags to avoid overflow
+ * issues. A reasonable bound would be 64k / 256 since our
+ * maximum buffer size is 64k and our minimum frag size is
+ * 256. On the other end, our minimum buffer size is 8k and
+ * our maximum frag size is 4k, so the lower bound should
+ * be 2.
+ */
+
+ if(dmabuf->ossmaxfrags > 256)
+ dmabuf->ossmaxfrags = 256;
+ else if (dmabuf->ossmaxfrags < 2)
+ dmabuf->ossmaxfrags = 2;
+
val = dmabuf->ossfragsize * dmabuf->ossmaxfrags;
- if (val < 16384)
- val = 16384;
- if (val > 65536)
- val = 65536;
- dmabuf->ossmaxfrags = val/dmabuf->ossfragsize;
- if(dmabuf->ossmaxfrags<4)
- dmabuf->ossfragsize = val/4;
+ while (val < 8192) {
+ val <<= 1;
+ dmabuf->ossmaxfrags <<= 1;
+ }
+ while (val > 65536) {
+ val >>= 1;
+ dmabuf->ossmaxfrags >>= 1;
+ }
dmabuf->ready = 0;
#ifdef DEBUG
printk("SNDCTL_DSP_SETFRAGMENT 0x%x, %d, %d\n", val,
@@ -1857,13 +1983,13 @@ static int i810_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
i810_update_ptr(state);
abinfo.fragsize = dmabuf->userfragsize;
abinfo.fragstotal = dmabuf->userfrags;
- if(dmabuf->mapped)
- abinfo.bytes = dmabuf->count;
- else
- abinfo.bytes = dmabuf->dmasize - dmabuf->count;
+ if (dmabuf->mapped)
+ abinfo.bytes = dmabuf->dmasize;
+ else
+ abinfo.bytes = i810_get_free_write_space(state);
abinfo.fragments = abinfo.bytes / dmabuf->userfragsize;
spin_unlock_irqrestore(&state->card->lock, flags);
-#ifdef DEBUG
+#if defined(DEBUG) || defined(DEBUG_MMAP)
printk("SNDCTL_DSP_GETOSPACE %d, %d, %d, %d\n", abinfo.bytes,
abinfo.fragsize, abinfo.fragments, abinfo.fragstotal);
#endif
@@ -1875,17 +2001,17 @@ static int i810_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
if (!dmabuf->ready && (val = prog_dmabuf(state, 0)) != 0)
return val;
spin_lock_irqsave(&state->card->lock, flags);
- i810_update_ptr(state);
+ val = i810_get_free_write_space(state);
cinfo.bytes = dmabuf->total_bytes;
cinfo.ptr = dmabuf->hwptr;
- cinfo.blocks = (dmabuf->dmasize - dmabuf->count)/dmabuf->userfragsize;
- if (dmabuf->mapped) {
- dmabuf->count = (dmabuf->dmasize -
- (dmabuf->count & (dmabuf->userfragsize-1)));
+ cinfo.blocks = val/dmabuf->userfragsize;
+ if (dmabuf->mapped && (dmabuf->trigger & PCM_ENABLE_OUTPUT)) {
+ dmabuf->count += val;
+ dmabuf->swptr = (dmabuf->swptr + val) % dmabuf->dmasize;
__i810_update_lvi(state, 0);
}
spin_unlock_irqrestore(&state->card->lock, flags);
-#ifdef DEBUG
+#if defined(DEBUG) || defined(DEBUG_MMAP)
printk("SNDCTL_DSP_GETOPTR %d, %d, %d, %d\n", cinfo.bytes,
cinfo.blocks, cinfo.ptr, dmabuf->count);
#endif
@@ -1897,13 +2023,12 @@ static int i810_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
if (!dmabuf->ready && (val = prog_dmabuf(state, 1)) != 0)
return val;
spin_lock_irqsave(&state->card->lock, flags);
- i810_update_ptr(state);
+ abinfo.bytes = i810_get_available_read_data(state);
abinfo.fragsize = dmabuf->userfragsize;
abinfo.fragstotal = dmabuf->userfrags;
- abinfo.bytes = dmabuf->dmasize - dmabuf->count;
abinfo.fragments = abinfo.bytes / dmabuf->userfragsize;
spin_unlock_irqrestore(&state->card->lock, flags);
-#ifdef DEBUG
+#if defined(DEBUG) || defined(DEBUG_MMAP)
printk("SNDCTL_DSP_GETISPACE %d, %d, %d, %d\n", abinfo.bytes,
abinfo.fragsize, abinfo.fragments, abinfo.fragstotal);
#endif
@@ -1915,16 +2040,17 @@ static int i810_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
if (!dmabuf->ready && (val = prog_dmabuf(state, 0)) != 0)
return val;
spin_lock_irqsave(&state->card->lock, flags);
- i810_update_ptr(state);
+ val = i810_get_available_read_data(state);
cinfo.bytes = dmabuf->total_bytes;
- cinfo.blocks = dmabuf->count/dmabuf->userfragsize;
+ cinfo.blocks = val/dmabuf->userfragsize;
cinfo.ptr = dmabuf->hwptr;
- if (dmabuf->mapped) {
- dmabuf->count &= (dmabuf->userfragsize-1);
+ if (dmabuf->mapped && (dmabuf->trigger & PCM_ENABLE_INPUT)) {
+ dmabuf->count -= val;
+ dmabuf->swptr = (dmabuf->swptr + val) % dmabuf->dmasize;
__i810_update_lvi(state, 1);
}
spin_unlock_irqrestore(&state->card->lock, flags);
-#ifdef DEBUG
+#if defined(DEBUG) || defined(DEBUG_MMAP)
printk("SNDCTL_DSP_GETIPTR %d, %d, %d, %d\n", cinfo.bytes,
cinfo.blocks, cinfo.ptr, dmabuf->count);
#endif
@@ -1954,7 +2080,7 @@ static int i810_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
case SNDCTL_DSP_SETTRIGGER:
if (get_user(val, (int *)arg))
return -EFAULT;
-#ifdef DEBUG
+#if defined(DEBUG) || defined(DEBUG_MMAP)
printk("SNDCTL_DSP_SETTRIGGER 0x%x\n", val);
#endif
if( !(val & PCM_ENABLE_INPUT) && dmabuf->enable == ADC_RUNNING) {
@@ -1964,7 +2090,7 @@ static int i810_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
stop_dac(state);
}
dmabuf->trigger = val;
- if(val & PCM_ENABLE_OUTPUT) {
+ if(val & PCM_ENABLE_OUTPUT && !(dmabuf->enable & DAC_RUNNING)) {
if (!dmabuf->write_channel) {
dmabuf->ready = 0;
dmabuf->write_channel = state->card->alloc_pcm_channel(state->card);
@@ -1974,13 +2100,18 @@ static int i810_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
if (!dmabuf->ready && (ret = prog_dmabuf(state, 0)))
return ret;
if (dmabuf->mapped) {
- dmabuf->count = dmabuf->dmasize;
- i810_update_lvi(state,0);
- }
- if (!dmabuf->enable && dmabuf->count > dmabuf->userfragsize)
+ spin_lock_irqsave(&state->card->lock, flags);
+ i810_update_ptr(state);
+ dmabuf->count = 0;
+ dmabuf->swptr = dmabuf->hwptr;
+ dmabuf->count = i810_get_free_write_space(state);
+ dmabuf->swptr = (dmabuf->swptr + dmabuf->count) % dmabuf->dmasize;
+ __i810_update_lvi(state, 0);
+ spin_unlock_irqrestore(&state->card->lock, flags);
+ } else
start_dac(state);
}
- if(val & PCM_ENABLE_INPUT) {
+ if(val & PCM_ENABLE_INPUT && !(dmabuf->enable & ADC_RUNNING)) {
if (!dmabuf->read_channel) {
dmabuf->ready = 0;
dmabuf->read_channel = state->card->alloc_rec_pcm_channel(state->card);
@@ -1990,12 +2121,14 @@ static int i810_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
if (!dmabuf->ready && (ret = prog_dmabuf(state, 1)))
return ret;
if (dmabuf->mapped) {
+ spin_lock_irqsave(&state->card->lock, flags);
+ i810_update_ptr(state);
+ dmabuf->swptr = dmabuf->hwptr;
dmabuf->count = 0;
- i810_update_lvi(state,1);
+ spin_unlock_irqrestore(&state->card->lock, flags);
}
- if (!dmabuf->enable && dmabuf->count <
- (dmabuf->dmasize - dmabuf->userfragsize))
- start_adc(state);
+ i810_update_lvi(state, 1);
+ start_adc(state);
}
return 0;
@@ -2199,7 +2332,19 @@ static int i810_open(struct inode *inode, struct file *file)
/* find an avaiable virtual channel (instance of /dev/dsp) */
while (card != NULL) {
- for (i = 0; i < NR_HW_CH; i++) {
+ /*
+ * If we are initializing and then fail, card could go
+ * away unexpectedly while we are in the for() loop.
+ * So, check for card on each iteration before we check
+ * for card->initializing to avoid a possible oops.
+ * This usually only matters for times when the driver is
+ * autoloaded by kmod.
+ */
+ for (i = 0; i < 50 && card && card->initializing; i++) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ/20);
+ }
+ for (i = 0; i < NR_HW_CH && card && !card->initializing; i++) {
if (card->states[i] == NULL) {
state = card->states[i] = (struct i810_state *)
kmalloc(sizeof(struct i810_state), GFP_KERNEL);
@@ -2233,8 +2378,8 @@ found_virt:
card->states[i] = NULL;;
return -EBUSY;
}
- i810_set_adc_rate(state, 8000);
dmabuf->trigger |= PCM_ENABLE_INPUT;
+ i810_set_adc_rate(state, 8000);
}
if(file->f_mode & FMODE_WRITE) {
if((dmabuf->write_channel = card->alloc_pcm_channel(card)) == NULL) {
@@ -2245,13 +2390,13 @@ found_virt:
/* Initialize to 8kHz? What if we don't support 8kHz? */
/* Let's change this to check for S/PDIF stuff */
+ dmabuf->trigger |= PCM_ENABLE_OUTPUT;
if ( spdif_locked ) {
i810_set_dac_rate(state, spdif_locked);
i810_set_spdif_output(state, AC97_EA_SPSA_3_4, spdif_locked);
} else {
i810_set_dac_rate(state, 8000);
}
- dmabuf->trigger |= PCM_ENABLE_OUTPUT;
}
/* set default sample format. According to OSS Programmer's Guide /dev/dsp
@@ -2278,11 +2423,10 @@ static int i810_release(struct inode *inode, struct file *file)
lock_kernel();
/* stop DMA state machine and free DMA buffers/channels */
- if(dmabuf->enable & DAC_RUNNING ||
- (dmabuf->count && (dmabuf->trigger & PCM_ENABLE_OUTPUT))) {
- drain_dac(state,0);
+ if(dmabuf->trigger & PCM_ENABLE_OUTPUT) {
+ drain_dac(state, 0);
}
- if(dmabuf->enable & ADC_RUNNING) {
+ if(dmabuf->trigger & PCM_ENABLE_INPUT) {
stop_adc(state);
}
spin_lock_irqsave(&card->lock, flags);
@@ -2348,13 +2492,26 @@ static int i810_open_mixdev(struct inode *inode, struct file *file)
unsigned int minor = minor(inode->i_rdev);
struct i810_card *card = devs;
- for (card = devs; card != NULL; card = card->next)
- for (i = 0; i < NR_AC97; i++)
+ for (card = devs; card != NULL; card = card->next) {
+ /*
+ * If we are initializing and then fail, card could go
+ * away unexpectedly while we are in the for() loop.
+ * So, check for card on each iteration before we check
+ * for card->initializing to avoid a possible oops.
+ * This usually only matters for times when the driver is
+ * autoloaded by kmod.
+ */
+ for (i = 0; i < 50 && card && card->initializing; i++) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ/20);
+ }
+ for (i = 0; i < NR_AC97 && card && !card->initializing; i++)
if (card->ac97_codec[i] != NULL &&
card->ac97_codec[i]->dev_mixer == minor) {
file->private_data = card->ac97_codec[i];
return 0;
}
+ }
return -ENODEV;
}
@@ -2700,6 +2857,7 @@ static int __init i810_probe(struct pci_dev *pci_dev, const struct pci_device_id
}
memset(card, 0, sizeof(*card));
+ card->initializing = 1;
card->iobase = pci_resource_start (pci_dev, 1);
card->ac97base = pci_resource_start (pci_dev, 0);
card->pci_dev = pci_dev;
@@ -2756,7 +2914,8 @@ static int __init i810_probe(struct pci_dev *pci_dev, const struct pci_device_id
}
pci_set_drvdata(pci_dev, card);
- if(clocking == 48000) {
+ if(clocking == 0) {
+ clocking = 48000;
i810_configure_clocking();
}
@@ -2775,7 +2934,7 @@ static int __init i810_probe(struct pci_dev *pci_dev, const struct pci_device_id
kfree(card);
return -ENODEV;
}
-
+ card->initializing = 0;
return 0;
}
@@ -2793,6 +2952,7 @@ static void __exit i810_remove(struct pci_dev *pci_dev)
if (card->ac97_codec[i] != NULL) {
unregister_sound_mixer(card->ac97_codec[i]->dev_mixer);
kfree (card->ac97_codec[i]);
+ card->ac97_codec[i] = NULL;
}
unregister_sound_dsp(card->dev_audio);
kfree(card);
@@ -2961,14 +3121,11 @@ static int __init i810_init_module (void)
if(ftsodell != 0) {
printk("i810_audio: ftsodell is now a deprecated option.\n");
}
- if(clocking == 48000) {
- i810_configure_clocking();
- }
if(spdif_locked > 0 ) {
if(spdif_locked == 32000 || spdif_locked == 44100 || spdif_locked == 48000) {
printk("i810_audio: Enabling S/PDIF at sample rate %dHz.\n", spdif_locked);
} else {
- printk("i810_audio: S/PDIF can only be locked to 32000, 441000, or 48000Hz.\n");
+ printk("i810_audio: S/PDIF can only be locked to 32000, 44100, or 48000Hz.\n");
spdif_locked = 0;
}
}
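
A note on the ring arithmetic the i810 hunks above rely on: the controller's CIV and LVI registers index a 32-entry scatter-gather list, so every increment and comparison is done modulo 32 by masking with 31. A minimal standalone sketch of that arithmetic (the helper names are illustrative, not part of the driver):

    #define I810_SG_ENTRIES 32

    /* advance a 5-bit scatter-gather index, wrapping at 32 entries */
    static inline unsigned int sg_next(unsigned int civ)
    {
            return (civ + 1) & (I810_SG_ENTRIES - 1);   /* same as (civ + 1) & 31 */
    }

    /* overrun/underrun test: compare CIV and LVI after masking both to 5 bits */
    static inline int sg_caught_up(unsigned int civ, unsigned int lvi)
    {
            return (civ & 31) == (lvi & 31);
    }
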
diff --git a/drivers/usb/hcd.c b/drivers/usb/hcd.c
index e8e3e840ae90..f6095f2f8c48 100644
--- a/drivers/usb/hcd.c
+++ b/drivers/usb/hcd.c
@@ -996,6 +996,9 @@ static int hcd_submit_urb (struct urb *urb)
// hcd_monitor_hook(MONITOR_URB_SUBMIT, urb)
// It would catch submission paths for all urbs.
+ /* increment the reference count of the urb, as we now also control it. */
+ urb = usb_get_urb(urb);
+
/*
* Atomically queue the urb, first to our records, then to the HCD.
* Access to urb->status is controlled by urb->lock ... changes on
@@ -1328,5 +1331,6 @@ void usb_hcd_giveback_urb (struct usb_hcd *hcd, struct urb *urb)
/* pass ownership to the completion handler */
usb_dec_dev_use (dev);
urb->complete (urb);
+ usb_put_urb (urb);
}
EXPORT_SYMBOL (usb_hcd_giveback_urb);
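
Taken together, the two hcd.c hunks above give the host controller side of urb reference counting: take a reference when the urb is queued, and drop it only after the completion handler has run. A condensed sketch of that ordering (the helper below is hypothetical; the real code paths are hcd_submit_urb() and usb_hcd_giveback_urb()):

    #include <linux/usb.h>

    static void example_hcd_lifetime(struct urb *urb)
    {
            urb = usb_get_urb(urb);     /* the HCD now holds its own reference */
            /* ... queue the urb and let the hardware complete the transfer ... */
            urb->complete(urb);         /* the driver may resubmit or drop its reference here */
            usb_put_urb(urb);           /* release the HCD reference last */
    }
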
diff --git a/drivers/usb/ov511.c b/drivers/usb/ov511.c
index 5a61edd2cbdf..c84a6691aec8 100644
--- a/drivers/usb/ov511.c
+++ b/drivers/usb/ov511.c
@@ -57,7 +57,7 @@
/*
* Version Information
*/
-#define DRIVER_VERSION "v1.48 for Linux 2.4"
+#define DRIVER_VERSION "v1.48a for Linux 2.4"
#define EMAIL "mmcclell@bigfoot.com"
#define DRIVER_AUTHOR "Mark McClelland <mmcclell@bigfoot.com> & Bret Wallach \
& Orion Sky Lawlor <olawlor@acm.org> & Kevin Moore & Charl P. Botha \
@@ -492,6 +492,10 @@ rvfree(void *mem, unsigned long size)
static struct proc_dir_entry *ov511_proc_entry = NULL;
extern struct proc_dir_entry *video_proc_entry;
+static struct file_operations ov511_control_fops = {
+ ioctl: ov511_control_ioctl,
+};
+
#define YES_NO(x) ((x) ? "yes" : "no")
/* /proc/video/ov511/<minor#>/info */
@@ -673,8 +677,8 @@ create_proc_ov511_cam(struct usb_ov511 *ov511)
unlock_kernel();
return;
}
- ov511->proc_control->proc_fops->ioctl = ov511_control_ioctl;
ov511->proc_control->data = ov511;
+ ov511->proc_control->proc_fops = &ov511_control_fops;
unlock_kernel();
}
@@ -6893,14 +6897,14 @@ ov51x_disconnect(struct usb_device *dev, void *ptr)
}
}
- usb_driver_release_interface(&ov511_driver,
- &ov511->dev->actconfig->interface[ov511->iface]);
- ov511->dev = NULL;
-
#if defined(CONFIG_PROC_FS) && defined(CONFIG_VIDEO_PROC_FS)
destroy_proc_ov511_cam(ov511);
#endif
+ usb_driver_release_interface(&ov511_driver,
+ &ov511->dev->actconfig->interface[ov511->iface]);
+ ov511->dev = NULL;
+
/* Free the memory */
if (ov511 && !ov511->user) {
ov511_dealloc(ov511, 1);
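
The proc hunk above stops patching the shared proc_fops structure in place and instead points the entry at a private file_operations of its own. A minimal sketch of that pattern (all names below are hypothetical, not ov511 code):

    #include <linux/fs.h>
    #include <linux/proc_fs.h>

    static int example_ioctl(struct inode *inode, struct file *file,
                             unsigned int cmd, unsigned long arg)
    {
            return -EINVAL;             /* placeholder */
    }

    /* a private fops for this proc entry only; the gcc "label:" initializer
     * style matches the ov511 hunk above */
    static struct file_operations example_proc_fops = {
            ioctl:  example_ioctl,
    };

    /* at creation time the entry is wired up roughly as the hunk does:
     *      entry->data = private_data;
     *      entry->proc_fops = &example_proc_fops;
     */
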
diff --git a/drivers/usb/usb.c b/drivers/usb/usb.c
index 8464a085a567..a6dfc4e70758 100644
--- a/drivers/usb/usb.c
+++ b/drivers/usb/usb.c
@@ -1074,16 +1074,18 @@ void usb_inc_dev_use(struct usb_device *dev)
* ----------------------------------------------------------------------*/
/**
- * usb_alloc_urb - creates a new urb for a USB driver to use
- * @iso_packets: number of iso packets for this urb
+ * usb_alloc_urb - creates a new urb for a USB driver to use
+ * @iso_packets: number of iso packets for this urb
*
- * Creates an urb for the USB driver to use and returns a pointer to it.
- * If no memory is available, NULL is returned.
+ * Creates an urb for the USB driver to use, initializes a few internal
+ * structures, increments the usage counter, and returns a pointer to it.
+ *
+ * If no memory is available, NULL is returned.
*
- * If the driver want to use this urb for interrupt, control, or bulk
- * endpoints, pass '0' as the number of iso packets.
+ * If the driver wants to use this urb for interrupt, control, or bulk
+ * endpoints, pass '0' as the number of iso packets.
*
- * The driver should call usb_free_urb() when it is finished with the urb.
+ * The driver must call usb_free_urb() when it is finished with the urb.
*/
struct urb *usb_alloc_urb(int iso_packets)
{
@@ -1098,25 +1100,49 @@ struct urb *usb_alloc_urb(int iso_packets)
}
memset(urb, 0, sizeof(*urb));
-
+ atomic_inc(&urb->count);
spin_lock_init(&urb->lock);
return urb;
}
/**
- * usb_free_urb - frees the memory used by a urb
- * @urb: pointer to the urb to free
+ * usb_free_urb - frees the memory used by a urb when all users of it are finished
+ * @urb: pointer to the urb to free
+ *
+ * Must be called when a user of a urb is finished with it. When the last user
+ * of the urb calls this function, the memory of the urb is freed.
*
- * If an urb is created with a call to usb_create_urb() it should be
- * cleaned up with a call to usb_free_urb() when the driver is finished
- * with it.
+ * Note: The transfer buffer associated with the urb is not freed; that must be
+ * done elsewhere.
*/
void usb_free_urb(struct urb *urb)
{
if (urb)
- kfree(urb);
+ if (atomic_dec_and_test(&urb->count))
+ kfree(urb);
}
+
+/**
+ * usb_get_urb - increments the reference count of the urb
+ * @urb: pointer to the urb to modify
+ *
+ * This must be called whenever a urb is transferred from a device driver to a
+ * host controller driver. This allows proper reference counting to happen
+ * for urbs.
+ *
+ * A pointer to the urb with the incremented reference counter is returned.
+ */
+struct urb * usb_get_urb(struct urb *urb)
+{
+ if (urb) {
+ atomic_inc(&urb->count);
+ return urb;
+ } else
+ return NULL;
+}
+
+
/*-------------------------------------------------------------------*/
/**
@@ -1129,7 +1155,7 @@ void usb_free_urb(struct urb *urb)
* This call may be issued in interrupt context.
*
* The caller must have correctly initialized the URB before submitting
- * it. Macros such as FILL_BULK_URB() and FILL_CONTROL_URB() are
+ * it. Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
* available to ensure that most fields are correctly initialized, for
* the particular kind of transfer, although they will not initialize
* any transfer flags.
@@ -2794,6 +2820,7 @@ EXPORT_SYMBOL(usb_get_current_frame_number);
// asynchronous request completion model
EXPORT_SYMBOL(usb_alloc_urb);
EXPORT_SYMBOL(usb_free_urb);
+EXPORT_SYMBOL(usb_get_urb);
EXPORT_SYMBOL(usb_submit_urb);
EXPORT_SYMBOL(usb_unlink_urb);
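
From a device driver's point of view, the usb.c hunks above mean an urb now starts life with a reference count of one and its memory is only released when the last user drops it; the HCD takes its own reference at submission time. A hypothetical sketch of that lifetime (device, pipe, buffer and completion handler are placeholders, the usb_fill_bulk_urb() argument order is assumed to follow the old FILL_BULK_URB macro, and it assumes the driver needs no further access to the urb after submitting):

    #include <linux/usb.h>

    static void example_complete(struct urb *urb)
    {
            /* per the hcd.c hunk, the HCD drops its reference after this returns */
    }

    static int example_send(struct usb_device *dev, unsigned int pipe,
                            void *buf, int len)
    {
            struct urb *urb = usb_alloc_urb(0);     /* 0 iso packets: bulk/control */
            int ret;

            if (!urb)
                    return -ENOMEM;
            usb_fill_bulk_urb(urb, dev, pipe, buf, len, example_complete, NULL);
            ret = usb_submit_urb(urb);
            usb_free_urb(urb);      /* drop our reference; the memory goes away
                                     * once the HCD reference is dropped too */
            return ret;
    }
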
diff --git a/fs/Config.in b/fs/Config.in
index e39a91ca1b60..23796bac7bc2 100644
--- a/fs/Config.in
+++ b/fs/Config.in
@@ -141,23 +141,11 @@ else
define_bool CONFIG_SMB_FS n
fi
-#
-# Do we need the compression support?
-#
if [ "$CONFIG_ZISOFS" = "y" ]; then
define_tristate CONFIG_ZISOFS_FS $CONFIG_ISO9660_FS
else
define_tristate CONFIG_ZISOFS_FS n
fi
-if [ "$CONFIG_CRAMFS" = "y" -o "$CONFIG_ZISOFS_FS" = "y" ]; then
- define_tristate CONFIG_ZLIB_FS_INFLATE y
-else
- if [ "$CONFIG_CRAMFS" = "m" -o "$CONFIG_ZISOFS_FS" = "m" ]; then
- define_tristate CONFIG_ZLIB_FS_INFLATE m
- else
- define_tristate CONFIG_ZLIB_FS_INFLATE n
- fi
-fi
mainmenu_option next_comment
comment 'Partition Types'
diff --git a/fs/Makefile b/fs/Makefile
index 23cf3614a6ba..23b922a09781 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -14,7 +14,7 @@ obj-y := open.o read_write.o devices.o file_table.o buffer.o \
bio.o super.o block_dev.o char_dev.o stat.o exec.o pipe.o \
namei.o fcntl.o ioctl.o readdir.o select.o fifo.o locks.o \
dcache.o inode.o attr.o bad_inode.o file.o iobuf.o dnotify.o \
- filesystems.o namespace.o seq_file.o
+ filesystems.o namespace.o seq_file.o xattr.o
ifeq ($(CONFIG_QUOTA),y)
obj-y += dquot.o
@@ -30,7 +30,6 @@ subdir-y += driverfs
subdir-$(CONFIG_EXT3_FS) += ext3 # Before ext2 so root fs can be ext3
subdir-$(CONFIG_JBD) += jbd
subdir-$(CONFIG_EXT2_FS) += ext2
-subdir-$(CONFIG_ZLIB_FS_INFLATE) += inflate_fs
subdir-$(CONFIG_CRAMFS) += cramfs
subdir-$(CONFIG_RAMFS) += ramfs
subdir-$(CONFIG_CODA_FS) += coda
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 0d848e9238e4..ef9fb9f89aa2 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -170,6 +170,8 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
loff_t size = file->f_dentry->d_inode->i_bdev->bd_inode->i_size;
loff_t retval;
+ lock_kernel();
+
switch (origin) {
case 2:
offset += size;
@@ -186,6 +188,7 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
}
retval = offset;
}
+ unlock_kernel();
return retval;
}
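
The same pattern, taking the big kernel lock around the whole llseek body, is applied again in the hfs and hpfs hunks further down. A minimal sketch of the resulting shape (assuming the usual 2.5-era llseek prototype and fops wiring):

    #include <linux/fs.h>
    #include <linux/smp_lock.h>

    static loff_t example_llseek(struct file *file, loff_t offset, int origin)
    {
            loff_t retval;

            lock_kernel();
            switch (origin) {
            case 2:                                 /* SEEK_END */
                    offset += file->f_dentry->d_inode->i_size;
                    break;
            case 1:                                 /* SEEK_CUR */
                    offset += file->f_pos;
                    break;
            }
            if (offset < 0) {
                    retval = -EINVAL;
            } else {
                    file->f_pos = offset;
                    retval = offset;
            }
            unlock_kernel();
            return retval;
    }
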
diff --git a/fs/buffer.c b/fs/buffer.c
index b3d7fdc72773..866a5a41f905 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1679,6 +1679,52 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
return 0;
}
+/* utility function for filesystems that need to do work on expanding
+ * truncates. Uses prepare/commit_write to allow the filesystem to
+ * deal with the hole.
+ */
+int generic_cont_expand(struct inode *inode, loff_t size)
+{
+ struct address_space *mapping = inode->i_mapping;
+ struct page *page;
+ unsigned long index, offset, limit;
+ int err;
+
+ err = -EFBIG;
+ limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
+ if (limit != RLIM_INFINITY && size > (loff_t)limit) {
+ send_sig(SIGXFSZ, current, 0);
+ goto out;
+ }
+ if (size > inode->i_sb->s_maxbytes)
+ goto out;
+
+ offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
+
+ /* ugh. in prepare/commit_write, if from==to==start of block, we
+ ** skip the prepare. make sure we never send an offset for the start
+ ** of a block
+ */
+ if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
+ offset++;
+ }
+ index = size >> PAGE_CACHE_SHIFT;
+ err = -ENOMEM;
+ page = grab_cache_page(mapping, index);
+ if (!page)
+ goto out;
+ err = mapping->a_ops->prepare_write(NULL, page, offset, offset);
+ if (!err) {
+ err = mapping->a_ops->commit_write(NULL, page, offset, offset);
+ }
+ UnlockPage(page);
+ page_cache_release(page);
+ if (err > 0)
+ err = 0;
+out:
+ return err;
+}
+
/*
* For moronic filesystems that do not allow holes in file.
* We may have to extend the file.
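
A plausible caller of the new generic_cont_expand() helper (this usage is an assumption, not part of the patch): a filesystem that cannot represent holes can invoke it from its size-changing path whenever the new size is larger than the current one, letting prepare_write/commit_write deal with the gap.

    #include <linux/fs.h>

    /* prototype as defined in the hunk above; the header declaration is assumed */
    extern int generic_cont_expand(struct inode *inode, loff_t size);

    /* hypothetical helper: grow an inode to new_size using the new primitive */
    static int example_grow_file(struct inode *inode, loff_t new_size)
    {
            if (new_size <= inode->i_size)
                    return 0;               /* nothing to extend */
            return generic_cont_expand(inode, new_size);
    }
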
diff --git a/fs/cramfs/Makefile b/fs/cramfs/Makefile
index dc8352324da0..112c87bec11c 100644
--- a/fs/cramfs/Makefile
+++ b/fs/cramfs/Makefile
@@ -8,6 +8,4 @@ obj-y := inode.o uncompress.o
obj-m := $(O_TARGET)
-CFLAGS_uncompress.o := -I $(TOPDIR)/fs/inflate_fs
-
include $(TOPDIR)/Rules.make
diff --git a/fs/devfs/base.c b/fs/devfs/base.c
index e7b2eec05fe2..89a698a63ea6 100644
--- a/fs/devfs/base.c
+++ b/fs/devfs/base.c
@@ -608,6 +608,10 @@
Fixed deadlock bug in <devfs_d_revalidate_wait>.
Tag VFS deletable in <devfs_mk_symlink> if handle ignored.
v1.10
+ 20020129 Richard Gooch <rgooch@atnf.csiro.au>
+ Added KERN_* to remaining messages.
+ Cleaned up declaration of <stat_read>.
+ v1.11
*/
#include <linux/types.h>
#include <linux/errno.h>
@@ -640,7 +644,7 @@
#include <asm/bitops.h>
#include <asm/atomic.h>
-#define DEVFS_VERSION "1.10 (20020120)"
+#define DEVFS_VERSION "1.11 (20020129)"
#define DEVFS_NAME "devfs"
@@ -848,8 +852,8 @@ static int devfsd_ioctl (struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg);
static int devfsd_close (struct inode *inode, struct file *file);
#ifdef CONFIG_DEVFS_DEBUG
-static int stat_read (struct file *file, char *buf, size_t len,
- loff_t *ppos);
+static ssize_t stat_read (struct file *file, char *buf, size_t len,
+ loff_t *ppos);
static struct file_operations stat_fops =
{
read: stat_read,
@@ -2431,10 +2435,10 @@ static int check_disc_changed (struct devfs_entry *de)
if (bdops->check_media_change == NULL) goto out;
if ( !bdops->check_media_change (dev) ) goto out;
retval = 1;
- printk ( KERN_DEBUG "VFS: Disk change detected on device %s\n",
+ printk (KERN_DEBUG "VFS: Disk change detected on device %s\n",
kdevname (dev) );
- if (invalidate_device(dev, 0))
- printk("VFS: busy inodes on changed media..\n");
+ if ( invalidate_device (dev, 0) )
+ printk (KERN_WARNING "VFS: busy inodes on changed media..\n");
/* Ugly hack to disable messages about unable to read partition table */
tmp = warn_no_part;
warn_no_part = 0;
@@ -3237,7 +3241,7 @@ static struct super_block *devfs_read_super (struct super_block *sb,
return sb;
out_no_root:
- printk ("devfs_read_super: get root inode failed\n");
+ PRINTK ("(): get root inode failed\n");
if (root_inode) iput (root_inode);
return NULL;
} /* End Function devfs_read_super */
@@ -3464,7 +3468,7 @@ static int __init init_devfs_fs (void)
{
int err;
- printk ("%s: v%s Richard Gooch (rgooch@atnf.csiro.au)\n",
+ printk (KERN_INFO "%s: v%s Richard Gooch (rgooch@atnf.csiro.au)\n",
DEVFS_NAME, DEVFS_VERSION);
devfsd_buf_cache = kmem_cache_create ("devfsd_event",
sizeof (struct devfsd_buf_entry),
@@ -3472,9 +3476,9 @@ static int __init init_devfs_fs (void)
if (!devfsd_buf_cache) OOPS ("(): unable to allocate event slab\n");
#ifdef CONFIG_DEVFS_DEBUG
devfs_debug = devfs_debug_init;
- printk ("%s: devfs_debug: 0x%0x\n", DEVFS_NAME, devfs_debug);
+ printk (KERN_INFO "%s: devfs_debug: 0x%0x\n", DEVFS_NAME, devfs_debug);
#endif
- printk ("%s: boot_options: 0x%0x\n", DEVFS_NAME, boot_options);
+ printk (KERN_INFO "%s: boot_options: 0x%0x\n", DEVFS_NAME, boot_options);
err = register_filesystem (&devfs_fs_type);
if (!err)
{
@@ -3491,8 +3495,8 @@ void __init mount_devfs_fs (void)
if ( !(boot_options & OPTION_MOUNT) ) return;
err = do_mount ("none", "/dev", "devfs", 0, "");
- if (err == 0) printk ("Mounted devfs on /dev\n");
- else printk ("Warning: unable to mount devfs, err: %d\n", err);
+ if (err == 0) printk (KERN_INFO "Mounted devfs on /dev\n");
+ else PRINTK ("(): unable to mount devfs, err: %d\n", err);
} /* End Function mount_devfs_fs */
module_init(init_devfs_fs)
diff --git a/fs/driverfs/inode.c b/fs/driverfs/inode.c
index 3f3031de17a5..802d5f8597f9 100644
--- a/fs/driverfs/inode.c
+++ b/fs/driverfs/inode.c
@@ -294,6 +294,7 @@ driverfs_write_file(struct file *file, const char *buf, size_t count, loff_t *pp
struct driver_file_entry * entry;
struct device * dev;
ssize_t retval = 0;
+ char * page;
entry = (struct driver_file_entry *)file->private_data;
if (!entry) {
@@ -305,10 +306,20 @@ driverfs_write_file(struct file *file, const char *buf, size_t count, loff_t *pp
dev = list_entry(entry->parent,struct device, dir);
+ page = (char *)__get_free_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ if (count >= PAGE_SIZE)
+ count = PAGE_SIZE - 1;
+ if (copy_from_user(page,buf,count))
+ goto done;
+ *(page + count) = '\0';
+
while (count > 0) {
ssize_t len;
- len = entry->store(dev,buf,count,*ppos);
+ len = entry->store(dev,page + retval,count,*ppos);
if (len <= 0) {
if (len < 0)
@@ -320,6 +331,8 @@ driverfs_write_file(struct file *file, const char *buf, size_t count, loff_t *pp
*ppos += len;
buf += len;
}
+ done:
+ free_page((unsigned long)page);
return retval;
}
@@ -361,7 +374,7 @@ static int driverfs_open_file(struct inode * inode, struct file * filp)
return 0;
}
-static int driverfs_flush(struct file * filp)
+static int driverfs_release(struct inode * inode, struct file * filp)
{
struct driver_file_entry * entry;
struct device * dev;
@@ -402,7 +415,7 @@ static struct file_operations driverfs_file_operations = {
llseek: driverfs_file_lseek,
mmap: generic_file_mmap,
open: driverfs_open_file,
- flush: driverfs_flush,
+ release: driverfs_release,
fsync: driverfs_sync_file,
};
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 30e309d0f284..20e8aea82fd2 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -499,6 +499,7 @@ repeat:
ei->i_dir_acl = 0;
ei->i_dtime = 0;
#ifdef EXT3_PREALLOCATE
+ ei->i_prealloc_block = 0;
ei->i_prealloc_count = 0;
#endif
ei->i_block_group = i;
diff --git a/fs/hfs/file_cap.c b/fs/hfs/file_cap.c
index 49aea8da2d2a..494d43249c44 100644
--- a/fs/hfs/file_cap.c
+++ b/fs/hfs/file_cap.c
@@ -91,6 +91,8 @@ static loff_t cap_info_llseek(struct file *file, loff_t offset, int origin)
{
long long retval;
+ lock_kernel();
+
switch (origin) {
case 2:
offset += file->f_dentry->d_inode->i_size;
@@ -106,6 +108,7 @@ static loff_t cap_info_llseek(struct file *file, loff_t offset, int origin)
}
retval = offset;
}
+ unlock_kernel();
return retval;
}
diff --git a/fs/hfs/file_hdr.c b/fs/hfs/file_hdr.c
index ba3c58a2fe87..2ac3182437a7 100644
--- a/fs/hfs/file_hdr.c
+++ b/fs/hfs/file_hdr.c
@@ -347,6 +347,8 @@ loff_t hdr_llseek(struct file *file, loff_t offset, int origin)
{
long long retval;
+ lock_kernel();
+
switch (origin) {
case 2:
offset += file->f_dentry->d_inode->i_size;
@@ -362,6 +364,7 @@ loff_t hdr_llseek(struct file *file, loff_t offset, int origin)
}
retval = offset;
}
+ unlock_kernel();
return retval;
}
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index e9c1706f1d7e..8943d6d15280 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -29,6 +29,9 @@ loff_t hpfs_dir_lseek(struct file *filp, loff_t off, int whence)
struct inode *i = filp->f_dentry->d_inode;
struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
struct super_block *s = i->i_sb;
+
+ lock_kernel();
+
/*printk("dir lseek\n");*/
if (new_off == 0 || new_off == 1 || new_off == 11 || new_off == 12 || new_off == 13) goto ok;
hpfs_lock_inode(i);
@@ -40,10 +43,12 @@ loff_t hpfs_dir_lseek(struct file *filp, loff_t off, int whence)
}
hpfs_unlock_inode(i);
ok:
+ unlock_kernel();
return filp->f_pos = new_off;
fail:
hpfs_unlock_inode(i);
/*printk("illegal lseek: %016llx\n", new_off);*/
+ unlock_kernel();
return -ESPIPE;
}
diff --git a/fs/isofs/Makefile b/fs/isofs/Makefile
index e35ceba0c6b5..a7d774396027 100644
--- a/fs/isofs/Makefile
+++ b/fs/isofs/Makefile
@@ -15,6 +15,4 @@ obj-$(CONFIG_ZISOFS) += compress.o
obj-m := $(O_TARGET)
-CFLAGS_compress.o := -I $(TOPDIR)/fs/inflate_fs
-
include $(TOPDIR)/Rules.make
diff --git a/fs/jffs2/Makefile b/fs/jffs2/Makefile
index 7cc0da6a7c0d..a69dbda59b04 100644
--- a/fs/jffs2/Makefile
+++ b/fs/jffs2/Makefile
@@ -11,7 +11,7 @@
COMPR_OBJS := compr.o compr_rubin.o compr_rtime.o pushpull.o \
- compr_zlib.o zlib.o
+ compr_zlib.o
JFFS2_OBJS := dir.o file.o ioctl.o nodelist.o malloc.o \
read.o nodemgmt.o readinode.o super.o write.o scan.o gc.o \
symlink.o build.o erase.o background.o
diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c
index d9b3cabda4e2..4c7df6b8c648 100644
--- a/fs/jffs2/compr_zlib.c
+++ b/fs/jffs2/compr_zlib.c
@@ -35,7 +35,11 @@
*
*/
+#ifdef __KERNEL__
+#include <linux/zlib.h>
+#else
#include "zlib.h"
+#endif
#ifdef __KERNEL__
#include <linux/kernel.h>
@@ -44,16 +48,6 @@
#include <linux/jffs2.h>
#include "nodelist.h"
-static void *zalloc(void *opaque, unsigned nr, unsigned size)
-{
- /* How much does it request? Should we use vmalloc? Or be dynamic? */
- return kmalloc(nr * size, GFP_KERNEL);
-}
-
-static void zfree(void *opaque, void *addr)
-{
- kfree(addr);
-}
#else
#define min(x,y) ((x)<(y)?(x):(y))
#ifndef D1
@@ -86,14 +80,18 @@ int zlib_compress(unsigned char *data_in, unsigned char *cpage_out,
return -1;
#ifdef __KERNEL__
- strm.zalloc = zalloc;
- strm.zfree = zfree;
+ strm.workspace = kmalloc(zlib_deflate_workspacesize(),
+ GFP_KERNEL);
+ if (strm.workspace == NULL) {
+ printk(KERN_WARNING "deflateInit alloc of workspace failed\n");
+ return -1;
+ }
#else
strm.zalloc = (void *)0;
strm.zfree = (void *)0;
#endif
- if (Z_OK != deflateInit(&strm, 3)) {
+ if (Z_OK != zlib_deflateInit(&strm, 3)) {
printk(KERN_WARNING "deflateInit failed\n");
return -1;
}
@@ -108,24 +106,24 @@ int zlib_compress(unsigned char *data_in, unsigned char *cpage_out,
strm.avail_in = min((unsigned)(*sourcelen-strm.total_in), strm.avail_out);
D1(printk(KERN_DEBUG "calling deflate with avail_in %d, avail_out %d\n",
strm.avail_in, strm.avail_out));
- ret = deflate(&strm, Z_PARTIAL_FLUSH);
+ ret = zlib_deflate(&strm, Z_PARTIAL_FLUSH);
D1(printk(KERN_DEBUG "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n",
strm.avail_in, strm.avail_out, strm.total_in, strm.total_out));
if (ret != Z_OK) {
D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret));
- deflateEnd(&strm);
- return -1;
+ goto out_err;
}
}
strm.avail_out += STREAM_END_SPACE;
strm.avail_in = 0;
- ret = deflate(&strm, Z_FINISH);
+ ret = zlib_deflate(&strm, Z_FINISH);
if (ret != Z_STREAM_END) {
D1(printk(KERN_DEBUG "final deflate returned %d\n", ret));
- deflateEnd(&strm);
- return -1;
+ goto out_err;
+
}
- deflateEnd(&strm);
+ zlib_deflateEnd(&strm);
+ kfree(strm.workspace);
D1(printk(KERN_DEBUG "zlib compressed %ld bytes into %ld\n", strm.total_in, strm.total_out));
@@ -136,6 +134,11 @@ int zlib_compress(unsigned char *data_in, unsigned char *cpage_out,
*dstlen = strm.total_out;
*sourcelen = strm.total_in;
return 0;
+
+ out_err:
+ zlib_deflateEnd(&strm);
+ kfree(strm.workspace);
+ return -1;
}
void zlib_decompress(unsigned char *data_in, unsigned char *cpage_out,
@@ -145,14 +148,18 @@ void zlib_decompress(unsigned char *data_in, unsigned char *cpage_out,
int ret;
#ifdef __KERNEL__
- strm.zalloc = zalloc;
- strm.zfree = zfree;
+ strm.workspace = kmalloc(zlib_inflate_workspacesize(),
+ GFP_KERNEL);
+ if (strm.workspace == NULL) {
+ printk(KERN_WARNING "inflateInit alloc of workspace failed\n");
+ return;
+ }
#else
strm.zalloc = (void *)0;
strm.zfree = (void *)0;
#endif
- if (Z_OK != inflateInit(&strm)) {
+ if (Z_OK != zlib_inflateInit(&strm)) {
printk(KERN_WARNING "inflateInit failed\n");
return;
}
@@ -164,10 +171,11 @@ void zlib_decompress(unsigned char *data_in, unsigned char *cpage_out,
strm.avail_out = destlen;
strm.total_out = 0;
- while((ret = inflate(&strm, Z_FINISH)) == Z_OK)
+ while((ret = zlib_inflate(&strm, Z_FINISH)) == Z_OK)
;
if (ret != Z_STREAM_END) {
printk(KERN_NOTICE "inflate returned %d\n", ret);
}
- inflateEnd(&strm);
+ zlib_inflateEnd(&strm);
+ kfree(strm.workspace);
}
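
The conversion above moves jffs2 onto the in-kernel zlib interface, where the caller hands the library a preallocated workspace instead of zalloc/zfree callbacks. A minimal, self-contained sketch of that calling convention for the inflate side (buffer handling and return codes are illustrative only):

    #include <linux/zlib.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static int example_inflate(void *dst, int dstlen, void *src, int srclen)
    {
            z_stream strm;
            int ret;

            memset(&strm, 0, sizeof(strm));
            strm.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
            if (strm.workspace == NULL)
                    return -ENOMEM;
            if (zlib_inflateInit(&strm) != Z_OK) {
                    kfree(strm.workspace);
                    return -EINVAL;
            }
            strm.next_in = src;
            strm.avail_in = srclen;
            strm.next_out = dst;
            strm.avail_out = dstlen;
            while ((ret = zlib_inflate(&strm, Z_FINISH)) == Z_OK)
                    ;
            zlib_inflateEnd(&strm);
            kfree(strm.workspace);
            return (ret == Z_STREAM_END) ? 0 : -EIO;
    }
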
diff --git a/fs/jffs2/zlib.c b/fs/jffs2/zlib.c
deleted file mode 100644
index 6595b3e41f9b..000000000000
--- a/fs/jffs2/zlib.c
+++ /dev/null
@@ -1,5371 +0,0 @@
-/*
- * This file is derived from various .h and .c files from the zlib-1.0.4
- * distribution by Jean-loup Gailly and Mark Adler, with some additions
- * by Paul Mackerras to aid in implementing Deflate compression and
- * decompression for PPP packets. See zlib.h for conditions of
- * distribution and use.
- *
- * Changes that have been made include:
- * - added Z_PACKET_FLUSH (see zlib.h for details)
- * - added inflateIncomp and deflateOutputPending
- * - allow strm->next_out to be NULL, meaning discard the output
- *
- * $Id: zlib.c,v 1.3 1997/12/23 10:47:42 paulus Exp $
- */
-
-/*
- * ==FILEVERSION 971210==
- *
- * This marker is used by the Linux installation script to determine
- * whether an up-to-date version of this file is already installed.
- */
-
-#define NO_DUMMY_DECL
-#define NO_ZCFUNCS
-#define MY_ZCALLOC
-
-#if defined(__FreeBSD__) && (defined(KERNEL) || defined(_KERNEL))
-#define inflate inflate_ppp /* FreeBSD already has an inflate :-( */
-#endif
-
-
-/* +++ zutil.h */
-/* zutil.h -- internal interface and configuration of the compression library
- * Copyright (C) 1995-1996 Jean-loup Gailly.
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-/* From: zutil.h,v 1.16 1996/07/24 13:41:13 me Exp $ */
-
-#ifndef _Z_UTIL_H
-#define _Z_UTIL_H
-
-#include "zlib.h"
-
-#if defined(KERNEL) || defined(_KERNEL)
-/* Assume this is a *BSD or SVR4 kernel */
-#include <sys/types.h>
-#include <sys/time.h>
-#include <sys/systm.h>
-# define HAVE_MEMCPY
-# define memcpy(d, s, n) bcopy((s), (d), (n))
-# define memset(d, v, n) bzero((d), (n))
-# define memcmp bcmp
-
-#else
-#if defined(__KERNEL__)
-/* Assume this is a Linux kernel */
-#include <linux/string.h>
-#define HAVE_MEMCPY
-
-#else /* not kernel */
-
-#if defined(MSDOS)||defined(VMS)||defined(CRAY)||defined(WIN32)||defined(RISCOS)
-# include <stddef.h>
-# include <errno.h>
-#else
- extern int errno;
-#endif
-#ifdef STDC
-# include <string.h>
-# include <stdlib.h>
-#endif
-#endif /* __KERNEL__ */
-#endif /* _KERNEL || KERNEL */
-
-#ifndef local
-# define local static
-#endif
-/* compile with -Dlocal if your debugger can't find static symbols */
-
-typedef unsigned char uch;
-typedef uch FAR uchf;
-typedef unsigned short ush;
-typedef ush FAR ushf;
-typedef unsigned long ulg;
-
-extern const char *z_errmsg[10]; /* indexed by 2-zlib_error */
-/* (size given to avoid silly warnings with Visual C++) */
-
-#define ERR_MSG(err) z_errmsg[Z_NEED_DICT-(err)]
-
-#define ERR_RETURN(strm,err) \
- return (strm->msg = (char*)ERR_MSG(err), (err))
-/* To be used only when the state is known to be valid */
-
- /* common constants */
-
-#ifndef DEF_WBITS
-# define DEF_WBITS MAX_WBITS
-#endif
-/* default windowBits for decompression. MAX_WBITS is for compression only */
-
-#if MAX_MEM_LEVEL >= 8
-# define DEF_MEM_LEVEL 8
-#else
-# define DEF_MEM_LEVEL MAX_MEM_LEVEL
-#endif
-/* default memLevel */
-
-#define STORED_BLOCK 0
-#define STATIC_TREES 1
-#define DYN_TREES 2
-/* The three kinds of block type */
-
-#define MIN_MATCH 3
-#define MAX_MATCH 258
-/* The minimum and maximum match lengths */
-
-#define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */
-
- /* target dependencies */
-
-#ifdef MSDOS
-# define OS_CODE 0x00
-# ifdef __TURBOC__
-# include <alloc.h>
-# else /* MSC or DJGPP */
-# include <malloc.h>
-# endif
-#endif
-
-#ifdef OS2
-# define OS_CODE 0x06
-#endif
-
-#ifdef WIN32 /* Window 95 & Windows NT */
-# define OS_CODE 0x0b
-#endif
-
-#if defined(VAXC) || defined(VMS)
-# define OS_CODE 0x02
-# define FOPEN(name, mode) \
- fopen((name), (mode), "mbc=60", "ctx=stm", "rfm=fix", "mrs=512")
-#endif
-
-#ifdef AMIGA
-# define OS_CODE 0x01
-#endif
-
-#if defined(ATARI) || defined(atarist)
-# define OS_CODE 0x05
-#endif
-
-#ifdef MACOS
-# define OS_CODE 0x07
-#endif
-
-#ifdef __50SERIES /* Prime/PRIMOS */
-# define OS_CODE 0x0F
-#endif
-
-#ifdef TOPS20
-# define OS_CODE 0x0a
-#endif
-
-#if defined(_BEOS_) || defined(RISCOS)
-# define fdopen(fd,mode) NULL /* No fdopen() */
-#endif
-
- /* Common defaults */
-
-#ifndef OS_CODE
-# define OS_CODE 0x03 /* assume Unix */
-#endif
-
-#ifndef FOPEN
-# define FOPEN(name, mode) fopen((name), (mode))
-#endif
-
- /* functions */
-
-#ifdef HAVE_STRERROR
- extern char *strerror OF((int));
-# define zstrerror(errnum) strerror(errnum)
-#else
-# define zstrerror(errnum) ""
-#endif
-
-#if defined(pyr)
-# define NO_MEMCPY
-#endif
-#if (defined(M_I86SM) || defined(M_I86MM)) && !defined(_MSC_VER)
- /* Use our own functions for small and medium model with MSC <= 5.0.
- * You may have to use the same strategy for Borland C (untested).
- */
-# define NO_MEMCPY
-#endif
-#if defined(STDC) && !defined(HAVE_MEMCPY) && !defined(NO_MEMCPY)
-# define HAVE_MEMCPY
-#endif
-#ifdef HAVE_MEMCPY
-# ifdef SMALL_MEDIUM /* MSDOS small or medium model */
-# define zmemcpy _fmemcpy
-# define zmemcmp _fmemcmp
-# define zmemzero(dest, len) _fmemset(dest, 0, len)
-# else
-# define zmemcpy memcpy
-# define zmemcmp memcmp
-# define zmemzero(dest, len) memset(dest, 0, len)
-# endif
-#else
- extern void zmemcpy OF((Bytef* dest, Bytef* source, uInt len));
- extern int zmemcmp OF((Bytef* s1, Bytef* s2, uInt len));
- extern void zmemzero OF((Bytef* dest, uInt len));
-#endif
-
-/* Diagnostic functions */
-#ifdef DEBUG_ZLIB
-# include <stdio.h>
-# ifndef verbose
-# define verbose 0
-# endif
- extern void z_error OF((char *m));
-# define Assert(cond,msg) {if(!(cond)) z_error(msg);}
-# define Trace(x) fprintf x
-# define Tracev(x) {if (verbose) fprintf x ;}
-# define Tracevv(x) {if (verbose>1) fprintf x ;}
-# define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
-# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
-#else
-# define Assert(cond,msg)
-# define Trace(x)
-# define Tracev(x)
-# define Tracevv(x)
-# define Tracec(c,x)
-# define Tracecv(c,x)
-#endif
-
-
-typedef uLong (*check_func) OF((uLong check, const Bytef *buf, uInt len));
-
-voidpf zcalloc OF((voidpf opaque, unsigned items, unsigned size));
-void zcfree OF((voidpf opaque, voidpf ptr));
-
-#define ZALLOC(strm, items, size) \
- (*((strm)->zalloc))((strm)->opaque, (items), (size))
-#define ZFREE(strm, addr) (*((strm)->zfree))((strm)->opaque, (voidpf)(addr))
-#define TRY_FREE(s, p) {if (p) ZFREE(s, p);}
-
-#endif /* _Z_UTIL_H */
-/* --- zutil.h */
-
-/* +++ deflate.h */
-/* deflate.h -- internal compression state
- * Copyright (C) 1995-1996 Jean-loup Gailly
- * For conditions of distribution and use, see copyright notice in zlib.h
- */
-
-/* WARNING: this file should *not* be used by applications. It is
- part of the implementation of the compression library and is
- subject to change. Applications should only use zlib.h.
- */
-
-/* From: deflate.h,v 1.10 1996/07/02 12:41:00 me Exp $ */
-
-#ifndef _DEFLATE_H
-#define _DEFLATE_H
-
-/* #include "zutil.h" */
-
-/* ===========================================================================
- * Internal compression state.
- */
-
-#define LENGTH_CODES 29
-/* number of length codes, not counting the special END_BLOCK code */
-
-#define LITERALS 256
-/* number of literal bytes 0..255 */
-
-#define L_CODES (LITERALS+1+LENGTH_CODES)
-/* number of Literal or Length codes, including the END_BLOCK code */
-
-#define D_CODES 30
-/* number of distance codes */
-
-#define BL_CODES 19
-/* number of codes used to transfer the bit lengths */
-
-#define HEAP_SIZE (2*L_CODES+1)
-/* maximum heap size */
-
-#define MAX_BITS 15
-/* All codes must not exceed MAX_BITS bits */
-
-#define INIT_STATE 42
-#define BUSY_STATE 113
-#define FINISH_STATE 666
-/* Stream status */
-
-
-/* Data structure describing a single value and its code string. */
-typedef struct ct_data_s {
- union {
- ush freq; /* frequency count */
- ush code; /* bit string */
- } fc;
- union {
- ush dad; /* father node in Huffman tree */
- ush len; /* length of bit string */
- } dl;
-} FAR ct_data;
-
-#define Freq fc.freq
-#define Code fc.code
-#define Dad dl.dad
-#define Len dl.len
-
-typedef struct static_tree_desc_s static_tree_desc;
-
-typedef struct tree_desc_s {
- ct_data *dyn_tree; /* the dynamic tree */
- int max_code; /* largest code with non zero frequency */
- static_tree_desc *stat_desc; /* the corresponding static tree */
-} FAR tree_desc;
-
-typedef ush Pos;
-typedef Pos FAR Posf;
-typedef unsigned IPos;
-
-/* A Pos is an index in the character window. We use short instead of int to
- * save space in the various tables. IPos is used only for parameter passing.
- */
-
-typedef struct deflate_state {
- z_streamp strm; /* pointer back to this zlib stream */
- int status; /* as the name implies */
- Bytef *pending_buf; /* output still pending */
- ulg pending_buf_size; /* size of pending_buf */
- Bytef *pending_out; /* next pending byte to output to the stream */
- int pending; /* nb of bytes in the pending buffer */
- int noheader; /* suppress zlib header and adler32 */
- Byte data_type; /* UNKNOWN, BINARY or ASCII */
- Byte method; /* STORED (for zip only) or DEFLATED */
- int last_flush; /* value of flush param for previous deflate call */
-
- /* used by deflate.c: */
-
- uInt w_size; /* LZ77 window size (32K by default) */
- uInt w_bits; /* log2(w_size) (8..16) */
- uInt w_mask; /* w_size - 1 */
-
- Bytef *window;
- /* Sliding window. Input bytes are read into the second half of the window,
- * and move to the first half later to keep a dictionary of at least wSize
- * bytes. With this organization, matches are limited to a distance of
- * wSize-MAX_MATCH bytes, but this ensures that IO is always
- * performed with a length multiple of the block size. Also, it limits
- * the window size to 64K, which is quite useful on MSDOS.
- * To do: use the user input buffer as slidi