diff -u sys.noOD/alpha/conf/GENERIC sys/alpha/conf/GENERIC --- sys.noOD/alpha/conf/GENERIC Mon Oct 11 08:04:57 1999 +++ sys/alpha/conf/GENERIC Fri Dec 24 23:51:32 1999 @@ -71,6 +71,7 @@ controller scbus0 device da0 +device od0 device sa0 device pass0 device cd0 diff -u sys.noOD/cam/scsi/scsi_all.c sys/cam/scsi/scsi_all.c --- sys.noOD/cam/scsi/scsi_all.c Mon Aug 30 01:21:44 1999 +++ sys/cam/scsi/scsi_all.c Fri Dec 24 23:51:32 1999 @@ -35,6 +35,8 @@ #include #include +#include +#include #else #include #include @@ -58,6 +60,14 @@ #define ERESTART -1 /* restart syscall */ #define EJUSTRETURN -2 /* don't modify regs, just return */ #endif /* !KERNEL */ + +#ifdef KERNEL +/* + * XXX S.Akiyama + * CAM node was moved from scsi_cd.c. + */ +SYSCTL_NODE(_kern, OID_AUTO, cam, CTLFLAG_RD, 0, "CAM Subsystem"); +#endif const char *scsi_sense_key_text[] = { diff -u sys.noOD/cam/scsi/scsi_cd.c sys/cam/scsi/scsi_cd.c --- sys.noOD/cam/scsi/scsi_cd.c Sun Dec 12 08:03:03 1999 +++ sys/cam/scsi/scsi_cd.c Fri Dec 24 23:51:32 1999 @@ -281,10 +281,9 @@ static int changer_max_busy_seconds = CHANGER_MAX_BUSY_SECONDS; /* - * XXX KDM this CAM node should be moved if we ever get more CAM sysctl - * variables. + * XXX S.Akiyama + * CAM node was moved to scsi_all.c. */ -SYSCTL_NODE(_kern, OID_AUTO, cam, CTLFLAG_RD, 0, "CAM Subsystem"); SYSCTL_NODE(_kern_cam, OID_AUTO, cd, CTLFLAG_RD, 0, "CAM CDROM driver"); SYSCTL_NODE(_kern_cam_cd, OID_AUTO, changer, CTLFLAG_RD, 0, "CD Changer"); SYSCTL_INT(_kern_cam_cd_changer, OID_AUTO, min_busy_seconds, CTLFLAG_RW, diff -u sys.noOD/cam/scsi/scsi_da.c sys/cam/scsi/scsi_da.c --- sys.noOD/cam/scsi/scsi_da.c Mon Aug 30 01:21:46 1999 +++ sys/cam/scsi/scsi_da.c Fri Dec 24 23:51:32 1999 @@ -29,6 +29,7 @@ */ #include "opt_hw_wdog.h" +#include "od.h" #include #include @@ -930,7 +931,11 @@ cgd = (struct ccb_getdev *)arg; +#if NOD == 0 if ((cgd->pd_type != T_DIRECT) && (cgd->pd_type != T_OPTICAL)) +#else + if (cgd->pd_type != T_DIRECT) +#endif break; /* diff -u sys.noOD/cam/scsi/scsi_od.c sys/cam/scsi/scsi_od.c --- sys.noOD/cam/scsi/scsi_od.c Mon Dec 27 22:29:06 1999 +++ sys/cam/scsi/scsi_od.c Fri Dec 24 23:51:32 1999 @@ -0,0 +1,1824 @@ +/* + * Copyright (c) 1999 Shunsuke Akiyama . + * All rights reserved. + * + * NOTE: + * scsi_od.c based on scsi_da.c and modified for SCSI optical memory + * device characteristics. + * + * $Id: scsi_od.c,v 1.1.6.1 1999/10/11 16:13:43 akiyama Exp $ + */ + +/* + * Implementation of SCSI Direct Access Peripheral driver for CAM. + * + * Copyright (c) 1997 Justin T. Gibbs. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification, immediately at the beginning of the file. + * 2. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: scsi_da.c,v 1.19 1999/01/07 20:19:09 mjacob Exp $ + */ + +#include "opt_od.h" +#include "opt_hw_wdog.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +typedef enum { + OD_STATE_PROBE, + OD_STATE_NORMAL +} od_state; + +typedef enum { + OD_FLAG_PACK_INVALID = 0x001, + OD_FLAG_NEW_PACK = 0x002, + OD_FLAG_PACK_LOCKED = 0x004, + OD_FLAG_PACK_REMOVABLE = 0x008, + OD_FLAG_TAGGED_QUEUING = 0x010, + OD_FLAG_NEED_OTAG = 0x020, + OD_FLAG_WENT_IDLE = 0x040, + OD_FLAG_RETRY_UA = 0x080, + OD_FLAG_OPEN = 0x100 +} od_flags; + +typedef enum { + OD_Q_NONE = 0x00, + OD_Q_NO_SYNC_CACHE = 0x01, + OD_Q_NO_6_BYTE = 0x02 +} od_quirks; + +typedef enum { + OD_CCB_PROBE = 0x01, + OD_CCB_BUFFER_IO = 0x02, + OD_CCB_WAITING = 0x03, + OD_CCB_DUMP = 0x04, + OD_CCB_TYPE_MASK = 0x0F, + OD_CCB_RETRY_UA = 0x10 +} od_ccb_state; + +/* Offsets into our private area for storing information */ +#define ccb_state ppriv_field0 +#define ccb_bp ppriv_ptr1 + +struct disk_params { + u_int8_t heads; + u_int16_t cylinders; + u_int8_t secs_per_track; + u_int32_t secsize; /* Number of bytes/sector */ + u_int32_t sectors; /* total number sectors */ +}; + +struct od_softc { + struct buf_queue_head buf_queue; + struct devstat device_stats; + SLIST_ENTRY(od_softc) links; + LIST_HEAD(, ccb_hdr) pending_ccbs; + od_state state; + od_flags flags; + od_quirks quirks; + int minimum_cmd_size; + int ordered_tag_count; + struct disk_params params; + struct diskslices *dk_slices; /* virtual drives */ + union ccb saved_ccb; +}; + +struct od_quirk_entry { + struct scsi_inquiry_pattern inq_pat; + od_quirks quirks; +}; + +static struct od_quirk_entry od_quirk_table[] = +{ + { + { T_CDROM, SIP_MEDIA_REMOVABLE, "MATSHITA", "PD-2*", "*" }, + /* quirks */ OD_Q_NONE + }, + { + { T_CDROM, SIP_MEDIA_REMOVABLE, "TOSHIBA", "SD-W1101*", "*" }, + /* quirks */ OD_Q_NO_6_BYTE + }, + { + { T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "1300" }, + /* quirks */ OD_Q_NO_SYNC_CACHE + } +}; + +static d_open_t odopen; +static d_read_t odread; +static d_write_t odwrite; +static d_close_t odclose; +static d_strategy_t odstrategy; +static d_strategy_t odstrategy1; +static d_ioctl_t odioctl; +static d_dump_t oddump; +static d_psize_t odsize; +static periph_init_t odinit; + +static void odasync(void *callback_arg, u_int32_t code, + struct cam_path *path, void *arg); +static periph_ctor_t odregister; +static periph_dtor_t odcleanup; +static periph_start_t odstart; +static periph_oninv_t odoninvalidate; +static void oddone(struct cam_periph *periph, + union ccb *done_ccb); +static int oderror(union ccb *ccb, u_int32_t cam_flags, + u_int32_t sense_flags); +static void odprevent(struct cam_periph *periph, int action); +static void odsetgeom(struct cam_periph *periph, + struct scsi_read_capacity_data * rdcap); +#ifdef 
OD_USE_ORDERED_TAG +static timeout_t odsendorderedtag; +static void odshutdown(int howto, void *arg); +#endif /* OD_USE_ORDERED_TAG */ +static int odstartunit(struct cam_periph *periph); +static int odstopunit(struct cam_periph *periph, u_int32_t eject); +static int odcheckunit(struct cam_periph *periph); + +#ifndef OD_DEFAULT_TIMEOUT +#define OD_DEFAULT_TIMEOUT 60 /* Timeout in seconds */ +#endif + +/* + * OD_ORDEREDTAG_INTERVAL determines how often, relative + * to the default timeout, we check to see whether an ordered + * tagged transaction is appropriate to prevent simple tag + * starvation. Since we'd like to ensure that there is at least + * 1/2 of the timeout length left for a starved transaction to + * complete after we've sent an ordered tag, we must poll at least + * four times in every timeout period. This takes care of the worst + * case where a starved transaction starts during an interval that + * meets the requirement "don't send an ordered tag" test so it takes + * us two intervals to determine that a tag must be sent. + */ +#ifndef OD_ORDEREDTAG_INTERVAL +#define OD_ORDEREDTAG_INTERVAL 4 +#endif + +static struct periph_driver oddriver = +{ + odinit, "od", + TAILQ_HEAD_INITIALIZER(oddriver.units), /* generation */ 0 +}; + +DATA_SET(periphdriver_set, oddriver); + +#define OD_CDEV_MAJOR 70 +#define OD_BDEV_MAJOR 20 + +/* For 2.2-stable support */ +#ifndef D_DISK +#define D_DISK 0 +#endif + +static struct cdevsw od_cdevsw = +{ + /*d_open*/ odopen, + /*d_close*/ odclose, + /*d_read*/ odread, + /*d_write*/ odwrite, + /*d_ioctl*/ odioctl, + /*d_stop*/ nostop, + /*d_reset*/ noreset, + /*d_devtotty*/ nodevtotty, + /*d_poll*/ seltrue, + /*d_mmap*/ nommap, + /*d_strategy*/ odstrategy, + /*d_name*/ "od", + /*d_spare*/ NULL, + /*d_maj*/ -1, + /*d_dump*/ oddump, + /*d_psize*/ odsize, + /*d_flags*/ D_DISK, + /*d_maxio*/ 0, + /*b_maj*/ -1 +}; + +static SLIST_HEAD(,od_softc) softc_list; +static struct extend_array *odperiphs; + +static int od_auto_turnoff = 0; +static int od_wait_ready_count = 0; + +SYSCTL_NODE(_kern_cam, OID_AUTO, od, CTLFLAG_RD, 0, "CAM optical disk driver"); +SYSCTL_INT(_kern_cam_od, OID_AUTO, auto_turnoff, CTLFLAG_RW, + &od_auto_turnoff, 0, "Automatic spindown"); +SYSCTL_INT(_kern_cam_od, OID_AUTO, wait_ready_count, CTLFLAG_RW, + &od_wait_ready_count, 0, "Ready for wait count"); + +static int +odopen(dev_t dev, int flags, int fmt, struct proc *p) +{ + struct cam_periph *periph; + struct od_softc *softc; + struct disklabel label; + int unit; + int part; + int error; + int s; + union ccb *ccb; + int is_open; + + unit = dkunit(dev); + part = dkpart(dev); + periph = cam_extend_get(odperiphs, unit); + if (periph == NULL) + return ENXIO; + + softc = (struct od_softc *)periph->softc; + + CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, + ("odopen: dev=0x%lx (unit %d , partition %d)\n", (long) dev, + unit, part)); + + if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) { + return error; /* error code from tsleep */ + } + + is_open = softc->flags & OD_FLAG_OPEN; + if (is_open == 0) { + if (cam_periph_acquire(periph) != CAM_REQ_CMP) + return ENXIO; + + softc->flags |= OD_FLAG_OPEN; + odstartunit(periph); + error = odcheckunit(periph); + if (error) { + softc->flags &= ~OD_FLAG_OPEN; + cam_periph_unlock(periph); + cam_periph_release(periph); + return error; + } + odprevent(periph, PR_PREVENT); + } + + s = splsoftcam(); + if ((softc->flags & OD_FLAG_PACK_INVALID) != 0) { + /* + * If any partition is open, although the disk has + * been invalidated, disallow further opens. 
+ */ + if (dsisopen(softc->dk_slices)) { + splx(s); + odprevent(periph, PR_PREVENT); + cam_periph_unlock(periph); + return ENXIO; + } + + /* Invalidate our pack information. */ + dsgone(&softc->dk_slices); + softc->flags &= ~OD_FLAG_PACK_INVALID; + } + splx(s); + + ccb = cam_periph_getccb(periph, /*priority*/1); + + /* Get write protect status */ + { +# define MODEBUFSZ 16 + struct scsi_mode_header_6 *mode_hdr; + void *mode_buffer; + int protected; + + mode_buffer = malloc(MODEBUFSZ, M_TEMP, M_WAITOK); + bzero(mode_buffer, MODEBUFSZ); + + scsi_mode_sense(&ccb->csio, + /*retries*/1, + /*cbfncp*/oddone, + MSG_SIMPLE_Q_TAG, + /*dbd*/1, + SMS_PAGE_CTRL_CURRENT, + /*page*/0x08, + mode_buffer, + MODEBUFSZ, + SSD_FULL_SIZE, + /*timeout*/5000); + error = cam_periph_runccb(ccb, oderror, /*cam_flags*/0, + /*sense_flags*/SF_RETRY_UA + | SF_NO_PRINT + | SF_RETRY_SELTO, + &softc->device_stats); + + if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) + cam_release_devq(ccb->ccb_h.path, 0, 0, 0, FALSE); + + if (error) + printf("odopen: get cache control page failed.\n"); + else { + mode_hdr = (struct scsi_mode_header_6 *)mode_buffer; + protected = mode_hdr->dev_spec & 0x80; + if (protected != 0 && (flags & FWRITE) != 0) + error = EACCES; + } + + free(mode_buffer, M_TEMP); + } + + /* Do a read capacity */ + if (error == 0) { + struct scsi_read_capacity_data *rcap; + + rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap), + M_TEMP, + M_WAITOK); + + scsi_read_capacity(&ccb->csio, + /*retries*/1, + /*cbfncp*/oddone, + MSG_SIMPLE_Q_TAG, + rcap, + SSD_FULL_SIZE, + /*timeout*/60000); + ccb->ccb_h.ccb_bp = NULL; + + error = cam_periph_runccb(ccb, oderror, /*cam_flags*/0, + /*sense_flags*/SF_RETRY_UA + | SF_NO_PRINT + | SF_RETRY_SELTO, + &softc->device_stats); + + if (error == 0) { + odsetgeom(periph, rcap); + } + + free(rcap, M_TEMP); + } + + xpt_release_ccb(ccb); + + if (error == 0) { + struct ccb_getdev cgd; + + /* Build label for whole disk. */ + bzero(&label, sizeof(label)); + label.d_type = DTYPE_SCSI; + + /* + * Grab the inquiry data to get the vendor and product names. + * Put them in the typename and packname for the label. + */ + xpt_setup_ccb(&cgd.ccb_h, periph->path, /*priority*/ 1); + cgd.ccb_h.func_code = XPT_GDEV_TYPE; + xpt_action((union ccb *)&cgd); + + strncpy(label.d_typename, cgd.inq_data.vendor, + min(SID_VENDOR_SIZE, sizeof(label.d_typename))); + strncpy(label.d_packname, cgd.inq_data.product, + min(SID_PRODUCT_SIZE, sizeof(label.d_packname))); + + label.d_secsize = softc->params.secsize; + label.d_nsectors = softc->params.secs_per_track; + label.d_ntracks = softc->params.heads; + label.d_ncylinders = softc->params.cylinders; + label.d_secpercyl = softc->params.heads + * softc->params.secs_per_track; + label.d_secperunit = softc->params.sectors; + label.d_flags = D_REMOVABLE; + + if ((dsisopen(softc->dk_slices) == 0) + && ((softc->flags & OD_FLAG_PACK_REMOVABLE) != 0)) { + odprevent(periph, PR_PREVENT); + } + + /* Initialize slice tables. */ + error = dsopen("od", dev, fmt, 0, &softc->dk_slices, &label, + odstrategy1, (ds_setgeom_t *)NULL, + &od_cdevsw); + + /* + * Check to see whether or not the blocksize is set yet. + * If it isn't, set it and then clear the blocksize + * unavailable flag for the device statistics. 
+ */ + if ((softc->device_stats.flags + & DEVSTAT_BS_UNAVAILABLE) != 0) { + softc->device_stats.block_size = softc->params.secsize; + softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE; + } + } + + if (error) { + if ((dsisopen(softc->dk_slices) == 0) + && ((softc->flags & OD_FLAG_PACK_REMOVABLE) != 0)) { + odprevent(periph, PR_ALLOW); + if (od_auto_turnoff) + odstopunit(periph, 0); + } + if (is_open == 0) { + softc->flags &= ~OD_FLAG_OPEN; + cam_periph_release(periph); + } + } + + cam_periph_unlock(periph); + + return error; +} + +static int +odclose(dev_t dev, int flag, int fmt, struct proc *p) +{ + struct cam_periph *periph; + struct od_softc *softc; + int unit; + int error; + + unit = dkunit(dev); + periph = cam_extend_get(odperiphs, unit); + if (periph == NULL) + return ENXIO; + + softc = (struct od_softc *)periph->softc; + + if ((error = cam_periph_lock(periph, PRIBIO)) != 0) { + return error; /* error code from tsleep */ + } + + dsclose(dev, fmt, softc->dk_slices); + if (dsisopen(softc->dk_slices)) { + cam_periph_unlock(periph); + return 0; + } + + if ((softc->flags & OD_FLAG_PACK_INVALID) == 0 + && (softc->quirks & OD_Q_NO_SYNC_CACHE) == 0) { + union ccb *ccb; + + ccb = cam_periph_getccb(periph, /*priority*/1); + + scsi_synchronize_cache(&ccb->csio, + /*retries*/1, + /*cbfcnp*/oddone, + MSG_SIMPLE_Q_TAG, + /*begin_lba*/0,/* Cover the whole disk */ + /*lb_count*/0, + SSD_FULL_SIZE, + 5 * 60 * 1000); + + cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0, + /*sense_flags*/SF_RETRY_UA, + &softc->device_stats); + + if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { + if ((ccb->ccb_h.status & CAM_STATUS_MASK) == + CAM_SCSI_STATUS_ERROR) { + int asc, ascq; + int sense_key, error_code; + + scsi_extract_sense(&ccb->csio.sense_data, + &error_code, + &sense_key, + &asc, &ascq); + if (sense_key != SSD_KEY_ILLEGAL_REQUEST) + scsi_sense_print(&ccb->csio); + } else { + xpt_print_path(periph->path); + printf("Synchronize cache failed, status " + "== 0x%x, scsi status == 0x%x\n", + ccb->csio.ccb_h.status, + ccb->csio.scsi_status); + } + } + + if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) + cam_release_devq(ccb->ccb_h.path, + /*relsim_flags*/0, + /*reduction*/0, + /*timeout*/0, + /*getcount_only*/0); + + xpt_release_ccb(ccb); + } + + if ((softc->flags & OD_FLAG_PACK_REMOVABLE) != 0) { + odprevent(periph, PR_ALLOW); + if (od_auto_turnoff) + odstopunit(periph, 0); + /* + * If we've got removeable media, mark the blocksize as + * unavailable, since it could change when new media is + * inserted. + */ + softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE; + } + + softc->flags &= ~OD_FLAG_OPEN; + cam_periph_unlock(periph); + cam_periph_release(periph); + + return 0; +} + +static int +odread(dev_t dev, struct uio *uio, int ioflag) +{ + return physio(odstrategy, NULL, dev, 1, minphys, uio); +} + +static int +odwrite(dev_t dev, struct uio *uio, int ioflag) +{ + return physio(odstrategy, NULL, dev, 0, minphys, uio); +} + +/* + * Actually translate the requested transfer into one the physical driver + * can understand. The transfer is described by a buf and will include + * only one physical transfer. 
+ */ +static void +odstrategy(struct buf *bp) +{ + struct cam_periph *periph; + struct od_softc *softc; + u_int unit; + u_int part; + int s; + + unit = dkunit(bp->b_dev); + part = dkpart(bp->b_dev); + periph = cam_extend_get(odperiphs, unit); + if (periph == NULL) { + bp->b_error = ENXIO; + goto bad; + } + softc = (struct od_softc *)periph->softc; + + /* + * Do bounds checking, adjust transfer, set b_cylin and b_pbklno. + */ + if (dscheck(bp, softc->dk_slices) <= 0) + goto done; + + /* + * Mask interrupts so that the pack cannot be invalidated until + * after we are in the queue. Otherwise, we might not properly + * clean up one of the buffers. + */ + s = splbio(); + + /* + * If the device has been made invalid, error out + */ + if ((softc->flags & OD_FLAG_PACK_INVALID)) { + splx(s); + bp->b_error = ENXIO; + goto bad; + } + + /* + * Place it in the queue of disk activities for this disk + */ + bufqdisksort(&softc->buf_queue, bp); + + splx(s); + + /* + * Schedule ourselves for performing the work. + */ + xpt_schedule(periph, /* XXX priority */1); + + return; +bad: + bp->b_flags |= B_ERROR; +done: + /* + * Correctly set the buf to indicate a completed xfer + */ + bp->b_resid = bp->b_bcount; + biodone(bp); + return; +} + +static void +odstrategy1(struct buf *bp) +{ + /* + * XXX - do something to make odstrategy() but not this block while + * we're doing dsopen() and dsioctl(). + */ + odstrategy(bp); +} + +/* For 2.2-stable support */ +#ifndef ENOIOCTL +#define ENOIOCTL -1 +#endif + +static int +odioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p) +{ + struct cam_periph *periph; + struct od_softc *softc; + int unit; + int error; + + unit = dkunit(dev); + periph = cam_extend_get(odperiphs, unit); + if (periph == NULL) + return ENXIO; + + softc = (struct od_softc *)periph->softc; + + CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("odioctl\n")); + + if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) + return error; /* error code from tsleep */ + + switch (cmd) { + case DIOCSBAD: + error = EINVAL; + break; + case CDIOCEJECT: + error = odstopunit(periph, 1); + softc->flags |= OD_FLAG_PACK_INVALID; + break; + case CDIOCALLOW: + odprevent(periph, PR_ALLOW); + break; + case CDIOCPREVENT: + odprevent(periph, PR_PREVENT); + break; + default: + error = dsioctl("od", dev, cmd, addr, flag, &softc->dk_slices, + odstrategy1, (ds_setgeom_t *)NULL); + if (error != ENOIOCTL) + break; + + error = cam_periph_ioctl(periph, cmd, addr, oderror); + break; + } + + cam_periph_unlock(periph); + + CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("leaving odioctl\n")); + + return error; +} + +static int +oddump(dev_t dev) +{ + struct cam_periph *periph; + struct od_softc *softc; + struct disklabel *lp; + u_int unit; + u_int part; + long num; /* number of sectors to write */ + long blkoff; + long blknum; + long blkcnt; + vm_offset_t addr; + static int oddoingadump = 0; + struct ccb_scsiio csio; + + /* toss any characters present prior to dump */ + while (cncheckc() != -1) + ; + + unit = dkunit(dev); + part = dkpart(dev); + periph = cam_extend_get(odperiphs, unit); + if (periph == NULL) { + return ENXIO; + } + softc = (struct od_softc *)periph->softc; + + if ((softc->flags & OD_FLAG_PACK_INVALID) != 0 + || (softc->dk_slices == NULL) + || (lp = dsgetlabel(dev, softc->dk_slices)) == NULL) + return ENXIO; + + /* Size of memory to dump, in disk sectors. */ + /* XXX Fix up for non DEV_BSIZE sectors!!! 
*/ + num = (u_long)Maxmem * PAGE_SIZE / softc->params.secsize; + + blkoff = lp->d_partitions[part].p_offset; + blkoff += softc->dk_slices->dss_slices[dkslice(dev)].ds_offset; + + /* check transfer bounds against partition size */ + if ((dumplo < 0) || ((dumplo + num) > lp->d_partitions[part].p_size)) + return EINVAL; + + if (oddoingadump != 0) + return EFAULT; + + oddoingadump = 1; + + blknum = dumplo + blkoff; + blkcnt = PAGE_SIZE / softc->params.secsize; + + addr = 0; /* starting address */ + + while (num > 0) { + + if (is_physical_memory(addr)) { + pmap_enter(kernel_pmap, (vm_offset_t)CADDR1, + trunc_page(addr), VM_PROT_READ, TRUE); + } else { + pmap_enter(kernel_pmap, (vm_offset_t)CADDR1, + trunc_page(0), VM_PROT_READ, TRUE); + } + + xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1); + csio.ccb_h.ccb_state = OD_CCB_DUMP; + scsi_read_write(&csio, + /*retries*/1, + oddone, + MSG_ORDERED_Q_TAG, + /*read*/FALSE, + /*byte2*/0, + /*minimum_cmd_size*/ softc->minimum_cmd_size, + blknum, + blkcnt, + /*data_ptr*/CADDR1, + /*dxfer_len*/blkcnt * softc->params.secsize, + /*sense_len*/SSD_FULL_SIZE, + OD_DEFAULT_TIMEOUT * 1000); + xpt_polled_action((union ccb *)&csio); + + if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { + printf("Aborting dump due to I/O error.\n"); + if ((csio.ccb_h.status & CAM_STATUS_MASK) == + CAM_SCSI_STATUS_ERROR) + scsi_sense_print(&csio); + else + printf("status == 0x%x, scsi status == 0x%x\n", + csio.ccb_h.status, csio.scsi_status); + return EIO; + } + + if (addr % (1024 * 1024) == 0) { +#ifdef HW_WDOG + if (wdog_tickler) + (*wdog_tickler)(); +#endif /* HW_WDOG */ + /* Count in MB of data left to write */ + printf("%ld ", (num * softc->params.secsize) + / (1024 * 1024)); + } + + /* update block count */ + num -= blkcnt; + blknum += blkcnt; + addr += blkcnt * softc->params.secsize; + + /* operator aborting dump? */ + if (cncheckc() != -1) + return EINTR; + } + + /* + * Sync the disk cache contents to the physical media. + */ + if ((softc->quirks & OD_Q_NO_SYNC_CACHE) == 0) { + + xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1); + csio.ccb_h.ccb_state = OD_CCB_DUMP; + scsi_synchronize_cache(&csio, + /*retries*/1, + /*cbfcnp*/oddone, + MSG_SIMPLE_Q_TAG, + /*begin_lba*/0,/* Cover the whole disk */ + /*lb_count*/0, + SSD_FULL_SIZE, + 5 * 60 * 1000); + xpt_polled_action((union ccb *)&csio); + + if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { + if ((csio.ccb_h.status & CAM_STATUS_MASK) == + CAM_SCSI_STATUS_ERROR) { + int asc, ascq; + int sense_key, error_code; + + scsi_extract_sense(&csio.sense_data, + &error_code, + &sense_key, + &asc, &ascq); + if (sense_key != SSD_KEY_ILLEGAL_REQUEST) + scsi_sense_print(&csio); + } else { + xpt_print_path(periph->path); + printf("Synchronize cache failed, status " + "== 0x%x, scsi status == 0x%x\n", + csio.ccb_h.status, csio.scsi_status); + } + } + } + return 0; +} + +static int +odsize(dev_t dev) +{ + struct cam_periph *periph; + struct od_softc *softc; + + periph = cam_extend_get(odperiphs, dkunit(dev)); + if (periph == NULL) + return ENXIO; + + softc = (struct od_softc *)periph->softc; + + return dssize(dev, &softc->dk_slices, odopen, odclose); +} + +static void +odinit(void) +{ + cam_status status; + struct cam_path *path; + + /* + * Create our extend array for storing the devices we attach to. + */ + odperiphs = cam_extend_new(); + SLIST_INIT(&softc_list); + if (odperiphs == NULL) { + printf("od: Failed to alloc extend array!\n"); + return; + } + + /* + * Install a global async callback. 
This callback will + * receive async callbacks like "new device found". + */ + status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID, + CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); + + if (status == CAM_REQ_CMP) { + struct ccb_setasync csa; + + xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5); + csa.ccb_h.func_code = XPT_SASYNC_CB; + csa.event_enable = AC_FOUND_DEVICE; + csa.callback = odasync; + csa.callback_arg = NULL; + xpt_action((union ccb *)&csa); + status = csa.ccb_h.status; + xpt_free_path(path); + } + + if (status != CAM_REQ_CMP) { + printf("od: Failed to attach master async callback " + "due to status 0x%x!\n", status); + } else { +#ifdef OD_USE_ORDERED_TAG + int err; +#endif /* OD_USE_ORDERED_TAG */ + + /* If we were successfull, register our devsw */ + cdevsw_add_generic(OD_BDEV_MAJOR, OD_CDEV_MAJOR, &od_cdevsw); + +#ifdef OD_USE_ORDERED_TAG + /* + * Schedule a periodic event to occasioanly send an + * ordered tag to a device. + */ + timeout(odsendorderedtag, NULL, + (OD_DEFAULT_TIMEOUT * hz) / OD_ORDEREDTAG_INTERVAL); + + if ((err = at_shutdown(odshutdown, NULL, + SHUTDOWN_POST_SYNC)) != 0) + printf("odinit: at_shutdown returned %d!\n", err); +#endif /* OD_USE_ORDERED_TAG */ + } +} + +static void +odoninvalidate(struct cam_periph *periph) +{ + int s; + struct od_softc *softc; + struct buf *q_bp; + struct ccb_setasync csa; + + softc = (struct od_softc *)periph->softc; + + /* + * De-register any async callbacks. + */ + xpt_setup_ccb(&csa.ccb_h, periph->path, + /* priority */ 5); + csa.ccb_h.func_code = XPT_SASYNC_CB; + csa.event_enable = 0; + csa.callback = odasync; + csa.callback_arg = periph; + xpt_action((union ccb *)&csa); + + softc->flags |= OD_FLAG_PACK_INVALID; + + /* + * Although the oninvalidate() routines are always called at + * splsoftcam, we need to be at splbio() here to keep the buffer + * queue from being modified while we traverse it. + */ + s = splbio(); + + /* + * Return all queued I/O with ENXIO. + * XXX Handle any transactions queued to the card + * with XPT_ABORT_CCB. + */ + while ((q_bp = bufq_first(&softc->buf_queue)) != NULL){ + bufq_remove(&softc->buf_queue, q_bp); + q_bp->b_resid = q_bp->b_bcount; + q_bp->b_error = ENXIO; + q_bp->b_flags |= B_ERROR; + biodone(q_bp); + } + splx(s); + + SLIST_REMOVE(&softc_list, softc, od_softc, links); + + xpt_print_path(periph->path); + printf("lost device\n"); +} + +static void +odcleanup(struct cam_periph *periph) +{ + struct od_softc *softc; + + softc = (struct od_softc *)periph->softc; + + devstat_remove_entry(&softc->device_stats); + cam_extend_release(odperiphs, periph->unit_number); + xpt_print_path(periph->path); + printf("removing device entry\n"); + free(softc, M_DEVBUF); +} + +static void +odasync(void *callback_arg, u_int32_t code, + struct cam_path *path, void *arg) +{ + struct cam_periph *periph; + + periph = (struct cam_periph *)callback_arg; + switch (code) { + case AC_FOUND_DEVICE: + { + struct ccb_getdev *cgd; + cam_status status; + + cgd = (struct ccb_getdev *)arg; + + if (cgd->pd_type == T_OPTICAL) { + /* do nothing */ + } else if (cgd->pd_type == T_CDROM) { + caddr_t match; + match = + cam_quirkmatch((caddr_t)&cgd->inq_data, + (caddr_t)od_quirk_table, + sizeof (od_quirk_table) + / sizeof (*od_quirk_table), + sizeof (*od_quirk_table), + scsi_inquiry_match); + if (match == NULL) + break; + } else { + break; + } + + /* + * Allocate a peripheral instance for this device + * and start the probe process. 
+ */ + status = cam_periph_alloc(odregister, odoninvalidate, + odcleanup, odstart, + "od", CAM_PERIPH_BIO, + cgd->ccb_h.path, odasync, + AC_FOUND_DEVICE, cgd); + + if (status != CAM_REQ_CMP + && status != CAM_REQ_INPROG) + printf("odasync: Unable to attach to new device " + "due to status 0x%x\n", status); + break; + } + case AC_SENT_BDR: + case AC_BUS_RESET: + { + struct od_softc *softc; + struct ccb_hdr *ccbh; + int s; + + softc = (struct od_softc *)periph->softc; + s = splsoftcam(); + /* + * Don't fail on the expected unit attention + * that will occur. + */ + softc->flags |= OD_FLAG_RETRY_UA; + for (ccbh = LIST_FIRST(&softc->pending_ccbs); + ccbh != NULL; ccbh = LIST_NEXT(ccbh, periph_links.le)) + ccbh->ccb_state |= OD_CCB_RETRY_UA; + splx(s); + /* FALLTHROUGH */ + } + default: + cam_periph_async(periph, code, path, arg); + break; + } +} + +static cam_status +odregister(struct cam_periph *periph, void *arg) +{ + int s; + struct od_softc *softc; + struct ccb_setasync csa; + struct ccb_getdev *cgd; + caddr_t match; + + cgd = (struct ccb_getdev *)arg; + if (periph == NULL) { + printf("odregister: periph was NULL!!\n"); + return CAM_REQ_CMP_ERR; + } + + if (cgd == NULL) { + printf("odregister: no getdev CCB, can't register device\n"); + return CAM_REQ_CMP_ERR; + } + + softc = (struct od_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT); + + if (softc == NULL) { + printf("odregister: Unable to probe new device. " + "Unable to allocate softc\n"); + return CAM_REQ_CMP_ERR; + } + + bzero(softc, sizeof(*softc)); + LIST_INIT(&softc->pending_ccbs); + softc->state = OD_STATE_PROBE; + bufq_init(&softc->buf_queue); + if (SID_IS_REMOVABLE(&cgd->inq_data)) + softc->flags |= OD_FLAG_PACK_REMOVABLE; + if ((cgd->inq_data.flags & SID_CmdQue) != 0) + softc->flags |= OD_FLAG_TAGGED_QUEUING; + + periph->softc = softc; + + cam_extend_set(odperiphs, periph->unit_number, periph); + + /* + * See if this device has any quirks. + */ + match = cam_quirkmatch((caddr_t)&cgd->inq_data, + (caddr_t)od_quirk_table, + sizeof(od_quirk_table) / sizeof(*od_quirk_table), + sizeof(*od_quirk_table), scsi_inquiry_match); + + if (match != NULL) + softc->quirks = ((struct od_quirk_entry *)match)->quirks; + else + softc->quirks = OD_Q_NONE; + + if (softc->quirks & OD_Q_NO_6_BYTE) + softc->minimum_cmd_size = 10; + else + softc->minimum_cmd_size = 6; + + /* + * Block our timeout handler while we + * add this softc to the dev list. + */ + s = splsoftclock(); + SLIST_INSERT_HEAD(&softc_list, softc, links); + splx(s); + + /* + * The OD driver supports a blocksize, but + * we don't know the blocksize until we do + * a read capacity. So, set a flag to + * indicate that the blocksize is + * unavailable right now. We'll clear the + * flag as soon as we've done a read capacity. + */ + devstat_add_entry(&softc->device_stats, "od", + periph->unit_number, 0, + DEVSTAT_BS_UNAVAILABLE, + DEVSTAT_TYPE_OPTICAL | DEVSTAT_TYPE_IF_SCSI, + DEVSTAT_PRIORITY_DA); + + /* + * Add async callbacks for bus reset and + * bus device reset calls. I don't bother + * checking if this fails as, in most cases, + * the system will function just fine without + * them and the only alternative would be to + * not attach the device on failure. + */ + xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5); + csa.ccb_h.func_code = XPT_SASYNC_CB; + csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE; + csa.callback = odasync; + csa.callback_arg = periph; + xpt_action((union ccb *)&csa); + /* + * Lock this peripheral until we are setup. 
+ * This first call can't block + */ + (void)cam_periph_lock(periph, PRIBIO); + xpt_schedule(periph, /*priority*/5); + + return CAM_REQ_CMP; +} + +static void +odstart(struct cam_periph *periph, union ccb *start_ccb) +{ + struct od_softc *softc; + + softc = (struct od_softc *)periph->softc; + + switch (softc->state) { + case OD_STATE_NORMAL: + { + /* Pull a buffer from the queue and get going on it */ + struct buf *bp; + int s; + + /* + * See if there is a buf with work for us to do.. + */ + s = splbio(); + bp = bufq_first(&softc->buf_queue); + if (periph->immediate_priority <= periph->pinfo.priority) { + CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE, + ("queuing for immediate ccb\n")); + start_ccb->ccb_h.ccb_state = OD_CCB_WAITING; + SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, + periph_links.sle); + periph->immediate_priority = CAM_PRIORITY_NONE; + splx(s); + wakeup(&periph->ccb_list); + } else if (bp == NULL) { + splx(s); + xpt_release_ccb(start_ccb); + } else { + int oldspl; + u_int8_t tag_code; + + bufq_remove(&softc->buf_queue, bp); + + devstat_start_transaction(&softc->device_stats); + + if ((bp->b_flags & B_ORDERED) != 0 + || (softc->flags & OD_FLAG_NEED_OTAG) != 0) { + softc->flags &= ~OD_FLAG_NEED_OTAG; + softc->ordered_tag_count++; + tag_code = MSG_ORDERED_Q_TAG; + } else { + tag_code = MSG_SIMPLE_Q_TAG; + } + scsi_read_write(&start_ccb->csio, + /*retries*/4, + oddone, + tag_code, + bp->b_flags & B_READ, + /*byte2*/0, + softc->minimum_cmd_size, + bp->b_pblkno, + bp->b_bcount / softc->params.secsize, + bp->b_data, + bp->b_bcount, + /*sense_len*/SSD_FULL_SIZE, + OD_DEFAULT_TIMEOUT * 1000); + start_ccb->ccb_h.ccb_state = OD_CCB_BUFFER_IO; + + /* + * Block out any asyncronous callbacks + * while we touch the pending ccb list. + */ + oldspl = splcam(); + LIST_INSERT_HEAD(&softc->pending_ccbs, + &start_ccb->ccb_h, periph_links.le); + splx(oldspl); + + /* We expect a unit attention from this device */ + if ((softc->flags & OD_FLAG_RETRY_UA) != 0) { + start_ccb->ccb_h.ccb_state |= OD_CCB_RETRY_UA; + softc->flags &= ~OD_FLAG_RETRY_UA; + } + + start_ccb->ccb_h.ccb_bp = bp; + bp = bufq_first(&softc->buf_queue); + splx(s); + + xpt_action(start_ccb); + } + + if (bp != NULL) { + /* Have more work to do, so ensure we stay scheduled */ + xpt_schedule(periph, /* XXX priority */1); + } + break; + } + case OD_STATE_PROBE: + { + struct ccb_scsiio *csio; + struct scsi_read_capacity_data *rcap; + + rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap), + M_TEMP, + M_NOWAIT); + if (rcap == NULL) { + printf("odstart: Couldn't malloc read_capacity data\n"); + /* od_free_periph??? 
*/ + break; + } + csio = &start_ccb->csio; + scsi_read_capacity(csio, + /*retries*/4, + oddone, + MSG_SIMPLE_Q_TAG, + rcap, + SSD_FULL_SIZE, + /*timeout*/20000); + start_ccb->ccb_h.ccb_bp = NULL; + start_ccb->ccb_h.ccb_state = OD_CCB_PROBE; + xpt_action(start_ccb); + break; + } + } +} + +static void +oddone(struct cam_periph *periph, union ccb *done_ccb) +{ + struct od_softc *softc; + struct ccb_scsiio *csio; + + softc = (struct od_softc *)periph->softc; + csio = &done_ccb->csio; + + switch (csio->ccb_h.ccb_state & OD_CCB_TYPE_MASK) { + case OD_CCB_BUFFER_IO: + { + struct buf *bp; + int oldspl; + + bp = (struct buf *)done_ccb->ccb_h.ccb_bp; + if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) + != CAM_REQ_CMP) { + int error; + int s; + int sf; + + if ((csio->ccb_h.ccb_state & OD_CCB_RETRY_UA) != 0) + sf = SF_RETRY_UA; + else + sf = 0; + + /* Retry selection timeouts */ + sf |= SF_RETRY_SELTO; + + if ((error = oderror(done_ccb, 0, sf)) == ERESTART) { + /* + * A retry was scheuled, so just return. + */ + return; + } + if (error != 0) { + struct buf *q_bp; + + s = splbio(); + + if (error == ENXIO) { + /* + * Catastrophic error. + * Mark our pack as invalid. + */ + /* XXX See if this is really a media + * change first. + */ + xpt_print_path(periph->path); + printf("Invalidating pack\n"); + softc->flags |= OD_FLAG_PACK_INVALID; + } + + /* + * return all queued I/O with EIO, so that + * the client can retry these I/Os in the + * proper order should it attempt to recover. + */ + while ((q_bp = bufq_first(&softc->buf_queue)) + != NULL) { + bufq_remove(&softc->buf_queue, q_bp); + q_bp->b_resid = q_bp->b_bcount; + q_bp->b_error = EIO; + q_bp->b_flags |= B_ERROR; + biodone(q_bp); + } + splx(s); + bp->b_error = error; + bp->b_resid = bp->b_bcount; + bp->b_flags |= B_ERROR; + } else { + bp->b_resid = csio->resid; + bp->b_error = 0; + if (bp->b_resid != 0) { + /* Short transfer ??? */ + bp->b_flags |= B_ERROR; + } + } + if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) + cam_release_devq(done_ccb->ccb_h.path, + /*relsim_flags*/0, + /*reduction*/0, + /*timeout*/0, + /*getcount_only*/0); + } else { + bp->b_resid = csio->resid; + if (csio->resid > 0) + bp->b_flags |= B_ERROR; + } + + /* + * Block out any asyncronous callbacks + * while we touch the pending ccb list. + */ + oldspl = splcam(); + LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); + splx(oldspl); + + devstat_end_transaction(&softc->device_stats, + bp->b_bcount - bp->b_resid, + done_ccb->csio.tag_action & 0xf, + (bp->b_flags & B_READ) ? DEVSTAT_READ + : DEVSTAT_WRITE); + + if (softc->device_stats.busy_count == 0) + softc->flags |= OD_FLAG_WENT_IDLE; + + biodone(bp); + break; + } + case OD_CCB_PROBE: + { + struct scsi_read_capacity_data *rdcap; + char announce_buf[120]; + + rdcap = (struct scsi_read_capacity_data *)csio->data_ptr; + + if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { + struct disk_params *dp; + + odsetgeom(periph, rdcap); + dp = &softc->params; + snprintf(announce_buf, sizeof(announce_buf), + "%luMB (%u %u byte sectors: %dH %dS/T %dC)", + (unsigned long) (((u_int64_t)dp->secsize * + dp->sectors) / (1024*1024)), dp->sectors, + dp->secsize, dp->heads, dp->secs_per_track, + dp->cylinders); + } else { + int error; + + announce_buf[0] = '\0'; + + /* + * Retry any UNIT ATTENTION type errors. They + * are expected at boot. + */ + error = oderror(done_ccb, 0, SF_RETRY_UA | + SF_RETRY_SELTO | SF_NO_PRINT); + if (error == ERESTART) { + /* + * A retry was scheuled, so + * just return. 
+ */ + return; + } else if (error != 0) { + struct scsi_sense_data *sense; + int asc, ascq; + int sense_key, error_code; + int have_sense; + cam_status status; + struct ccb_getdev cgd; + + /* Don't wedge this device's queue */ + cam_release_devq(done_ccb->ccb_h.path, + /*relsim_flags*/0, + /*reduction*/0, + /*timeout*/0, + /*getcount_only*/0); + + status = done_ccb->ccb_h.status; + + xpt_setup_ccb(&cgd.ccb_h, + done_ccb->ccb_h.path, + /* priority */ 1); + cgd.ccb_h.func_code = XPT_GDEV_TYPE; + xpt_action((union ccb *)&cgd); + + if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0) + || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0) + || ((status & CAM_AUTOSNS_VALID) == 0)) + have_sense = FALSE; + else + have_sense = TRUE; + + if (have_sense) { + sense = &csio->sense_data; + scsi_extract_sense(sense, &error_code, + &sense_key, + &asc, &ascq); + } + /* + * Attach to anything that claims to be a + * direct access or optical disk device, + * as long as it doesn't return a "Logical + * unit not supported" (0x25) error. + */ + if ((have_sense) && (asc != 0x25) + && (error_code == SSD_CURRENT_ERROR)) + snprintf(announce_buf, + sizeof(announce_buf), + "Attempt to query device " + "size failed: %s, %s", + scsi_sense_key_text[sense_key], + scsi_sense_desc(asc,ascq, + &cgd.inq_data)); + else { + if (have_sense) + scsi_sense_print( + &done_ccb->csio); + else { + xpt_print_path(periph->path); + printf("got CAM status %#x\n", + done_ccb->ccb_h.status); + } + + xpt_print_path(periph->path); + printf("fatal error, failed" + " to attach to device\n"); + + /* + * Free up resources. + */ + cam_periph_invalidate(periph); + } + } + } + free(rdcap, M_TEMP); + if (announce_buf[0] != '\0') + xpt_announce_periph(periph, announce_buf); + softc->state = OD_STATE_NORMAL; + /* + * Since our peripheral may be invalidated by an error + * above or an external event, we must release our CCB + * before releasing the probe lock on the peripheral. + * The peripheral will only go away once the last lock + * is removed, and we need it around for the CCB release + * operation. + */ + xpt_release_ccb(done_ccb); + cam_periph_unlock(periph); + return; + } + case OD_CCB_WAITING: + { + /* Caller will release the CCB */ + wakeup(&done_ccb->ccb_h.cbfcnp); + return; + } + case OD_CCB_DUMP: + /* No-op. We're polling */ + return; + default: + break; + } + xpt_release_ccb(done_ccb); +} + +static int +oderror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) +{ + struct od_softc *softc; + struct cam_periph *periph; + + periph = xpt_path_periph(ccb->ccb_h.path); + softc = (struct od_softc *)periph->softc; + + /* + * XXX + * Until we have a better way of doing pack validation, + * don't treat UAs as errors. 
+ */ + sense_flags |= SF_RETRY_UA; + return cam_periph_error(ccb, cam_flags, sense_flags, + &softc->saved_ccb); +} + +static void +odprevent(struct cam_periph *periph, int action) +{ + struct od_softc *softc; + union ccb *ccb; + int error; + + softc = (struct od_softc *)periph->softc; + + if (((action == PR_ALLOW) + && (softc->flags & OD_FLAG_PACK_LOCKED) == 0) + || ((action == PR_PREVENT) + && (softc->flags & OD_FLAG_PACK_LOCKED) != 0)) { + return; + } + + ccb = cam_periph_getccb(periph, /*priority*/1); + + scsi_prevent(&ccb->csio, + /*retries*/1, + /*cbcfp*/oddone, + MSG_SIMPLE_Q_TAG, + action, + SSD_FULL_SIZE, + 60000); + + error = cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0, + /*sense_flags*/SF_RETRY_UA | SF_NO_PRINT, + &softc->device_stats); + + if (error == 0) { + if (action == PR_ALLOW) + softc->flags &= ~OD_FLAG_PACK_LOCKED; + else + softc->flags |= OD_FLAG_PACK_LOCKED; + } + + xpt_release_ccb(ccb); +} + +static void +odsetgeom(struct cam_periph *periph, struct scsi_read_capacity_data * rdcap) +{ + struct ccb_calc_geometry ccg; + struct od_softc *softc; + struct disk_params *dp; + + softc = (struct od_softc *)periph->softc; + + dp = &softc->params; + dp->secsize = scsi_4btoul(rdcap->length); + dp->sectors = scsi_4btoul(rdcap->addr) + 1; + /* + * Have the controller provide us with a geometry + * for this disk. The only time the geometry + * matters is when we boot and the controller + * is the only one knowledgeable enough to come + * up with something that will make this a bootable + * device. + */ + xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1); + ccg.ccb_h.func_code = XPT_CALC_GEOMETRY; + ccg.block_size = dp->secsize; + ccg.volume_size = dp->sectors; + ccg.heads = 0; + ccg.secs_per_track = 0; + ccg.cylinders = 0; + xpt_action((union ccb*)&ccg); + dp->heads = ccg.heads; + dp->secs_per_track = ccg.secs_per_track; + dp->cylinders = ccg.cylinders; +} + +#ifdef OD_USE_ORDERED_TAG +static void +odsendorderedtag(void *arg) +{ + struct od_softc *softc; + int s; + + for (softc = SLIST_FIRST(&softc_list); + softc != NULL; + softc = SLIST_NEXT(softc, links)) { + s = splsoftcam(); + if ((softc->ordered_tag_count == 0) + && ((softc->flags & OD_FLAG_WENT_IDLE) == 0)) { + softc->flags |= OD_FLAG_NEED_OTAG; + } + if (softc->device_stats.busy_count > 0) + softc->flags &= ~OD_FLAG_WENT_IDLE; + + softc->ordered_tag_count = 0; + splx(s); + } + /* Queue us up again */ + timeout(odsendorderedtag, NULL, + (OD_DEFAULT_TIMEOUT * hz) / OD_ORDEREDTAG_INTERVAL); +} + +/* + * Step through all OD peripheral drivers, and if the device is still open, + * sync the disk cache to physical media. + */ +static void +odshutdown(int howto, void *arg) +{ + struct cam_periph *periph; + struct od_softc *softc; + + for (periph = TAILQ_FIRST(&oddriver.units); periph != NULL; + periph = TAILQ_NEXT(periph, unit_links)) { + union ccb ccb; + softc = (struct od_softc *)periph->softc; + + /* + * We only sync the cache if the drive is still open, and + * if the drive is capable of it.. 
+ */ + if (((softc->flags & OD_FLAG_OPEN) == 0) + || (softc->quirks & OD_Q_NO_SYNC_CACHE)) + continue; + + xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1); + + ccb.ccb_h.ccb_state = OD_CCB_DUMP; + scsi_synchronize_cache(&ccb.csio, + /*retries*/1, + /*cbfcnp*/oddone, + MSG_SIMPLE_Q_TAG, + /*begin_lba*/0, /* whole disk */ + /*lb_count*/0, + SSD_FULL_SIZE, + 5 * 60 * 1000); + + xpt_polled_action(&ccb); + + if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { + if (((ccb.ccb_h.status & CAM_STATUS_MASK) == + CAM_SCSI_STATUS_ERROR) + && (ccb.csio.scsi_status == SCSI_STATUS_CHECK_COND)){ + int error_code, sense_key, asc, ascq; + + scsi_extract_sense(&ccb.csio.sense_data, + &error_code, &sense_key, + &asc, &ascq); + + if (sense_key != SSD_KEY_ILLEGAL_REQUEST) + scsi_sense_print(&ccb.csio); + } else { + xpt_print_path(periph->path); + printf("Synchronize cache failed, status " + "== 0x%x, scsi status == 0x%x\n", + ccb.ccb_h.status, ccb.csio.scsi_status); + } + } + + if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0) + cam_release_devq(ccb.ccb_h.path, + /*relsim_flags*/0, + /*reduction*/0, + /*timeout*/0, + /*getcount_only*/0); + + } +} +#endif /* OD_USE_ORDERED_TAG */ + +static int +odstartunit(struct cam_periph *periph) +{ + struct od_softc *softc; + union ccb *ccb; + int error; + + softc = (struct od_softc *)periph->softc; + ccb = cam_periph_getccb(periph, /* priority */ 1); + + scsi_start_stop(&ccb->csio, + /* retries */ 1, + /* cbfcnp */ oddone, + /* tag_action */ MSG_SIMPLE_Q_TAG, + /* start */ TRUE, + /* load_eject */ 0, + /* immediate */ FALSE, + /* sense_len */ SSD_FULL_SIZE, + /* timeout */ 50000); + error = cam_periph_runccb(ccb, oderror, /*cam_flags*/0, + /*sense_flags*/SF_RETRY_UA | SF_NO_PRINT, + &softc->device_stats); + + xpt_release_ccb(ccb); + + return error; +} + +static int +odstopunit(struct cam_periph *periph, u_int32_t eject) +{ + struct od_softc *softc; + union ccb *ccb; + int error; + + softc = (struct od_softc *)periph->softc; + ccb = cam_periph_getccb(periph, /* priority */ 1); + + scsi_start_stop(&ccb->csio, + /* retries */ 1, + /* cbfcnp */ oddone, + /* tag_action */ MSG_SIMPLE_Q_TAG, + /* start */ FALSE, + /* load_eject */ eject, + /* immediate */ FALSE, + /* sense_len */ SSD_FULL_SIZE, + /* timeout */ 50000); + error = cam_periph_runccb(ccb, oderror, /*cam_flags*/0, + /*sense_flags*/SF_RETRY_UA, + &softc->device_stats); + + xpt_release_ccb(ccb); + + return error; +} + +static int +odcheckunit(struct cam_periph *periph) +{ + struct od_softc *softc; + union ccb *ccb; + int error; + int retries; + + softc = (struct od_softc *)periph->softc; + ccb = cam_periph_getccb(periph, /* priority */ 1); + + retries = od_wait_ready_count; + do { + scsi_test_unit_ready(&ccb->csio, + /* retries */ 1, + /* cbfcnp */ oddone, + /* tag_action */ MSG_SIMPLE_Q_TAG, + /* sense_len */ SSD_FULL_SIZE, + /* timeout */ 1000); + error = cam_periph_runccb(ccb, oderror, /*cam_flags*/0, + /*sense_flags*/ + SF_RETRY_UA | SF_NO_PRINT, + &softc->device_stats); + if (error != ENXIO || retries == 0) + break; + + tsleep(&error, PRIBIO | PCATCH, "odrdy", hz); + } while (--retries > 0); + + xpt_release_ccb(ccb); + + return error; +} diff -u sys.noOD/conf/files sys/conf/files --- sys.noOD/conf/files Thu Dec 9 04:46:57 1999 +++ sys/conf/files Fri Dec 24 23:51:32 1999 @@ -59,6 +59,7 @@ cam/scsi/scsi_sa.c optional sa cam/scsi/scsi_cd.c optional cd cam/scsi/scsi_ch.c optional ch +cam/scsi/scsi_od.c optional od cam/scsi/scsi_pass.c optional pass cam/scsi/scsi_scan.c optional scan cam/scsi/scsi_target.c 
optional targ diff -u sys.noOD/conf/options sys/conf/options --- sys.noOD/conf/options Thu Dec 9 04:46:58 1999 +++ sys/conf/options Fri Dec 24 23:51:32 1999 @@ -179,6 +179,9 @@ # Options used only in cam/scsi/scsi_pt.c SCSI_PT_DEFAULT_TIMEOUT opt_pt.h +# Options used only in cam/scsi/scsi_od.c +OD_USE_ORDERED_TAG opt_od.h + # Options used only in pci/ncr.c SCSI_NCR_DEBUG opt_ncr.h SCSI_NCR_DFLT_TAGS opt_ncr.h @@ -410,6 +413,9 @@ # Include LKM compatability module LKM + +# option for FFS compatibility +FFS_COMPAT_XXXXBSD opt_ffs.h # Embedded system options INIT_PATH opt_init_path.h diff -u sys.noOD/i386/conf/GENERIC sys/i386/conf/GENERIC --- sys.noOD/i386/conf/GENERIC Sun Dec 5 10:56:42 1999 +++ sys/i386/conf/GENERIC Fri Dec 24 23:51:32 1999 @@ -105,6 +105,7 @@ # Only one of each of these is needed, they are dynamically allocated. controller scbus0 # SCSI bus (required) device da0 # Direct Access (disks) +device od0 # Optical Memory (MO etc) device sa0 # Sequential Access (tape etc) device cd0 # CD device pass0 # Passthrough device (direct SCSI) diff -u sys.noOD/i386/conf/LINT sys/i386/conf/LINT --- sys.noOD/i386/conf/LINT Thu Dec 16 04:35:13 1999 +++ sys/i386/conf/LINT Fri Dec 24 23:51:32 1999 @@ -682,6 +682,11 @@ options CODA #CODA filesystem. pseudo-device vcoda 4 #coda minicache <-> venus comm. +# Large sector media compatibility +options FFS_COMPAT_XXXXBSD # FreeBSD 1.X, 2.X and NetBSD FFS + # compatibility for sector size is + # other than DEVBSIZE. + ##################################################################### # POSIX P1003.1B @@ -745,6 +750,7 @@ device da0 #SCSI direct access devices (aka disks) device sa0 #SCSI tapes device cd0 #SCSI CD-ROMs +device od0 #SCSI optical disk device pass0 #CAM passthrough driver # The previous devices (ch, da, st, cd) are recognized by config. diff -u sys.noOD/i386/conf/devices.i386 sys/i386/conf/devices.i386 --- sys.noOD/i386/conf/devices.i386 Mon Aug 30 01:05:24 1999 +++ sys/i386/conf/devices.i386 Fri Dec 24 23:51:32 1999 @@ -14,4 +14,5 @@ scd 16 pcd 17 acd 19 +od 20 wst 24 diff -u sys.noOD/i386/conf/majors.i386 sys/i386/conf/majors.i386 --- sys.noOD/i386/conf/majors.i386 Mon Aug 30 01:05:25 1999 +++ sys/i386/conf/majors.i386 Fri Dec 24 23:51:32 1999 @@ -33,6 +33,7 @@ 17 matcd Matsushita/Panasonic/Creative(SB) CDROM interface 18 ata "device independent" ATA/IDE driver 19 acdb ATAPI CDROM client of "ata" +20 od SCSI "optical" devices 21 ccd concatenated disk 22 gd Geometry disk. 24 wstb ATAPI tape client of "ata" @@ -115,6 +116,7 @@ 67 meteor Matrox Meteor video capture 68 si Specialix SI/XIO (peter@freebsd.org) 69 acd ATAPI CDROM client of "ata" +70 od SCSI "optical" devices 71 asc AmiScan driver 72 stl Stallion (cd1400 based) (gerg@stallion.oz.au) 73 ?? 
was qcam diff -u sys.noOD/i386/i386/swapgeneric.c sys/i386/i386/swapgeneric.c --- sys.noOD/i386/i386/swapgeneric.c Mon Aug 30 01:05:52 1999 +++ sys/i386/i386/swapgeneric.c Fri Dec 24 23:51:32 1999 @@ -56,6 +56,7 @@ #include "mcd.h" #include "scd.h" #include "matcd.h" +#include "od.h" /* * Generic configuration; all in one @@ -89,6 +90,9 @@ #endif #if NMATCD > 0 { "matcd", makedev(17,0x00000000), }, +#endif +#if NOD > 0 + { "od", makedev(20,0x00000000), }, #endif { 0 }, }; diff -u sys.noOD/i386/isa/diskslice_machdep.c sys/i386/isa/diskslice_machdep.c --- sys.noOD/i386/isa/diskslice_machdep.c Mon Aug 30 01:07:16 1999 +++ sys/i386/isa/diskslice_machdep.c Fri Dec 24 23:51:32 1999 @@ -50,6 +50,8 @@ #include #include +#define MEDIA_DESC_OFFS 21 + #define TRACE(str) do { if (dsi_debug) printf str; } while (0) static volatile u_char dsi_debug; @@ -61,6 +63,7 @@ { 0x80, 0, 1, 0, DOSPTYP_386BSD, 255, 255, 255, 0, 50000, }, }; +static int check_part_table __P((u_char *bp)); static int check_part __P((char *sname, struct dos_partition *dp, u_long offset, int nsectors, int ntracks, u_long mbr_offset)); @@ -71,6 +74,47 @@ u_long mbr_offset)); static int +check_part_table(bp) + u_char *bp; +{ + struct dos_partition *dp; + int result; + int i; +# define PATN_SIZE 5 + static u_char pattern[PATN_SIZE][4] = { + { 0, 0, 0, 0 }, + { 0x80, 0, 0, 0 }, + { 0, 0x80, 0, 0 }, + { 0, 0, 0x80, 0 }, + { 0, 0, 0, 0x80 } + }; + + dp = (struct dos_partition *)(bp + DOSPARTOFF); + result = 0; + for (i = 0; i < PATN_SIZE; i++) { + if (dp->dp_flag == pattern[i][0] + && (dp + 1)->dp_flag == pattern[i][1] + && (dp + 2)->dp_flag == pattern[i][2] + && (dp + 3)->dp_flag == pattern[i][3]) { + result = 1; + break; + } + } + +#if 0 + /* + * MS documented about media descriptor 0xF8, but it's not + * worked with special boot loader code.(eg. FreeBSD) + */ + if (result == 0) + if (bp[MEDIA_DESC_OFFS] == 0xF8) + result = 1; +#endif + + return result; +} + +static int check_part(sname, dp, offset, nsectors, ntracks, mbr_offset ) char *sname; struct dos_partition *dp; @@ -173,6 +217,7 @@ char *sname; struct diskslice *sp; struct diskslices *ssp; + int result; mbr_offset = DOSBBSECTOR; reread_mbr: @@ -203,6 +248,25 @@ goto done; } dp0 = (struct dos_partition *)(cp + DOSPARTOFF); + + result = check_part_table(cp); + if (result == 0) { + if (bootverbose) + printf("%s: no partion table\n", sname); + /* + * We are passed a pointer to a suitably initialized + * minimal slices "struct" with no dangling pointers + * in it. Replace it by a maximal one. This usually + * oversizes the "struct", but enlarging it while + * searching for logical drives would be inconvenient. + */ + free(*sspp, M_DEVBUF); + ssp = dsmakeslicestruct(MAX_SLICES, lp); + *sspp = ssp; + ssp->dss_nslices = BASE_SLICE; + error = 0; + goto done; + } /* Check for "Ontrack Diskmanager". 
*/ for (dospart = 0, dp = dp0; dospart < NDOSPART; dospart++, dp++) { diff -u sys.noOD/kern/vfs_subr.c sys/kern/vfs_subr.c --- sys.noOD/kern/vfs_subr.c Thu Sep 16 11:02:15 1999 +++ sys/kern/vfs_subr.c Fri Dec 24 23:51:33 1999 @@ -1209,6 +1209,7 @@ nvp->v_hashchain = vpp; nvp->v_specnext = *vpp; nvp->v_specmountpoint = NULL; + nvp->v_blksize = 0; simple_unlock(&spechash_slock); *vpp = nvp; if (vp != NULLVP) { diff -u sys.noOD/miscfs/specfs/spec_vnops.c sys/miscfs/specfs/spec_vnops.c --- sys.noOD/miscfs/specfs/spec_vnops.c Mon Aug 30 01:26:57 1999 +++ sys/miscfs/specfs/spec_vnops.c Fri Dec 24 23:51:33 1999 @@ -45,6 +45,8 @@ #include #include #include +#include +#include #include #include @@ -165,6 +167,8 @@ dev_t bdev, dev = (dev_t)vp->v_rdev; int maj = major(dev); int error; + struct disklabel *label; + struct diskslices *slices; /* * Don't allow open if fs is mounted -nodev. @@ -209,6 +213,29 @@ VOP_UNLOCK(vp, 0, p); error = (*cdevsw[maj]->d_open)(dev, ap->a_mode, S_IFCHR, p); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); + if (error) + return error; + vp->v_blksize = 0; + if ((cdevsw[maj]->d_flags & D_TYPEMASK) == D_DISK + && cdevsw[maj]->d_ioctl != NULL) { + label = malloc(sizeof *label, M_TEMP, M_WAITOK); + slices = malloc(sizeof *slices, M_TEMP, M_WAITOK); + if ((*cdevsw[maj]->d_ioctl)(dev, DIOCGDINFO, + (caddr_t)label, ap->a_mode, p) == 0) + vp->v_blksize = label->d_secsize; + else if ((*cdevsw[maj]->d_ioctl)(dev, DIOCGSLICEINFO, + (caddr_t)slices, ap->a_mode, p) == 0) + vp->v_blksize = slices->dss_secsize; + free(slices, M_TEMP); + free(label, M_TEMP); +#ifdef FIX_VNODE_PAGER_DEBUG + printf("spec_open/VCHAR: dev = %d, block size = %lu\n", + dev, vp->v_blksize); +#endif + } + if (vp->v_blksize == 0 + || (vp->v_blksize & ~(1 << (ffs(vp->v_blksize) - 1))) != 0) + vp->v_blksize = DEV_BSIZE; return (error); case VBLK: @@ -232,7 +259,31 @@ error = vfs_mountedon(vp); if (error) return (error); - return ((*bdevsw[maj]->d_open)(dev, ap->a_mode, S_IFBLK, p)); + error = (*bdevsw[maj]->d_open)(dev, ap->a_mode, S_IFBLK, p); + if (error) + return error; + vp->v_blksize = 0; + if ((bdevsw[maj]->d_flags & D_TYPEMASK) == D_DISK + && bdevsw[maj]->d_ioctl != NULL) { + label = malloc(sizeof *label, M_TEMP, M_WAITOK); + slices = malloc(sizeof *slices, M_TEMP, M_WAITOK); + if ((*bdevsw[maj]->d_ioctl)(dev, DIOCGDINFO, + (caddr_t)label, ap->a_mode, p) == 0) + vp->v_blksize = label->d_secsize; + else if ((*bdevsw[maj]->d_ioctl)(dev, DIOCGSLICEINFO, + (caddr_t)slices, ap->a_mode, p) == 0) + vp->v_blksize = slices->dss_secsize; + free(slices, M_TEMP); + free(label, M_TEMP); +#ifdef FIX_VNODE_PAGER_DEBUG + printf("spec_open/VBLK: dev = %d, block size = %lu\n", + dev, vp->v_blksize); +#endif + } + if (vp->v_blksize == 0 + || (vp->v_blksize & ~(1 << (ffs(vp->v_blksize) - 1))) != 0) + vp->v_blksize = DEV_BSIZE; + return error; } return (0); } @@ -579,7 +630,6 @@ int *a_runb; } */ *ap; { - if (ap->a_vpp != NULL) *ap->a_vpp = ap->a_vp; if (ap->a_bnp != NULL) @@ -765,18 +815,10 @@ blkno = btodb(offset); /* - * Round up physical size for real devices, use the - * fundamental blocksize of the fs if possible. + * Round up physical size for real devices. 
*/ - if (vp && vp->v_mount) { - if (vp->v_type != VBLK) { - vprint("Non VBLK", vp); - } - blksiz = vp->v_mount->mnt_stat.f_bsize; - if (blksiz < DEV_BSIZE) { - blksiz = DEV_BSIZE; - } - } + if (vp && vp->v_blksize >= DEV_BSIZE) + blksiz = vp->v_blksize; else blksiz = DEV_BSIZE; size = (ap->a_count + blksiz - 1) & ~(blksiz - 1); diff -u sys.noOD/pc98/conf/GENERIC98 sys/pc98/conf/GENERIC98 --- sys.noOD/pc98/conf/GENERIC98 Mon Dec 27 23:13:49 1999 +++ sys/pc98/conf/GENERIC98 Mon Dec 27 23:13:33 1999 @@ -151,7 +151,7 @@ # Only one of each of these is needed, they are dynamically allocated. controller scbus0 # SCSI bus (required) device da0 # Direct Access (disks) -#device od0 # Optical Memory (MO etc) +device od0 # Optical Memory (MO etc) device sa0 # Sequential Access (tape etc) device cd0 # CD device pass0 # Passthrough device (direct SCSI) diff -u sys.noOD/pc98/pc98/atcompat_diskslice.c sys/pc98/pc98/atcompat_diskslice.c --- sys.noOD/pc98/pc98/atcompat_diskslice.c Mon Aug 30 01:31:06 1999 +++ sys/pc98/pc98/atcompat_diskslice.c Fri Dec 24 23:51:33 1999 @@ -60,6 +60,8 @@ #include #include +#define MEDIA_DESC_OFFS 21 + #define TRACE(str) do { if (dsi_debug) printf str; } while (0) static volatile u_char dsi_debug; @@ -71,6 +73,7 @@ { 0x80, 0, 1, 0, DOSPTYP_386BSD, 255, 255, 255, 0, 50000, }, }; +static int check_part_table __P((u_char *bp)); static int check_part __P((char *sname, struct dos_partition *dp, u_long offset, int nsectors, int ntracks, u_long mbr_offset)); @@ -82,6 +85,47 @@ u_long mbr_offset)); static int +check_part_table(bp) + u_char *bp; +{ + struct dos_partition *dp; + int result; + int i; +# define PATN_SIZE 5 + static u_char pattern[PATN_SIZE][4] = { + { 0, 0, 0, 0 }, + { 0x80, 0, 0, 0 }, + { 0, 0x80, 0, 0 }, + { 0, 0, 0x80, 0 }, + { 0, 0, 0, 0x80 } + }; + + dp = (struct dos_partition *)(bp + DOSPARTOFF); + result = 0; + for (i = 0; i < PATN_SIZE; i++) { + if (dp->dp_flag == pattern[i][0] + && (dp + 1)->dp_flag == pattern[i][1] + && (dp + 2)->dp_flag == pattern[i][2] + && (dp + 3)->dp_flag == pattern[i][3]) { + result = 1; + break; + } + } + +#if 0 + /* + * MS documented about media descriptor 0xF8, but it's not + * worked with special boot loader code.(eg. FreeBSD) + */ + if (result == 0) + if (bp[MEDIA_DESC_OFFS] == 0xF8) + result = 1; +#endif + + return result; +} + +static int check_part(sname, dp, offset, nsectors, ntracks, mbr_offset ) char *sname; struct dos_partition *dp; @@ -186,6 +230,7 @@ char *sname; struct diskslice *sp; struct diskslices *ssp; + int result; mbr_offset = DOSBBSECTOR; reread_mbr: @@ -216,6 +261,25 @@ goto done; } dp0 = (struct dos_partition *)(cp + DOSPARTOFF); + + result = check_part_table(cp); + if (result == 0) { + if (bootverbose) + printf("%s: no partion table\n", sname); + /* + * We are passed a pointer to a suitably initialized + * minimal slices "struct" with no dangling pointers + * in it. Replace it by a maximal one. This usually + * oversizes the "struct", but enlarging it while + * searching for logical drives would be inconvenient. + */ + free(*sspp, M_DEVBUF); + ssp = dsmakeslicestruct(MAX_SLICES, lp); + *sspp = ssp; + ssp->dss_nslices = BASE_SLICE; + error = 0; + goto done; + } /* Check for "Ontrack Diskmanager". 
*/ for (dospart = 0, dp = dp0; dospart < NDOSPART; dospart++, dp++) { diff -u sys.noOD/ufs/ffs/ffs_vfsops.c sys/ufs/ffs/ffs_vfsops.c --- sys.noOD/ufs/ffs/ffs_vfsops.c Mon Nov 22 01:58:49 1999 +++ sys/ufs/ffs/ffs_vfsops.c Fri Dec 24 23:51:33 1999 @@ -35,6 +35,7 @@ */ #include "opt_quota.h" +#include "opt_ffs.h" #include #include @@ -435,7 +436,9 @@ struct csum *space; struct buf *bp; struct fs *fs, *newfs; +#ifndef FFS_COMPAT_XXXXBSD struct partinfo dpart; +#endif dev_t dev; int i, blks, size, error; int32_t *lp; @@ -468,10 +471,14 @@ /* * Step 2: re-read superblock from disk. */ +#ifndef FFS_COMPAT_XXXXBSD if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0) size = DEV_BSIZE; else size = dpart.disklab->d_secsize; +#else + size = DEV_BSIZE; +#endif if (error = bread(devvp, (ufs_daddr_t)(SBOFF/size), SBSIZE, NOCRED,&bp)) return (error); newfs = (struct fs *)bp->b_data; @@ -494,6 +501,10 @@ brelse(bp); mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen; ffs_oldfscompat(fs); +#ifdef FFS_COMPAT_XXXXBSD + /* XXX bread assumes b_blkno in DEV_BSIZE unit, calculate fsbtodb */ + fs->fs_fsbtodb = ffs(fs->fs_fsize / DEV_BSIZE) - 1; +#endif /* * Step 3: re-read summary information from disk. @@ -579,7 +590,9 @@ struct buf *bp; register struct fs *fs; dev_t dev; +#ifndef FFS_COMPAT_XXXXBSD struct partinfo dpart; +#endif caddr_t base, space; int error, i, blks, size, ronly; int32_t *lp; @@ -627,10 +640,14 @@ if (error) return (error); +#ifndef FFS_COMPAT_XXXXBSD if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0) size = DEV_BSIZE; else size = dpart.disklab->d_secsize; +#else + size = DEV_BSIZE; +#endif bp = NULL; ump = NULL; @@ -663,6 +680,10 @@ error = EROFS; /* needs translation */ goto out; } +#ifdef FFS_COMPAT_XXXXBSD + /* XXX bread assumes b_blkno in DEV_BSIZE unit, calculate fsbtodb */ + fs->fs_fsbtodb = ffs(fs->fs_fsize / DEV_BSIZE) - 1; +#endif ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK); bzero((caddr_t)ump, sizeof *ump); ump->um_malloctype = malloctype; @@ -1293,6 +1314,12 @@ lp[0] = tmp; /* XXX */ } /* XXX */ dfs->fs_maxfilesize = mp->um_savedmaxfilesize; /* XXX */ + +#ifdef FFS_COMPAT_XXXXBSD + /* XXX restore fsbtodb which was modified for fixed b_blkno unit */ + ((struct fs *)bp->b_data)->fs_fsbtodb = ffs(fs->fs_nspf) - 1; +#endif + if (waitfor != MNT_WAIT) bawrite(bp); else if (error = bwrite(bp)) diff -u sys.noOD/vm/vnode_pager.c sys/vm/vnode_pager.c --- sys.noOD/vm/vnode_pager.c Mon Aug 30 01:33:41 1999 +++ sys/vm/vnode_pager.c Fri Dec 24 23:51:33 1999 @@ -68,6 +68,9 @@ #include #include +#include +#include + static vm_offset_t vnode_pager_addr __P((struct vnode *vp, vm_ooffset_t address, int *run)); static void vnode_pager_iodone __P((struct buf *bp)); @@ -569,6 +572,7 @@ int s; int count; int error = 0; + int blksize; object = vp->v_object; count = bytecount / PAGE_SIZE; @@ -704,8 +708,10 @@ /* * round up physical size for real devices */ - if (dp->v_type == VBLK || dp->v_type == VCHR) - size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); + if (dp->v_type == VBLK || dp->v_type == VCHR) { + blksize = (int)dp->v_blksize; + size = (size + blksize - 1) & ~(blksize - 1); + } bp = getpbuf(); kva = (vm_offset_t) bp->b_data;