(-)linux-2.6.17.1/block/ll_rw_blk.c (+125 lines)
Lines 39-44
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
 static void init_request_from_bio(struct request *req, struct bio *bio);
 static int __make_request(request_queue_t *q, struct bio *bio);
+static int blk_protect_register(request_queue_t *q);
+static void blk_protect_unregister(request_queue_t *q);
 
 /*
  * For the allocated request tables
Lines 359-364
 
 EXPORT_SYMBOL(blk_queue_issue_flush_fn);
 
+void blk_queue_issue_protect_fn(request_queue_t *q, issue_protect_fn *ipf)
+{
+	q->issue_protect_fn = ipf;
+}
+EXPORT_SYMBOL(blk_queue_issue_protect_fn);
+
+void blk_queue_issue_unprotect_fn(request_queue_t *q, issue_unprotect_fn *iuf)
+{
+	q->issue_unprotect_fn = iuf;
+}
+EXPORT_SYMBOL(blk_queue_issue_unprotect_fn);
+
 /*
  * Cache flushing for ordered writes handling
  */
Lines 3928-3933
 		return ret;
 	}
 
+	blk_protect_register(q);
 	return 0;
 }
 
Lines 3936-3941
 	request_queue_t *q = disk->queue;
 
 	if (q && q->request_fn) {
+		blk_protect_unregister(q);
 		elv_unregister_queue(q);
 
 		kobject_uevent(&q->kobj, KOBJ_REMOVE);
Lines 3943-3945
 		kobject_put(&disk->kobj);
 	}
 }
+
+/*
+ * Restore the unplugging timer that we re-used
+ * to implement the queue freeze timeout...
+ */
+static void blk_unfreeze_work(void *data)
+{
+	request_queue_t *q = (request_queue_t *) data;
+
+	INIT_WORK(&q->unplug_work, blk_unplug_work, q);
+	q->unplug_timer.function = blk_unplug_timeout;
+
+	q->issue_unprotect_fn(q);
+}
+
+/*
+ * Called when the queue freeze timeout expires...
+ */
+static void blk_unfreeze_timeout(unsigned long data)
+{
+	request_queue_t *q = (request_queue_t *) data;
+	kblockd_schedule_work(&q->unplug_work);
+}
+
+/*
+ * The lower level driver parks and freezes the queue, and this block layer
+ * function sets up the freeze timeout timer on return. If the queue is
+ * already frozen then this is called to extend the timer...
+ */
+void blk_freeze_queue(request_queue_t *q, int seconds)
+{
+	/* set/reset the timer */
+	mod_timer(&q->unplug_timer, msecs_to_jiffies(seconds*1000) + jiffies);
+
+	/* we do this every iteration - is this sane? */
+	INIT_WORK(&q->unplug_work, blk_unfreeze_work, q);
+	q->unplug_timer.function = blk_unfreeze_timeout;
+}
+
+/*
+ * When reading the 'protect' attribute, we return boolean frozen or active
+ * todo:
+ *  - maybe we should return seconds remaining instead?
+ */
+static ssize_t queue_protect_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(blk_queue_stopped(q), (page));
+}
+
+/*
+ * When writing the 'protect' attribute, input is the number of seconds
+ * to freeze the queue for. We call a lower level helper function to
+ * park the heads and freeze/block the queue, then we make a block layer
+ * call to setup the thaw timeout. If input is 0, then we thaw the queue.
+ */
+static ssize_t queue_protect_store(struct request_queue *q, const char *page, size_t count)
+{
+	unsigned long freeze = 0;
+	queue_var_store(&freeze, page, count);
+
+	if (freeze > 0) {
+		/* Park and freeze */
+		if (!blk_queue_stopped(q))
+			q->issue_protect_fn(q);
+		/* set / reset the thaw timer */
+		blk_freeze_queue(q, freeze);
+	} else
+		blk_unfreeze_timeout((unsigned long) q);
+
+	return count;
+}
+
+static struct queue_sysfs_entry queue_protect_entry = {
+	.attr = {.name = "protect", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_protect_show,
+	.store = queue_protect_store,
+};
+
+static int blk_protect_register(request_queue_t *q)
+{
+	int error = 0;
+
+	/* check that the lower level driver has a protect handler */
+	if (!q->issue_protect_fn)
+		return 1;
+
+	/* create the attribute */
+	error = sysfs_create_file(&q->kobj, &queue_protect_entry.attr);
+	if (error) {
+		printk(KERN_ERR
+		       "blk_protect_register(): failed to create protect queue attribute!\n");
+		return error;
+	}
+
+	kobject_get(&q->kobj);
+	return 0;
+}
+
+static void blk_protect_unregister(request_queue_t *q)
+{
+	/* check that the lower level driver has a protect handler */
+	if (!q->issue_protect_fn)
+		return;
+
+	/* remove the attribute */
+	sysfs_remove_file(&q->kobj, &queue_protect_entry.attr);
+	kobject_put(&q->kobj);
+}
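
For reference, the 'protect' attribute created above is driven entirely from userspace (an hdaps daemon, in the intended use case). A minimal sketch of a client follows, assuming a patched kernel and an IDE disk exposing /sys/block/hda/queue/protect; the device name and the 8-second freeze interval are illustrative only:

/*
 * protect_demo.c - sketch of driving the new sysfs attribute.
 * Assumes a kernel with this patch applied; path and interval
 * are illustrative.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/block/hda/queue/protect", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* park the heads and freeze the queue for 8 seconds;
	 * writing 0 would thaw it immediately instead */
	fputs("8\n", f);
	fclose(f);
	return 0;
}

Reading the attribute back returns 1 while the queue is frozen and 0 once it has been thawed, per queue_protect_show() above.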
(-)linux-2.6.17.1/drivers/ide/ide-disk.c (+155 lines)
Lines 72-77
 #include <asm/io.h>
 #include <asm/div64.h>
 
+int idedisk_protect_method = 0;
+module_param_named(protect_method, idedisk_protect_method, int, 0444);
+MODULE_PARM_DESC(protect_method, "hdaps disk protection method (0=autodetect, 1=unload, 2=standby)");
+
 struct ide_disk_obj {
 	ide_drive_t	*drive;
 	ide_driver_t	*driver;
Lines 730-735
 }
 
 /*
+ * todo:
+ *  - we freeze the queue regardless of success and rely on the
+ *    ide_protect_queue function to thaw immediately if the command
+ *    failed (to be consistent with the libata handler)... should
+ *    we also inspect here?
+ */
+void ide_end_protect_rq(struct request *rq, int error)
+{
+	struct completion *waiting = rq->waiting;
+
+	/* spin lock already acquired */
+	if (!blk_queue_stopped(rq->q))
+		blk_stop_queue(rq->q);
+
+	complete(waiting);
+}
+
+int ide_unprotect_queue(request_queue_t *q)
+{
+	struct request	rq;
+	unsigned long flags;
+	int		pending = 0, rc = 0;
+	ide_drive_t	*drive = q->queuedata;
+	u8		args[7], *argbuf = args;
+
+	if (!blk_queue_stopped(q))
+		return -EIO;
+
+	/* Are there any pending jobs on the queue? */
+	pending = ((q->rq.count[READ] > 0) || (q->rq.count[WRITE] > 0)) ? 1 : 0;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	blk_start_queue(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	/* The unload feature of the IDLE_IMMEDIATE command
+	   temporarily disables HD power management from spinning down
+	   the disk. Any other command will reenable HD pm, so, if
+	   there are no pending jobs on the queue, another
+	   CHECK_POWER_MODE1 command without the unload feature should do
+	   just fine. */
+	if (!pending) {
+		printk(KERN_DEBUG "ide_unprotect_queue(): No pending I/O, re-enabling power management..\n");
+		memset(args, 0, sizeof(args));
+		argbuf[0] = 0xe5; /* CHECK_POWER_MODE1 */
+		ide_init_drive_cmd(&rq);
+		rq.flags = REQ_DRIVE_TASK;
+		rq.buffer = argbuf;
+		rc = ide_do_drive_cmd(drive, &rq, ide_head_wait);
+	}
+
+	return rc;
+}
+
+int ide_protect_queue(request_queue_t *q, int unload)
+{
+	ide_drive_t	*drive = q->queuedata;
+	struct request	rq;
+	u8		args[7], *argbuf = args;
+	int		ret = 0;
+	DECLARE_COMPLETION(wait);
+
+	memset(&rq, 0, sizeof(rq));
+	memset(args, 0, sizeof(args));
+
+	if (blk_queue_stopped(q))
+		return -EIO;
+
+	if (unload) {
+		argbuf[0] = 0xe1;
+		argbuf[1] = 0x44;
+		argbuf[3] = 0x4c;
+		argbuf[4] = 0x4e;
+		argbuf[5] = 0x55;
+	} else
+		argbuf[0] = 0xe0;
+
+	/* Issue the park command & freeze */
+	ide_init_drive_cmd(&rq);
+
+	rq.flags = REQ_DRIVE_TASK;
+	rq.buffer = argbuf;
+	rq.waiting = &wait;
+	rq.end_io = ide_end_protect_rq;
+
+	ret = ide_do_drive_cmd(drive, &rq, ide_next);
+	wait_for_completion(&wait);
+	rq.waiting = NULL;
+
+	if (ret) {
+		printk(KERN_DEBUG "ide_protect_queue(): Warning: head NOT parked!..\n");
+		ide_unprotect_queue(q);
+		return ret;
+	}
+
+	if (unload) {
+		if (args[3] == 0xc4)
+			printk(KERN_DEBUG "ide_protect_queue(): head parked..\n");
+		else {
+			/* error parking the head */
+			printk(KERN_DEBUG "ide_protect_queue(): head NOT parked!..\n");
+			ret = -EIO;
+			ide_unprotect_queue(q);
+		}
+	} else
+		printk(KERN_DEBUG "ide_protect_queue(): head park not requested, used standby!..\n");
+
+	return ret;
+}
+
+int idedisk_issue_protect_fn(request_queue_t *q)
+{
+	ide_drive_t	*drive = q->queuedata;
+	int unload;
+
+	/*
+	 * Check capability of the device -
+	 *  - if "idle immediate with unload" is supported we use that, else
+	 *    we use "standby immediate" and live with spinning down the drive..
+	 *    (Word 84, bit 13 of IDENTIFY DEVICE data)
+	 */
+	if (idedisk_protect_method == 1) {
+		unload = 1;
+		printk(KERN_DEBUG "idedisk_issue_protect_fn(): unload method requested, overriding drive capability check..\n");
+	} else if (idedisk_protect_method == 2) {
+		unload = 0;
+		printk(KERN_DEBUG "idedisk_issue_protect_fn(): standby method requested, overriding drive capability check..\n");
+	} else if (drive->id->cfsse & (1 << 13)) {
+		unload = 1;
+		printk(KERN_DEBUG "idedisk_issue_protect_fn(): unload support reported by drive..\n");
+	} else {
+		unload = 0;
+		printk(KERN_DEBUG "idedisk_issue_protect_fn(): unload support NOT reported by drive!..\n");
+	}
+
+	return ide_protect_queue(q, unload);
+}
+
+int idedisk_issue_unprotect_fn(request_queue_t *q)
+{
+	return ide_unprotect_queue(q);
+}
+
+/*
  * This is tightly woven into the driver->do_special can not touch.
  * DON'T do it again until a total personality rewrite is committed.
  */
Lines 985-990
 		drive->wcache = 1;
 
 	write_cache(drive, 1);
+
+	blk_queue_issue_protect_fn(drive->queue, idedisk_issue_protect_fn);
+	blk_queue_issue_unprotect_fn(drive->queue, idedisk_issue_unprotect_fn);
 }
 
 static void ide_cacheflush_p(ide_drive_t *drive)
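
The taskfile bytes above can be exercised from userspace for testing through the HDIO_DRIVE_TASK ioctl, which takes the same 7-byte command/feature/sector/LBA layout that ide_protect_queue() builds. A hedged sketch, assuming /dev/hda and root privileges; the check of args[3] for 0xc4 follows the same register-readback convention the patch relies on:

/*
 * unload_demo.c - sketch: issue IDLE IMMEDIATE with UNLOAD from
 * userspace, mirroring the taskfile ide_protect_queue() builds.
 * /dev/hda is an assumption; requires root.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	unsigned char args[7];
	int fd = open("/dev/hda", O_RDONLY | O_NONBLOCK);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(args, 0, sizeof(args));
	args[0] = 0xe1;	/* IDLE IMMEDIATE */
	args[1] = 0x44;	/* UNLOAD FEATURE */
	args[3] = 0x4c;	/* LBA signature, as in the hunk above */
	args[4] = 0x4e;
	args[5] = 0x55;
	if (ioctl(fd, HDIO_DRIVE_TASK, args))
		perror("HDIO_DRIVE_TASK");
	else if (args[3] == 0xc4)	/* drive reports 0xc4 when parked */
		printf("head parked\n");
	close(fd);
	return 0;
}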
(-)linux-2.6.17.1/drivers/ide/ide-io.c (+14 lines)
Lines 1249-1254
 		}
 
 		/*
+		 * Don't accept a request when the queue is stopped (unless we
+		 * are resuming from suspend). Prevents existing queue entries
+		 * being processed after queue is stopped by the hard disk
+		 * protection mechanism...
+		 */
+		if (test_bit(QUEUE_FLAG_STOPPED, &drive->queue->queue_flags) && !blk_pm_resume_request(rq)) {
+			hwgroup->busy = 0;
+			break;
+		}
+
+		/*
 		 * Sanity: don't accept a request that isn't a PM request
 		 * if we are currently power managed. This is very important as
 		 * blk_stop_queue() doesn't prevent the elv_next_request()
Lines 1729-1734
 		where = ELEVATOR_INSERT_FRONT;
 		rq->flags |= REQ_PREEMPT;
 	}
+	if (action == ide_next)
+		where = ELEVATOR_INSERT_FRONT;
+
 	__elv_add_request(drive->queue, rq, where, 0);
 	ide_do_request(hwgroup, IDE_NO_IRQ);
 	spin_unlock_irqrestore(&ide_lock, flags);
(-)linux-2.6.17.1/drivers/scsi/libata-core.c (+4 lines)
Lines 75-80
 
 struct workqueue_struct *ata_aux_wq;
 
+int libata_protect_method = 0;
+module_param_named(protect_method, libata_protect_method, int, 0444);
+MODULE_PARM_DESC(protect_method, "hdaps disk protection method (0=autodetect, 1=unload, 2=standby)");
+
 int atapi_enabled = 1;
 module_param(atapi_enabled, int, 0444);
 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
(-)linux-2.6.17.1/drivers/scsi/libata.h (+1 lines)
Lines 40-45
 
 /* libata-core.c */
 extern struct workqueue_struct *ata_aux_wq;
+extern int libata_protect_method;
 extern int atapi_enabled;
 extern int atapi_dmadir;
 extern int libata_fua;
(-)linux-2.6.17.1/drivers/scsi/libata-scsi.c (+38 lines)
Lines 809-814
 	}
 }
 
+extern int scsi_protect_queue(request_queue_t *q, int unload);
+extern int scsi_unprotect_queue(request_queue_t *q);
+
+static int ata_scsi_issue_protect_fn(request_queue_t *q)
+{
+	struct scsi_device *sdev = q->queuedata;
+	struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0];
+	struct ata_device *dev = &ap->device[sdev->id];
+	int unload;
+
+	if (libata_protect_method == 1) {
+		unload = 1;
+		printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): unload method requested, overriding drive capability check..\n");
+	} else if (libata_protect_method == 2) {
+		unload = 0;
+		printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): standby method requested, overriding drive capability check..\n");
+	} else if (ata_id_has_unload(dev->id)) {
+		unload = 1;
+		printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): unload support reported by drive..\n");
+	} else {
+		unload = 0;
+		printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): unload support NOT reported by drive!..\n");
+	}
+
+	/* call scsi_protect_queue, requesting either unload or standby */
+	return scsi_protect_queue(q, unload);
+}
+
+static int ata_scsi_issue_unprotect_fn(request_queue_t *q)
+{
+	return scsi_unprotect_queue(q);
+}
+
 /**
  *	ata_scsi_slave_config - Set SCSI device attributes
  *	@sdev: SCSI device to examine
Lines 832-837
 
 	if (dev)
 		ata_scsi_dev_config(sdev, dev);
+	blk_queue_issue_protect_fn(sdev->request_queue, ata_scsi_issue_protect_fn);
+	blk_queue_issue_unprotect_fn(sdev->request_queue, ata_scsi_issue_unprotect_fn);
 
 	return 0;	/* scsi layer doesn't check return value, sigh */
 }
(-)linux-2.6.17.1/drivers/scsi/scsi_lib.c (+186 lines)
Lines 2191-2196
 		device_for_each_child(dev, NULL, target_unblock);
 }
 EXPORT_SYMBOL_GPL(scsi_target_unblock);
+
+/*
+ * As per scsi_wait_req_end_io(), which was removed in 2.6.15
+ */
+static void scsi_protect_wait_req_end_io(struct request *req, int error)
+{
+	BUG_ON(!req->waiting);
+
+	complete(req->waiting);
+}
+
+/*
+ * As per scsi_wait_done(), except calls scsi_internal_device_block()
+ * to block the queue at command completion. Only called by
+ * scsi_protect_wait_req().
+ * todo:
+ *  - we block the queue regardless of success and rely on the
+ *    scsi_protect_queue function to unblock if the command
+ *    failed... should we also inspect here?
+ */
+static void scsi_protect_wait_done(struct scsi_cmnd *cmd)
+{
+	struct request *req = cmd->request;
+	struct request_queue *q = cmd->device->request_queue;
+	struct scsi_device *sdev = cmd->device;
+	unsigned long flags;
+
+	req->rq_status = RQ_SCSI_DONE;	/* Busy, but indicate request done */
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	if (blk_rq_tagged(req))
+		blk_queue_end_tag(q, req);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	scsi_internal_device_block(sdev);
+
+	if (req->waiting)
+		complete(req->waiting);
+}
+
+/*
+ * As per scsi_wait_req(), except sets the completion function
+ * as scsi_protect_wait_done().
+ */
+void scsi_protect_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
+			   unsigned bufflen, int timeout, int retries)
+{
+	DECLARE_COMPLETION(wait);
+
+	sreq->sr_request->waiting = &wait;
+	sreq->sr_request->rq_status = RQ_SCSI_BUSY;
+	sreq->sr_request->end_io = scsi_protect_wait_req_end_io;
+	scsi_do_req(sreq, cmnd, buffer, bufflen, scsi_protect_wait_done,
+		    timeout, retries);
+	wait_for_completion(&wait);
+	sreq->sr_request->waiting = NULL;
+	if (sreq->sr_request->rq_status != RQ_SCSI_DONE)
+		sreq->sr_result |= (DRIVER_ERROR << 24);
+
+	__scsi_release_request(sreq);
+}
+
+/*
+ * scsi_unprotect_queue()
+ *  - release the queue that was previously blocked
+ */
+int scsi_unprotect_queue(request_queue_t *q)
+{
+	struct scsi_device *sdev = q->queuedata;
+	int rc = 0, pending = 0;
+	u8 scsi_cmd[MAX_COMMAND_SIZE];
+	struct scsi_sense_hdr sshdr;
+
+	if (sdev->sdev_state != SDEV_BLOCK)
+		return -ENXIO;
+
+	/* Are there any pending jobs on the queue? */
+	pending = ((q->rq.count[READ] > 0) || (q->rq.count[WRITE] > 0)) ? 1 : 0;
+
+	rc = scsi_internal_device_unblock(sdev);
+	if (rc)
+		return rc;
+
+	if (!pending) {
+		printk(KERN_DEBUG "scsi_unprotect_queue(): No pending I/O, re-enabling power management..\n");
+
+		memset(scsi_cmd, 0, sizeof(scsi_cmd));
+		scsi_cmd[0]  = ATA_16;
+		scsi_cmd[1]  = (3 << 1); /* Non-data */
+		/* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */
+		scsi_cmd[14] = 0xe5; /* CHECK_POWER_MODE1 */
+
+		/* Good values for timeout and retries?  Values below
+		   from scsi_ioctl_send_command() for default case... */
+		if (scsi_execute_req(sdev, scsi_cmd, DMA_NONE, NULL, 0, &sshdr,
+				     (10*HZ), 5))
+			rc = -EIO;
+	}
+	return rc;
+}
+EXPORT_SYMBOL_GPL(scsi_unprotect_queue);
+
+/*
+ * scsi_protect_queue()
+ *  - build and issue the park/standby command..
+ *  - queue is blocked during command completion handler
+ */
+int scsi_protect_queue(request_queue_t *q, int unload)
+{
+	struct scsi_device *sdev = q->queuedata;
+	int rc = 0;
+	u8 scsi_cmd[MAX_COMMAND_SIZE];
+	u8 args[7];
+	struct scsi_request *sreq;
+	unsigned char *sb, *desc;
+
+	if (sdev->sdev_state != SDEV_RUNNING)
+		return -ENXIO;
+
+	memset(args, 0, sizeof(args));
+
+	if (unload) {
+		args[0] = 0xe1;
+		args[1] = 0x44;
+		args[3] = 0x4c;
+		args[4] = 0x4e;
+		args[5] = 0x55;
+	} else
+		args[0] = 0xe0;
+
+	memset(scsi_cmd, 0, sizeof(scsi_cmd));
+	scsi_cmd[0]  = ATA_16;
+	scsi_cmd[1]  = (3 << 1); /* Non-data */
+	scsi_cmd[2]  = 0x20;     /* no off.line, or data xfer, request cc */
+	scsi_cmd[4]  = args[1];
+	scsi_cmd[6]  = args[2];
+	scsi_cmd[8]  = args[3];
+	scsi_cmd[10] = args[4];
+	scsi_cmd[12] = args[5];
+	scsi_cmd[14] = args[0];
+
+	sreq = scsi_allocate_request(sdev, GFP_KERNEL);
+	if (!sreq)
+		return -EINTR;
+
+	sreq->sr_data_direction = DMA_NONE;
+
+	scsi_protect_wait_req(sreq, scsi_cmd, NULL, 0, (10*HZ), 5);
+
+	if (sreq->sr_result != ((DRIVER_SENSE << 24) + SAM_STAT_CHECK_CONDITION)) {
+		printk(KERN_DEBUG "scsi_protect_queue(): head NOT parked!..\n");
+		scsi_unprotect_queue(q);	/* just in case we still managed to block */
+		rc = -EIO;
+		goto error;
+	}
+
+	sb = sreq->sr_sense_buffer;
+	desc = sb + 8;
+
+	/* Retrieve data from check condition */
+	args[1] = desc[3];
+	args[2] = desc[5];
+	args[3] = desc[7];
+	args[4] = desc[9];
+	args[5] = desc[11];
+	args[0] = desc[13];
+
+	if (unload) {
+		if (args[3] == 0xc4)
+			printk(KERN_DEBUG "scsi_protect_queue(): head parked..\n");
+		else {
+			/* error parking the head */
+			printk(KERN_DEBUG "scsi_protect_queue(): head NOT parked!..\n");
+			rc = -EIO;
+			scsi_unprotect_queue(q);
+		}
+	} else
+		printk(KERN_DEBUG "scsi_protect_queue(): head park not requested, used standby!..\n");
+
+error:
+	scsi_release_request(sreq);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(scsi_protect_queue);
 
 /**
  * scsi_kmap_atomic_sg - find and atomically map an sg-elemnt
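
The same ATA_16 pass-through CDB can be issued from userspace with SG_IO for testing, e.g. to send CHECK POWER MODE and confirm the SAT path works. A rough sketch, assuming /dev/sda; sense decoding is omitted:

/*
 * checkpm_demo.c - sketch: send CHECK POWER MODE via the SAT ATA_16
 * pass-through, mirroring the CDB scsi_unprotect_queue() builds.
 * /dev/sda is an assumption; error handling is minimal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[16], sense[32];
	struct sg_io_hdr io;
	int fd = open("/dev/sda", O_RDONLY | O_NONBLOCK);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(cdb, 0, sizeof(cdb));
	cdb[0] = 0x85;		/* ATA_16 */
	cdb[1] = (3 << 1);	/* protocol: non-data */
	cdb[14] = 0xe5;		/* CHECK_POWER_MODE1 */

	memset(&io, 0, sizeof(io));
	io.interface_id = 'S';
	io.cmd_len = sizeof(cdb);
	io.cmdp = cdb;
	io.mx_sb_len = sizeof(sense);
	io.sbp = sense;
	io.dxfer_direction = SG_DXFER_NONE;
	io.timeout = 10000;	/* milliseconds, matching the (10*HZ) above */

	if (ioctl(fd, SG_IO, &io))
		perror("SG_IO");
	close(fd);
	return 0;
}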
(-)linux-2.6.17.1/include/linux/ata.h (+1 lines)
Lines 277-282
 #define ata_id_rahead_enabled(id) ((id)[85] & (1 << 6))
 #define ata_id_wcache_enabled(id) ((id)[85] & (1 << 5))
 #define ata_id_hpa_enabled(id)	((id)[85] & (1 << 10))
+#define ata_id_has_unload(id)	((id)[84] & (1 << 13))
 #define ata_id_has_fua(id)	((id)[84] & (1 << 6))
 #define ata_id_has_flush(id)	((id)[83] & (1 << 12))
 #define ata_id_has_flush_ext(id) ((id)[83] & (1 << 13))
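
The bit this macro tests (IDENTIFY word 84, bit 13) can also be read from userspace to check a drive before choosing a protect_method. A sketch using HDIO_GET_IDENTITY, whose struct hd_driveid exposes word 84 as the cfsse field (the same field ide-disk.c checks above); /dev/hda is an assumption:

/*
 * unload_check.c - sketch: test IDENTIFY word 84, bit 13 from
 * userspace, the same bit ata_id_has_unload() tests above.
 * /dev/hda is an assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	struct hd_driveid id;
	int fd = open("/dev/hda", O_RDONLY | O_NONBLOCK);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, HDIO_GET_IDENTITY, &id)) {
		perror("HDIO_GET_IDENTITY");
		close(fd);
		return 1;
	}
	/* cfsse holds IDENTIFY word 84 (command set/feature supported ext.) */
	printf("unload %ssupported\n",
	       (id.cfsse & (1 << 13)) ? "" : "NOT ");
	close(fd);
	return 0;
}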
(-)linux-2.6.17.1/include/linux/blkdev.h (+6 lines)
Lines 297-302
 typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
 typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
+typedef int (issue_protect_fn) (request_queue_t *);
+typedef int (issue_unprotect_fn) (request_queue_t *);
 
 enum blk_queue_state {
 	Queue_down,
Lines 339-344
 	issue_flush_fn		*issue_flush_fn;
 	prepare_flush_fn	*prepare_flush_fn;
 	softirq_done_fn		*softirq_done_fn;
+	issue_protect_fn	*issue_protect_fn;
+	issue_unprotect_fn	*issue_unprotect_fn;
 
 	/*
 	 * Dispatch queue sorting
Lines 720-725
 extern unsigned blk_ordered_cur_seq(request_queue_t *);
 extern unsigned blk_ordered_req_seq(struct request *);
 extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);
+extern void blk_queue_issue_protect_fn(request_queue_t *, issue_protect_fn *);
+extern void blk_queue_issue_unprotect_fn(request_queue_t *, issue_unprotect_fn *);
 
 extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
(-)linux-2.6.17.1/include/linux/ide.h (+1 lines)
Lines 1085-1090
  */
 typedef enum {
 	ide_wait,	/* insert rq at end of list, and wait for it */
+	ide_next,	/* insert rq immediately after current request */
 	ide_preempt,	/* insert rq in front of current request */
 	ide_head_wait,	/* insert rq in front of current request and wait for it */
 	ide_end		/* insert rq at end of list, but don't wait for it */
