Patch from Gentoo bug 148423: hdaps hard disk protection (head parking and request queue freezing) for linux-2.6.17.1.
(-)linux-2.6.17.1/block/ll_rw_blk.c (+125 lines)
@@ -39,6 +39,8 @@
 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
 static void init_request_from_bio(struct request *req, struct bio *bio);
 static int __make_request(request_queue_t *q, struct bio *bio);
+static int blk_protect_register(request_queue_t *q);
+static void blk_protect_unregister(request_queue_t *q);
 
 /*
  * For the allocated request tables
@@ -359,6 +361,18 @@
 
 EXPORT_SYMBOL(blk_queue_issue_flush_fn);
 
+void blk_queue_issue_protect_fn(request_queue_t *q, issue_protect_fn *ipf)
+{
+	q->issue_protect_fn = ipf;
+}
+EXPORT_SYMBOL(blk_queue_issue_protect_fn);
+
+void blk_queue_issue_unprotect_fn(request_queue_t *q, issue_unprotect_fn *iuf)
+{
+	q->issue_unprotect_fn = iuf;
+}
+EXPORT_SYMBOL(blk_queue_issue_unprotect_fn);
+
 /*
  * Cache flushing for ordered writes handling
  */
@@ -3928,6 +3942,7 @@
 		return ret;
 	}
 
+	blk_protect_register(q);
 	return 0;
 }
 
@@ -3936,6 +3951,7 @@
 	request_queue_t *q = disk->queue;
 
 	if (q && q->request_fn) {
+		blk_protect_unregister(q);
 		elv_unregister_queue(q);
 
 		kobject_uevent(&q->kobj, KOBJ_REMOVE);
@@ -3943,3 +3959,112 @@
 		kobject_put(&disk->kobj);
 	}
 }
+
+/*
+ * Restore the unplugging timer that we re-used
+ * to implement the queue freeze timeout...
+ */
+static void blk_unfreeze_work(void *data)
+{
+	request_queue_t *q = (request_queue_t *) data;
+
+	INIT_WORK(&q->unplug_work, blk_unplug_work, q);
+	q->unplug_timer.function = blk_unplug_timeout;
+
+	q->issue_unprotect_fn(q);
+}
+
+/*
+ * Called when the queue freeze timeout expires...
+ */
+static void blk_unfreeze_timeout(unsigned long data)
+{
+	request_queue_t *q = (request_queue_t *) data;
+	kblockd_schedule_work(&q->unplug_work);
+}
+
+/*
+ * The lower level driver parks and freezes the queue, and this block layer
+ * function sets up the freeze timeout timer on return. If the queue is
+ * already frozen then this is called to extend the timer...
+ */
+void blk_freeze_queue(request_queue_t *q, int seconds)
+{
+	/* set/reset the timer */
+	mod_timer(&q->unplug_timer, msecs_to_jiffies(seconds*1000) + jiffies);
+
+	/* we do this every iteration - is this sane? */
+	INIT_WORK(&q->unplug_work, blk_unfreeze_work, q);
+	q->unplug_timer.function = blk_unfreeze_timeout;
+}
+
+/*
+ * When reading the 'protect' attribute, we return boolean frozen or active
+ * todo:
+ *  - maybe we should return seconds remaining instead?
+ */
+static ssize_t queue_protect_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(blk_queue_stopped(q), (page));
+}
+
+/*
+ * When writing the 'protect' attribute, input is the number of seconds
+ * to freeze the queue for. We call a lower level helper function to
+ * park the heads and freeze/block the queue, then we make a block layer
+ * call to setup the thaw timeout. If input is 0, then we thaw the queue.
+ */
+static ssize_t queue_protect_store(struct request_queue *q, const char *page, size_t count)
+{
+	unsigned long freeze = 0;
+	queue_var_store(&freeze, page, count);
+
+	if (freeze > 0) {
+		/* Park and freeze */
+		if (!blk_queue_stopped(q))
+			q->issue_protect_fn(q);
+		/* set / reset the thaw timer */
+		blk_freeze_queue(q, freeze);
+	}
+	else
+		blk_unfreeze_timeout((unsigned long) q);
+
+	return count;
+}
+
+static struct queue_sysfs_entry queue_protect_entry = {
+	.attr = {.name = "protect", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_protect_show,
+	.store = queue_protect_store,
+};
+
+static int blk_protect_register(request_queue_t *q)
+{
+	int error = 0;
+
+	/* check that the lower level driver has a protect handler */
+	if (!q->issue_protect_fn)
+		return 1;
+
+	/* create the attribute */
+	error = sysfs_create_file(&q->kobj, &queue_protect_entry.attr);
+	if (error) {
+		printk(KERN_ERR
+		       "blk_protect_register(): failed to create protect queue attribute!\n");
+		return error;
+	}
+
+	kobject_get(&q->kobj);
+	return 0;
+}
+
+static void blk_protect_unregister(request_queue_t *q)
+{
+	/* check that the lower level driver has a protect handler */
+	if (!q->issue_protect_fn)
+		return;
+
+	/* remove the attribute */
+	sysfs_remove_file(&q->kobj, &queue_protect_entry.attr);
+	kobject_put(&q->kobj);
+}
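
Taken together, the block-layer hunks above give any queue that registers a protect handler a sysfs 'protect' attribute: writing a number of seconds parks the heads and freezes the queue, the timer thaws it when the interval expires, and writing 0 thaws it immediately. A minimal userspace sketch (not part of the patch; it assumes the attribute lands in the usual queue directory, e.g. /sys/block/hda/queue):

/* Freeze /dev/hda's request queue for 8 seconds via the new attribute.
 * The sysfs path and device name are assumptions for illustration. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *attr = "/sys/block/hda/queue/protect";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror(attr);
		return 1;
	}
	if (write(fd, "8", 1) != 1)	/* "0" would thaw immediately */
		perror("write");
	close(fd);
	return 0;
}
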
(-)linux-2.6.17.1/drivers/ide/ide-disk.c (+155 lines)
@@ -72,6 +72,10 @@
 #include <asm/io.h>
 #include <asm/div64.h>
 
+int idedisk_protect_method = 0;
+module_param_named(protect_method, idedisk_protect_method, int, 0444);
+MODULE_PARM_DESC(protect_method, "hdaps disk protection method (0=autodetect, 1=unload, 2=standby)");
+
 struct ide_disk_obj {
 	ide_drive_t	*drive;
 	ide_driver_t	*driver;
@@ -730,6 +734,154 @@
 }
 
 /*
+ * todo:
+ *  - we freeze the queue regardless of success and rely on the
+ *    ide_protect_queue function to thaw immediately if the command
+ *    failed (to be consistent with the libata handler)... should
+ *    we also inspect here?
+ */
+void ide_end_protect_rq(struct request *rq, int error)
+{
+	struct completion *waiting = rq->waiting;
+
+	/* spin lock already acquired */
+	if (!blk_queue_stopped(rq->q))
+		blk_stop_queue(rq->q);
+
+	complete(waiting);
+}
+
+int ide_unprotect_queue(request_queue_t *q)
+{
+	struct request	rq;
+	unsigned long flags;
+	int		pending = 0, rc = 0;
+	ide_drive_t	*drive = q->queuedata;
+	u8		args[7], *argbuf = args;
+
+	if (!blk_queue_stopped(q))
+		return -EIO;
+
+	/* Are there any pending jobs on the queue? */
+	pending = ((q->rq.count[READ] > 0) || (q->rq.count[WRITE] > 0)) ? 1 : 0;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	blk_start_queue(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	/* The unload feature of the IDLE_IMMEDIATE command
+	   temporarily disables HD power management from spinning down
+	   the disk. Any other command will reenable HD pm, so, if
+	   there are no pending jobs on the queue, another
+	   CHECK_POWER_MODE1 command without the unload feature should do
+	   just fine. */
+	if (!pending) {
+		printk(KERN_DEBUG "ide_unprotect_queue(): No pending I/O, re-enabling power management..\n");
+		memset(args, 0, sizeof(args));
+		argbuf[0] = 0xe5; /* CHECK_POWER_MODE1 */
+		ide_init_drive_cmd(&rq);
+		rq.flags = REQ_DRIVE_TASK;
+		rq.buffer = argbuf;
+		rc = ide_do_drive_cmd(drive, &rq, ide_head_wait);
+	}
+
+	return rc;
+}
+
+int ide_protect_queue(request_queue_t *q, int unload)
+{
+	ide_drive_t	*drive = q->queuedata;
+	struct request	rq;
+	u8		args[7], *argbuf = args;
+	int		ret = 0;
+	DECLARE_COMPLETION(wait);
+
+	memset(&rq, 0, sizeof(rq));
+	memset(args, 0, sizeof(args));
+
+	if (blk_queue_stopped(q))
+		return -EIO;
+
+	if (unload) {
+		argbuf[0] = 0xe1;
+		argbuf[1] = 0x44;
+		argbuf[3] = 0x4c;
+		argbuf[4] = 0x4e;
+		argbuf[5] = 0x55;
+	} else
+		argbuf[0] = 0xe0;
+
+	/* Issue the park command & freeze */
+	ide_init_drive_cmd(&rq);
+
+	rq.flags = REQ_DRIVE_TASK;
+	rq.buffer = argbuf;
+	rq.waiting = &wait;
+	rq.end_io = ide_end_protect_rq;
+
+	ret = ide_do_drive_cmd(drive, &rq, ide_next);
+	wait_for_completion(&wait);
+	rq.waiting = NULL;
+
+	if (ret)
+	{
+		printk(KERN_DEBUG "ide_protect_queue(): Warning: head NOT parked!..\n");
+		ide_unprotect_queue(q);
+		return ret;
+	}
+
+	if (unload) {
+		if (args[3] == 0xc4)
+			printk(KERN_DEBUG "ide_protect_queue(): head parked..\n");
+		else {
+			/* error parking the head */
+			printk(KERN_DEBUG "ide_protect_queue(): head NOT parked!..\n");
+			ret = -EIO;
+			ide_unprotect_queue(q);
+		}
+	} else
+		printk(KERN_DEBUG "ide_protect_queue(): head park not requested, used standby!..\n");
+
+	return ret;
+}
+
+int idedisk_issue_protect_fn(request_queue_t *q)
+{
+	ide_drive_t		*drive = q->queuedata;
+	int unload;
+
+	/*
+	 * Check capability of the device -
+	 *  - if "idle immediate with unload" is supported we use that, else
+	 *    we use "standby immediate" and live with spinning down the drive..
+	 *    (Word 84, bit 13 of IDENTIFY DEVICE data)
+	 */
+	if (idedisk_protect_method == 1) {
+		unload = 1;
+		printk(KERN_DEBUG "idedisk_issue_protect_fn(): unload method requested, overriding drive capability check..\n");
+	}
+	else if (idedisk_protect_method == 2) {
+		unload = 0;
+		printk(KERN_DEBUG "idedisk_issue_protect_fn(): standby method requested, overriding drive capability check..\n");
+	}
+	else if (drive->id->cfsse & (1 << 13)) {
+		unload = 1;
+		printk(KERN_DEBUG "idedisk_issue_protect_fn(): unload support reported by drive..\n");
+	}
+	else {
+		unload = 0;
+		printk(KERN_DEBUG "idedisk_issue_protect_fn(): unload support NOT reported by drive!..\n");
+	}
+
+	return ide_protect_queue(q, unload);
+}
+
+int idedisk_issue_unprotect_fn(request_queue_t *q)
+{
+	return ide_unprotect_queue(q);
+}
+
+/*
  * This is tightly woven into the driver->do_special can not touch.
  * DON'T do it again until a total personality rewrite is committed.
  */
@@ -985,6 +1137,9 @@
 		drive->wcache = 1;
 
 	write_cache(drive, 1);
+
+	blk_queue_issue_protect_fn(drive->queue, idedisk_issue_protect_fn);
+	blk_queue_issue_unprotect_fn(drive->queue, idedisk_issue_unprotect_fn);
 }
 
 static void ide_cacheflush_p(ide_drive_t *drive)
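
The raw taskfile bytes above are easier to follow with the ATA command set at hand; the following annotation is my reading of ATA-7, not text from the patch: 0xe1 is IDLE IMMEDIATE and feature 0x44 selects its UNLOAD FEATURE, with 0x4c/0x4e/0x55 placing the 'UNL' signature in the LBA Low/Mid/High registers; 0xe0 is STANDBY IMMEDIATE, the fallback that spins the drive down; and 0xc4 returned in LBA Low confirms the heads actually unloaded. As named constants (hypothetical, for readability only):

/* Hypothetical names for the raw values used in ide_protect_queue(). */
#define ATA_CMD_IDLE_IMMEDIATE		0xe1	/* argbuf[0], unload path */
#define ATA_FEATURE_UNLOAD		0x44	/* argbuf[1] */
#define ATA_UNLOAD_LBA_LOW		0x4c	/* 'L' */
#define ATA_UNLOAD_LBA_MID		0x4e	/* 'N' */
#define ATA_UNLOAD_LBA_HIGH		0x55	/* 'U' */
#define ATA_CMD_STANDBY_IMMEDIATE	0xe0	/* argbuf[0], fallback path */
#define ATA_UNLOAD_CONFIRM		0xc4	/* LBA Low on success */
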
(-)linux-2.6.17.1/drivers/ide/ide-io.c (+14 lines)
@@ -1249,6 +1249,17 @@
 		}
 
 		/*
+		 * Don't accept a request when the queue is stopped (unless we
+		 * are resuming from suspend). Prevents existing queue entries
+		 * being processed after queue is stopped by the hard disk
+		 * protection mechanism...
+		 */
+		if (test_bit(QUEUE_FLAG_STOPPED, &drive->queue->queue_flags) && !blk_pm_resume_request(rq)) {
+			hwgroup->busy = 0;
+			break;
+		}
+
+		/*
 		 * Sanity: don't accept a request that isn't a PM request
 		 * if we are currently power managed. This is very important as
 		 * blk_stop_queue() doesn't prevent the elv_next_request()
@@ -1729,6 +1740,9 @@
 		where = ELEVATOR_INSERT_FRONT;
 		rq->flags |= REQ_PREEMPT;
 	}
+	if (action == ide_next)
+		where = ELEVATOR_INSERT_FRONT;
+
 	__elv_add_request(drive->queue, rq, where, 0);
 	ide_do_request(hwgroup, IDE_NO_IRQ);
 	spin_unlock_irqrestore(&ide_lock, flags);
(-)linux-2.6.17.1/drivers/scsi/libata-core.c (+4 lines)
@@ -75,6 +75,10 @@
 
 struct workqueue_struct *ata_aux_wq;
 
+int libata_protect_method = 0;
+module_param_named(protect_method, libata_protect_method, int, 0444);
+MODULE_PARM_DESC(protect_method, "hdaps disk protection method (0=autodetect, 1=unload, 2=standby)");
+
 int atapi_enabled = 1;
 module_param(atapi_enabled, int, 0444);
 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
(-)linux-2.6.17.1/drivers/scsi/libata.h (+1 lines)
@@ -40,6 +40,7 @@
 
 /* libata-core.c */
 extern struct workqueue_struct *ata_aux_wq;
+extern int libata_protect_method;
 extern int atapi_enabled;
 extern int atapi_dmadir;
 extern int libata_fua;
(-)linux-2.6.17.1/drivers/scsi/libata-scsi.c (+42 lines)
@@ -809,6 +809,44 @@
 	}
 }
 
+#if 0
+extern int scsi_protect_queue(request_queue_t *q, int unload);
+extern int scsi_unprotect_queue(request_queue_t *q);
+
+static int ata_scsi_issue_protect_fn(request_queue_t *q)
+{
+	struct scsi_device *sdev = q->queuedata;
+	struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0];
+	struct ata_device *dev = &ap->device[sdev->id];
+	int unload;
+
+	if (libata_protect_method == 1) {
+		unload = 1;
+		printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): unload method requested, overriding drive capability check..\n");
+	}
+	else if (libata_protect_method == 2) {
+		unload = 0;
+		printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): standby method requested, overriding drive capability check..\n");
+	}
+	else if (ata_id_has_unload(dev->id)) {
+		unload = 1;
+		printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): unload support reported by drive..\n");
+	}
+	else {
+		unload = 0;
+		printk(KERN_DEBUG "ata_scsi_issue_protect_fn(): unload support NOT reported by drive!..\n");
+	}
+
+	/* call scsi_protect_queue, requesting either unload or standby */
+	return scsi_protect_queue(q, unload);
+}
+
+static int ata_scsi_issue_unprotect_fn(request_queue_t *q)
+{
+	return scsi_unprotect_queue(q);
+}
+#endif
+
 /**
  *	ata_scsi_slave_config - Set SCSI device attributes
  *	@sdev: SCSI device to examine
@@ -832,6 +868,10 @@
 
 	if (dev)
 		ata_scsi_dev_config(sdev, dev);
+#if 0
+	blk_queue_issue_protect_fn(sdev->request_queue, ata_scsi_issue_protect_fn);
+	blk_queue_issue_unprotect_fn(sdev->request_queue, ata_scsi_issue_unprotect_fn);
+#endif
 
 	return 0;	/* scsi layer doesn't check return value, sigh */
 }
(-)linux-2.6.17.1/drivers/scsi/scsi_lib.c (+188 lines)
@@ -2191,6 +2191,194 @@
 		device_for_each_child(dev, NULL, target_unblock);
 }
 EXPORT_SYMBOL_GPL(scsi_target_unblock);
 
+#if 0
+/*
+ * As per scsi_wait_req_end_io(), which was removed in 2.6.15
+ */
+static void scsi_protect_wait_req_end_io(struct request *req, int error)
+{
+	BUG_ON(!req->waiting);
+
+	complete(req->waiting);
+}
+
+/*
+ * As per scsi_wait_done(), except calls scsi_device_block
+ * to block the queue at command completion. Only called by
+ * scsi_protect_wait().
+ * todo:
+ *  - we block the queue regardless of success and rely on the
+ *    scsi_protect_queue function to unblock if the command
+ *    failed... should we also inspect here?
+ */
+static void scsi_protect_wait_done(struct scsi_cmnd *cmd)
+{
+	struct request *req = cmd->request;
+	struct request_queue *q = cmd->device->request_queue;
+	struct scsi_device *sdev = cmd->device;
+	unsigned long flags;
+
+	req->rq_status = RQ_SCSI_DONE;	/* Busy, but indicate request done */
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	if (blk_rq_tagged(req))
+		blk_queue_end_tag(q, req);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	scsi_internal_device_block(sdev);
+
+	if (req->waiting)
+		complete(req->waiting);
+}
+
+/*
+ * As per scsi_wait_req(), except sets the completion function
+ * as scsi_protect_wait_done().
+ */
+void scsi_protect_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
+			   unsigned bufflen, int timeout, int retries)
+{
+	DECLARE_COMPLETION(wait);
+
+	sreq->sr_request->waiting = &wait;
+	sreq->sr_request->rq_status = RQ_SCSI_BUSY;
+	sreq->sr_request->end_io = scsi_protect_wait_req_end_io;
+	scsi_do_req(sreq, cmnd, buffer, bufflen, scsi_protect_wait_done,
+		    timeout, retries);
+	wait_for_completion(&wait);
+	sreq->sr_request->waiting = NULL;
+	if (sreq->sr_request->rq_status != RQ_SCSI_DONE)
+		sreq->sr_result |= (DRIVER_ERROR << 24);
+
+	__scsi_release_request(sreq);
+}
+
+/*
+ * scsi_unprotect_queue()
+ *  - release the queue that was previously blocked
+ */
+int scsi_unprotect_queue(request_queue_t *q)
+{
+	struct scsi_device *sdev = q->queuedata;
+	int rc = 0, pending = 0;
+	u8 scsi_cmd[MAX_COMMAND_SIZE];
+	struct scsi_sense_hdr sshdr;
+
+	if (sdev->sdev_state != SDEV_BLOCK)
+		return -ENXIO;
+
+	/* Are there any pending jobs on the queue? */
+	pending = ((q->rq.count[READ] > 0) || (q->rq.count[WRITE] > 0)) ? 1 : 0;
+
+	rc = scsi_internal_device_unblock(sdev);
+	if (rc)
+		return rc;
+
+	if (!pending) {
+		printk(KERN_DEBUG "scsi_unprotect_queue(): No pending I/O, re-enabling power management..\n");
+
+		memset(scsi_cmd, 0, sizeof(scsi_cmd));
+		scsi_cmd[0]  = ATA_16;
+		scsi_cmd[1]  = (3 << 1); /* Non-data */
+		/* scsi_cmd[2] is already 0 -- no off.line, cc, or data xfer */
+		scsi_cmd[14] = 0xe5; /* CHECK_POWER_MODE1 */
+
+		/* Good values for timeout and retries?  Values below
+		   from scsi_ioctl_send_command() for default case... */
+		if (scsi_execute_req(sdev, scsi_cmd, DMA_NONE, NULL, 0, &sshdr,
+				     (10*HZ), 5))
+			rc = -EIO;
+	}
+	return rc;
+}
+EXPORT_SYMBOL_GPL(scsi_unprotect_queue);
+
+/*
+ * scsi_protect_queue()
+ *  - build and issue the park/standby command..
+ *  - queue is blocked during command completion handler
+ */
+int scsi_protect_queue(request_queue_t *q, int unload)
+{
+	struct scsi_device *sdev = q->queuedata;
+	int rc = 0;
+	u8 scsi_cmd[MAX_COMMAND_SIZE];
+	u8 args[7];
+	struct scsi_request *sreq;
+	unsigned char *sb, *desc;
+
+	if (sdev->sdev_state != SDEV_RUNNING)
+		return -ENXIO;
+
+	memset(args, 0, sizeof(args));
+
+	if (unload) {
+		args[0] = 0xe1;
+		args[1] = 0x44;
+		args[3] = 0x4c;
+		args[4] = 0x4e;
+		args[5] = 0x55;
+	} else
+		args[0] = 0xe0;
+
+	memset(scsi_cmd, 0, sizeof(scsi_cmd));
+	scsi_cmd[0]  = ATA_16;
+	scsi_cmd[1]  = (3 << 1); /* Non-data */
+	scsi_cmd[2]  = 0x20;     /* no off.line, or data xfer, request cc */
+	scsi_cmd[4]  = args[1];
+	scsi_cmd[6]  = args[2];
+	scsi_cmd[8]  = args[3];
+	scsi_cmd[10] = args[4];
+	scsi_cmd[12] = args[5];
+	scsi_cmd[14] = args[0];
+
+	sreq = scsi_allocate_request(sdev, GFP_KERNEL);
+	if (!sreq) {
+		rc = -EINTR;
+		goto error;
+	}
+
+	sreq->sr_data_direction = DMA_NONE;
+
+	scsi_protect_wait_req(sreq, scsi_cmd, NULL, 0, (10*HZ), 5);
+
+	if (sreq->sr_result != ((DRIVER_SENSE << 24) + SAM_STAT_CHECK_CONDITION)) {
+		printk(KERN_DEBUG "scsi_protect_queue(): head NOT parked!..\n");
+		scsi_unprotect_queue(q);	/* just in case we still managed to block */
+		rc = -EIO;
+		goto error;
+	}
+
+	sb = sreq->sr_sense_buffer;
+	desc = sb + 8;
+
+	/* Retrieve data from check condition */
+	args[1] = desc[3];
+	args[2] = desc[5];
+	args[3] = desc[7];
+	args[4] = desc[9];
+	args[5] = desc[11];
+	args[0] = desc[13];
+
+	if (unload) {
+		if (args[3] == 0xc4)
+			printk(KERN_DEBUG "scsi_protect_queue(): head parked..\n");
+		else {
+			/* error parking the head */
+			printk(KERN_DEBUG "scsi_protect_queue(): head NOT parked!..\n");
+			rc = -EIO;
+			scsi_unprotect_queue(q);
+		}
+	} else
+		printk(KERN_DEBUG "scsi_protect_queue(): head park not requested, used standby!..\n");
+
+error:
+	scsi_release_request(sreq);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(scsi_protect_queue);
+#endif
 
 /**
  * scsi_kmap_atomic_sg - find and atomically map an sg-elemnt
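
For readers unfamiliar with the SCSI-ATA pass-through used in scsi_protect_queue() above: the CDB is an ATA PASS-THROUGH (16) command, and the 0x20 in byte 2 sets ck_cond so the completed taskfile comes back in descriptor-format sense data, which is why the register values are recovered from desc[3..13]. My annotation of the layout (per SAT; not text from the patch):

/* ATA PASS-THROUGH (16) CDB as built in scsi_protect_queue():
 *   scsi_cmd[0]  = ATA_16 (0x85)
 *   scsi_cmd[1]  = (3 << 1)  -- protocol 3, non-data
 *   scsi_cmd[2]  = 0x20      -- ck_cond: return taskfile in sense data
 *   scsi_cmd[4]  = features      (args[1])
 *   scsi_cmd[6]  = sector count  (args[2])
 *   scsi_cmd[8]  = LBA low       (args[3])
 *   scsi_cmd[10] = LBA mid       (args[4])
 *   scsi_cmd[12] = LBA high      (args[5])
 *   scsi_cmd[14] = command       (args[0])
 * The ATA status-return descriptor then starts at sense buffer + 8,
 * with error/count/LBA low/LBA mid/LBA high/status at
 * desc[3]/desc[5]/desc[7]/desc[9]/desc[11]/desc[13].
 */
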
(-)linux-2.6.17.1/include/linux/ata.h (+1 lines)
@@ -277,6 +277,7 @@
 #define ata_id_rahead_enabled(id) ((id)[85] & (1 << 6))
 #define ata_id_wcache_enabled(id) ((id)[85] & (1 << 5))
 #define ata_id_hpa_enabled(id)	((id)[85] & (1 << 10))
+#define ata_id_has_unload(id)	((id)[84] & (1 << 13))
 #define ata_id_has_fua(id)	((id)[84] & (1 << 6))
 #define ata_id_has_flush(id)	((id)[83] & (1 << 12))
 #define ata_id_has_flush_ext(id) ((id)[83] & (1 << 13))
(-)linux-2.6.17.1/include/linux/blkdev.h (+6 lines)
@@ -297,6 +297,8 @@
 typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
 typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
+typedef int (issue_protect_fn) (request_queue_t *);
+typedef int (issue_unprotect_fn) (request_queue_t *);
 
 enum blk_queue_state {
 	Queue_down,
@@ -339,6 +341,8 @@
 	issue_flush_fn		*issue_flush_fn;
 	prepare_flush_fn	*prepare_flush_fn;
 	softirq_done_fn		*softirq_done_fn;
+	issue_protect_fn	*issue_protect_fn;
+	issue_unprotect_fn	*issue_unprotect_fn;
 
 	/*
 	 * Dispatch queue sorting
@@ -720,6 +724,8 @@
 extern unsigned blk_ordered_cur_seq(request_queue_t *);
 extern unsigned blk_ordered_req_seq(struct request *);
 extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);
+extern void blk_queue_issue_protect_fn(request_queue_t *, issue_protect_fn *);
+extern void blk_queue_issue_unprotect_fn(request_queue_t *, issue_unprotect_fn *);
 
 extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
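
These declarations define the whole driver-facing contract: a low-level driver installs the two hooks on its queue, and blk_protect_register() then exposes the 'protect' attribute for it. A minimal sketch of a conforming driver (hypothetical names; it mirrors what ide-disk.c does above):

/* Hypothetical low-level driver wiring for the protect hooks. */
static int mydrv_issue_protect_fn(request_queue_t *q)
{
	/* park the heads, then stop q -- see ide_protect_queue() above */
	return 0;
}

static int mydrv_issue_unprotect_fn(request_queue_t *q)
{
	/* restart q and re-enable drive power management */
	return 0;
}

static void mydrv_init_queue(request_queue_t *q)
{
	blk_queue_issue_protect_fn(q, mydrv_issue_protect_fn);
	blk_queue_issue_unprotect_fn(q, mydrv_issue_unprotect_fn);
}
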
(-)linux-2.6.17.1/include/linux/ide.h (+1 lines)
@@ -1085,6 +1085,7 @@
  */
 typedef enum {
 	ide_wait,	/* insert rq at end of list, and wait for it */
+	ide_next,	/* insert rq immediately after current request */
 	ide_preempt,	/* insert rq in front of current request */
 	ide_head_wait,	/* insert rq in front of current request and wait for it */
 	ide_end		/* insert rq at end of list, but don't wait for it */
