/* Timer for init mask retries. */
static struct timer_list retry_timer;
+/* Timer for busy retries. */
+static struct timer_list sclp_busy_timer;
+
static volatile unsigned long sclp_status = 0;
/* some status flags */
#define SCLP_INIT 0
#define SCLP_READING 2
#define SCLP_INIT_POLL_INTERVAL 1
+#define SCLP_BUSY_POLL_INTERVAL 1
#define SCLP_COMMAND_INITIATED 0
#define SCLP_BUSY 2
*/
if (cc == SCLP_NOT_OPERATIONAL)
return -EIO;
- /*
- * We set the SCLP_RUNNING bit for cc 2 as well because if
- * service_call returns cc 2 some old request is running
- * that has to complete first
- */
- set_bit(SCLP_RUNNING, &sclp_status);
if (cc == SCLP_BUSY)
return -EBUSY;
return 0;
}
-static int
+static void
sclp_start_request(void)
{
struct sclp_req *req;
int rc;
unsigned long flags;
- /* quick exit if sclp is already in use */
- if (test_bit(SCLP_RUNNING, &sclp_status))
- return -EBUSY;
spin_lock_irqsave(&sclp_lock, flags);
- /* Get first request on queue if available */
- req = NULL;
- if (!list_empty(&sclp_req_queue))
+ /* quick exit if sclp is already in use */
+ if (test_bit(SCLP_RUNNING, &sclp_status)) {
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ return;
+ }
+ /* Try to start requests from the request queue. */
+ while (!list_empty(&sclp_req_queue)) {
req = list_entry(sclp_req_queue.next, struct sclp_req, list);
- if (req) {
rc = __service_call(req->command, req->sccb);
- if (rc) {
- req->status = SCLP_REQ_FAILED;
- list_del(&req->list);
- } else
+ if (rc == 0) {
+			/* Successfully started request. */
req->status = SCLP_REQ_RUNNING;
- } else
- rc = -EINVAL;
+ /* Request active. Set running indication. */
+ set_bit(SCLP_RUNNING, &sclp_status);
+ break;
+ }
+ if (rc == -EBUSY) {
+			/*
+ * SCLP is busy but no request is running.
+ * Try again later.
+ */
+ if (!timer_pending(&sclp_busy_timer) ||
+ !mod_timer(&sclp_busy_timer,
+ jiffies + SCLP_BUSY_POLL_INTERVAL*HZ)) {
+ sclp_busy_timer.function =
+ (void *) sclp_start_request;
+ sclp_busy_timer.expires =
+ jiffies + SCLP_BUSY_POLL_INTERVAL*HZ;
+ add_timer(&sclp_busy_timer);
+ }
+ break;
+ }
+ /* Request failed. */
+ req->status = SCLP_REQ_FAILED;
+ list_del(&req->list);
+ if (req->callback) {
+ spin_unlock_irqrestore(&sclp_lock, flags);
+ req->callback(req, req->callback_data);
+ spin_lock_irqsave(&sclp_lock, flags);
+ }
+ }
spin_unlock_irqrestore(&sclp_lock, flags);
- if (rc == -EIO && req->callback != NULL)
- req->callback(req, req->callback_data);
- return rc;
}
static int
static void
do_load_quiesce_psw(void * __unused)
{
+ static atomic_t cpuid = ATOMIC_INIT(-1);
psw_t quiesce_psw;
- unsigned long status;
+ __u32 status;
int i;
- if (smp_processor_id() != 0)
+ if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
signal_processor(smp_processor_id(), sigp_stop);
/* Wait for all other cpus to enter stopped state */
i = 1;
case sigp_order_code_accepted:
case sigp_status_stored:
/* Check for stopped and check stop state */
- if (test_bit(6, &status) || test_bit(4, &status))
+ if (status & 0x50)
i++;
break;
case sigp_busy:
*/
do {
rc = __service_call(req->command, req->sccb);
+ if (rc == 0)
+ set_bit(SCLP_RUNNING, &sclp_status);
spin_unlock_irqrestore(&sclp_lock, flags);
if (rc == -EIO)
return -ENOSYS;
ctl_set_bit(0, 9);
init_timer(&retry_timer);
+ init_timer(&sclp_busy_timer);
/* do the initial write event mask */
rc = sclp_init_mask();
if (rc == 0) {