| author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 15:20:36 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 15:20:36 -0700 |
| commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
| tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/s390/cio/qdio.c | |
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/s390/cio/qdio.c')
-rw-r--r-- | drivers/s390/cio/qdio.c | 3468 |
1 file changed, 3468 insertions(+), 0 deletions(-)
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
new file mode 100644
index 00000000000..bbe9f45d143
--- /dev/null
+++ b/drivers/s390/cio/qdio.c
@@ -0,0 +1,3468 @@
+/*
+ *
+ * linux/drivers/s390/cio/qdio.c
+ *
+ * Linux for S/390 QDIO base support, Hipersocket base support
+ * version 2
+ *
+ * Copyright 2000,2002 IBM Corporation
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
+ * 2.6 cio integration by Cornelia Huck <cohuck@de.ibm.com>
+ *
+ * Restriction: only 63 iqdio subchannels would have its own indicator,
+ * after that, subsequent subchannels share one indicator
+ *
+ *
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/timer.h>
+
+#include <asm/ccwdev.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/semaphore.h>
+#include <asm/timex.h>
+
+#include <asm/debug.h>
+#include <asm/qdio.h>
+
+#include "cio.h"
+#include "css.h"
+#include "device.h"
+#include "airq.h"
+#include "qdio.h"
+#include "ioasm.h"
+#include "chsc.h"
+
+#define VERSION_QDIO_C "$Revision: 1.98 $"
+
+/****************** MODULE PARAMETER VARIABLES ********************/
+MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
+MODULE_DESCRIPTION("QDIO base support version 2, " \
+		   "Copyright 2000 IBM Corporation");
+MODULE_LICENSE("GPL");
+
+/******************** HERE WE GO ***********************************/
+
+static const char version[] = "QDIO base support version 2 ("
+	VERSION_QDIO_C "/" VERSION_QDIO_H "/" VERSION_CIO_QDIO_H ")";
+
+#ifdef QDIO_PERFORMANCE_STATS
+static int proc_perf_file_registration;
+static unsigned long i_p_c, i_p_nc, o_p_c, o_p_nc, ii_p_c, ii_p_nc;
+static struct qdio_perf_stats perf_stats;
+#endif /* QDIO_PERFORMANCE_STATS */
+
+static int hydra_thinints;
+static int omit_svs;
+
+static int indicator_used[INDICATORS_PER_CACHELINE];
+static __u32 * volatile indicators;
+static __u32 volatile spare_indicator;
+static atomic_t spare_indicator_usecount;
+
+static debug_info_t *qdio_dbf_setup;
+static debug_info_t *qdio_dbf_sbal;
+static debug_info_t *qdio_dbf_trace;
+static debug_info_t *qdio_dbf_sense;
+#ifdef CONFIG_QDIO_DEBUG
+static debug_info_t *qdio_dbf_slsb_out;
+static debug_info_t *qdio_dbf_slsb_in;
+#endif /* CONFIG_QDIO_DEBUG */
+
+/* iQDIO stuff: */
+static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change
+						 during a while loop */
+static DEFINE_SPINLOCK(ttiq_list_lock);
+static int register_thinint_result;
+static void tiqdio_tl(unsigned long);
+static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0);
+
+/* not a macro, as one of the arguments is atomic_read */
+static inline int
+qdio_min(int a,int b)
+{
+	if (a<b)
+		return a;
+	else
+		return b;
+}
+
+/***************** SCRUBBER HELPER ROUTINES **********************/
+
+static inline volatile __u64
+qdio_get_micros(void)
+{
+	return (get_clock() >> 10); /* time>>12 is microseconds */
+}
+
+/*
+ * unfortunately, we can't just xchg the values; in do_QDIO we want to reserve
+ * the q in any case, so that we'll not be interrupted when we are in
+ * qdio_mark_tiq... shouldn't have a really bad impact, as reserving almost
+ * ever works (last famous words)
+ */
+static inline int
+qdio_reserve_q(struct qdio_q *q)
+{
+	return atomic_add_return(1,&q->use_count) - 1;
+}
+
+static inline void
+qdio_release_q(struct qdio_q *q)
+{
+	atomic_dec(&q->use_count);
+}
+
+static volatile inline void
+qdio_set_slsb(volatile char *slsb, unsigned char value)
+{
+	xchg((char*)slsb,value);
+}
+
+static inline int
+qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
+	       unsigned int gpr3)
+{
+	int cc;
+
+	QDIO_DBF_TEXT4(0,trace,"sigasync");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+#ifdef QDIO_PERFORMANCE_STATS
+	perf_stats.siga_syncs++;
+#endif /* QDIO_PERFORMANCE_STATS */
+
+	cc = do_siga_sync(q->irq, gpr2, gpr3);
+	if (cc)
+		QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
+
+	return cc;
+}
+
+static inline int
+qdio_siga_sync_q(struct qdio_q *q)
+{
+	if (q->is_input_q)
+		return qdio_siga_sync(q, 0, q->mask);
+	return qdio_siga_sync(q, q->mask, 0);
+}
+
+/*
+ * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
+ * an access exception
+ */
+static inline int
+qdio_siga_output(struct qdio_q *q)
+{
+	int cc;
+	__u32 busy_bit;
+	__u64 start_time=0;
+
+#ifdef QDIO_PERFORMANCE_STATS
+	perf_stats.siga_outs++;
+#endif /* QDIO_PERFORMANCE_STATS */
+
+	QDIO_DBF_TEXT4(0,trace,"sigaout");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	for (;;) {
+		cc = do_siga_output(q->irq, q->mask, &busy_bit);
+//QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit);
+		if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) {
+			if (!start_time)
+				start_time=NOW;
+			if ((NOW-start_time)>QDIO_BUSY_BIT_PATIENCE)
+				break;
+		} else
+			break;
+	}
+
+	if ((cc==2) && (busy_bit))
+		cc |= QDIO_SIGA_ERROR_B_BIT_SET;
+
+	if (cc)
+		QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
+
+	return cc;
+}
+
+static inline int
+qdio_siga_input(struct qdio_q *q)
+{
+	int cc;
+
+	QDIO_DBF_TEXT4(0,trace,"sigain");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+#ifdef QDIO_PERFORMANCE_STATS
+	perf_stats.siga_ins++;
+#endif /* QDIO_PERFORMANCE_STATS */
+
+	cc = do_siga_input(q->irq, q->mask);
+
+	if (cc)
+		QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
+
+	return cc;
+}
+
+/* locked by the locks in qdio_activate and qdio_cleanup */
+static __u32 * volatile
+qdio_get_indicator(void)
+{
+	int i;
+
+	for (i=1;i<INDICATORS_PER_CACHELINE;i++)
+		if (!indicator_used[i]) {
+			indicator_used[i]=1;
+			return indicators+i;
+		}
+	atomic_inc(&spare_indicator_usecount);
+	return (__u32 * volatile) &spare_indicator;
+}
+
+/* locked by the locks in qdio_activate and qdio_cleanup */
+static void
+qdio_put_indicator(__u32 *addr)
+{
+	int i;
+
+	if ( (addr) && (addr!=&spare_indicator) ) {
+		i=addr-indicators;
+		indicator_used[i]=0;
+	}
+	if (addr == &spare_indicator)
+		atomic_dec(&spare_indicator_usecount);
+}
+
+static inline volatile void
+tiqdio_clear_summary_bit(__u32 *location)
+{
+	QDIO_DBF_TEXT5(0,trace,"clrsummb");
+	QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));
+
+	xchg(location,0);
+}
+
+static inline volatile void
+tiqdio_set_summary_bit(__u32 *location)
+{
+	QDIO_DBF_TEXT5(0,trace,"setsummb");
+	QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));
+
+	xchg(location,-1);
+}
+
+static inline void
+tiqdio_sched_tl(void)
+{
+	tasklet_hi_schedule(&tiqdio_tasklet);
+}
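qdio_reserve_q/qdio_release_q above form a counting try-lock: the previous counter value tells the caller whether anybody else is on the queue, and a contended caller backs off and reschedules instead of spinning. A minimal user-space sketch of the same pattern follows, using C11 atomics; all names in it are illustrative, not part of this commit. Note that the kernel's atomic_add_return() yields the new value (hence the "- 1" above), while C11 atomic_fetch_add() already returns the previous one.

```c
/* Standalone sketch (not kernel code) of the qdio_reserve_q pattern. */
#include <stdatomic.h>
#include <stdio.h>

struct demo_q {
	atomic_int use_count;		/* mirrors q->use_count */
};

/* returns nonzero if someone else already holds the queue */
static int demo_reserve_q(struct demo_q *q)
{
	/* fetch_add returns the PREVIOUS value: 0 means we are alone */
	return atomic_fetch_add(&q->use_count, 1);
}

static void demo_release_q(struct demo_q *q)
{
	atomic_fetch_sub(&q->use_count, 1);
}

int main(void)
{
	struct demo_q q = { .use_count = 0 };

	if (demo_reserve_q(&q)) {
		/* contended: undo our increment and try again later,
		 * as __qdio_outbound_processing does via qdio_mark_q() */
		demo_release_q(&q);
		puts("busy, try again");
	} else {
		puts("reserved");
		demo_release_q(&q);
	}
	return 0;
}
```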
+
+static inline void
+qdio_mark_tiq(struct qdio_q *q)
+{
+	unsigned long flags;
+
+	QDIO_DBF_TEXT4(0,trace,"mark iq");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	spin_lock_irqsave(&ttiq_list_lock,flags);
+	if (unlikely(atomic_read(&q->is_in_shutdown)))
+		goto out_unlock;
+
+	if (!q->is_input_q)
+		goto out_unlock;
+
+	if ((q->list_prev) || (q->list_next))
+		goto out_unlock;
+
+	if (!tiq_list) {
+		tiq_list=q;
+		q->list_prev=q;
+		q->list_next=q;
+	} else {
+		q->list_next=tiq_list;
+		q->list_prev=tiq_list->list_prev;
+		tiq_list->list_prev->list_next=q;
+		tiq_list->list_prev=q;
+	}
+	spin_unlock_irqrestore(&ttiq_list_lock,flags);
+
+	tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
+	tiqdio_sched_tl();
+	return;
+out_unlock:
+	spin_unlock_irqrestore(&ttiq_list_lock,flags);
+	return;
+}
+
+static inline void
+qdio_mark_q(struct qdio_q *q)
+{
+	QDIO_DBF_TEXT4(0,trace,"mark q");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	if (unlikely(atomic_read(&q->is_in_shutdown)))
+		return;
+
+	tasklet_schedule(&q->tasklet);
+}
+
+static inline int
+qdio_stop_polling(struct qdio_q *q)
+{
+#ifdef QDIO_USE_PROCESSING_STATE
+	int gsf;
+
+	if (!atomic_swap(&q->polling,0))
+		return 1;
+
+	QDIO_DBF_TEXT4(0,trace,"stoppoll");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	/* show the card that we are not polling anymore */
+	if (!q->is_input_q)
+		return 1;
+
+	gsf=GET_SAVED_FRONTIER(q);
+	set_slsb(&q->slsb.acc.val[(gsf+QDIO_MAX_BUFFERS_PER_Q-1)&
+				  (QDIO_MAX_BUFFERS_PER_Q-1)],
+		 SLSB_P_INPUT_NOT_INIT);
+	/*
+	 * we don't issue this SYNC_MEMORY, as we trust Rick T and
+	 * moreover will not use the PROCESSING state under VM, so
+	 * q->polling was 0 anyway
+	 */
+	/*SYNC_MEMORY;*/
+	if (q->slsb.acc.val[gsf]!=SLSB_P_INPUT_PRIMED)
+		return 1;
+	/*
+	 * set our summary bit again, as otherwise there is a
+	 * small window we can miss between resetting it and
+	 * checking for PRIMED state
+	 */
+	if (q->is_thinint_q)
+		tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
+	return 0;
+
+#else /* QDIO_USE_PROCESSING_STATE */
+	return 1;
+#endif /* QDIO_USE_PROCESSING_STATE */
+}
+
+/*
+ * see the comment in do_QDIO and before qdio_reserve_q about the
+ * sophisticated locking outside of unmark_q, so that we don't need to
+ * disable the interrupts :-)
+*/
+static inline void
+qdio_unmark_q(struct qdio_q *q)
+{
+	unsigned long flags;
+
+	QDIO_DBF_TEXT4(0,trace,"unmark q");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	if ((!q->list_prev)||(!q->list_next))
+		return;
+
+	if ((q->is_thinint_q)&&(q->is_input_q)) {
+		/* iQDIO */
+		spin_lock_irqsave(&ttiq_list_lock,flags);
+		/* in case cleanup has done this already and simultaneously
+		 * qdio_unmark_q is called from the interrupt handler, we've
+		 * got to check this in this specific case again */
+		if ((!q->list_prev)||(!q->list_next))
+			goto out;
+		if (q->list_next==q) {
+			/* q was the only interesting q */
+			tiq_list=NULL;
+			q->list_next=NULL;
+			q->list_prev=NULL;
+		} else {
+			q->list_next->list_prev=q->list_prev;
+			q->list_prev->list_next=q->list_next;
+			tiq_list=q->list_next;
+			q->list_next=NULL;
+			q->list_prev=NULL;
+		}
+out:
+		spin_unlock_irqrestore(&ttiq_list_lock,flags);
+	}
+}
+
+static inline unsigned long
+tiqdio_clear_global_summary(void)
+{
+	unsigned long time;
+
+	QDIO_DBF_TEXT5(0,trace,"clrglobl");
+
+	time = do_clear_global_summary();
+
+	QDIO_DBF_HEX5(0,trace,&time,sizeof(unsigned long));
+
+	return time;
+}
+
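qdio_mark_tiq and qdio_unmark_q above maintain tiq_list as a circular, doubly linked list whose head pointer may move on removal. A standalone sketch of that list discipline, with illustrative names (assumed, not from the driver):

```c
/* Circular doubly linked list with an external head, as tiq_list uses. */
#include <assert.h>
#include <stddef.h>

struct node {
	struct node *prev, *next;
};

static struct node *head;		/* plays the role of tiq_list */

static void list_insert(struct node *n)
{
	if (n->prev || n->next)		/* already queued, like qdio_mark_tiq */
		return;
	if (!head) {
		head = n;
		n->prev = n->next = n;	/* single element points at itself */
	} else {
		n->next = head;
		n->prev = head->prev;
		head->prev->next = n;
		head->prev = n;
	}
}

static void list_remove(struct node *n)
{
	if (!n->prev || !n->next)	/* not queued */
		return;
	if (n->next == n) {		/* n was the only element */
		head = NULL;
	} else {
		n->next->prev = n->prev;
		n->prev->next = n->next;
		head = n->next;		/* keep head valid, as qdio_unmark_q does */
	}
	n->prev = n->next = NULL;
}

int main(void)
{
	struct node a = {0}, b = {0};

	list_insert(&a);
	list_insert(&b);
	assert(a.next == &b && b.next == &a);
	list_remove(&a);
	assert(head == &b && b.next == &b);
	list_remove(&b);
	assert(head == NULL);
	return 0;
}
```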
+
+/************************* OUTBOUND ROUTINES *******************************/
+
+inline static int
+qdio_get_outbound_buffer_frontier(struct qdio_q *q)
+{
+	int f,f_mod_no;
+	volatile char *slsb;
+	int first_not_to_check;
+	char dbf_text[15];
+
+	QDIO_DBF_TEXT4(0,trace,"getobfro");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	slsb=&q->slsb.acc.val[0];
+	f_mod_no=f=q->first_to_check;
+	/*
+	 * f points to already processed elements, so f+no_used is correct...
+	 * ... but: we don't check 128 buffers, as otherwise
+	 * qdio_has_outbound_q_moved would return 0
+	 */
+	first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
+				      (QDIO_MAX_BUFFERS_PER_Q-1));
+
+	if ((!q->is_iqdio_q)&&(!q->hydra_gives_outbound_pcis))
+		SYNC_MEMORY;
+
+check_next:
+	if (f==first_not_to_check)
+		goto out;
+
+	switch(slsb[f_mod_no]) {
+
+	/* the adapter has not fetched the output yet */
+	case SLSB_CU_OUTPUT_PRIMED:
+		QDIO_DBF_TEXT5(0,trace,"outpprim");
+		break;
+
+	/* the adapter got it */
+	case SLSB_P_OUTPUT_EMPTY:
+		atomic_dec(&q->number_of_buffers_used);
+		f++;
+		f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
+		QDIO_DBF_TEXT5(0,trace,"outpempt");
+		goto check_next;
+
+	case SLSB_P_OUTPUT_ERROR:
+		QDIO_DBF_TEXT3(0,trace,"outperr");
+		sprintf(dbf_text,"%x-%x-%x",f_mod_no,
+			q->sbal[f_mod_no]->element[14].sbalf.value,
+			q->sbal[f_mod_no]->element[15].sbalf.value);
+		QDIO_DBF_TEXT3(1,trace,dbf_text);
+		QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
+
+		/* kind of process the buffer */
+		set_slsb(&q->slsb.acc.val[f_mod_no], SLSB_P_OUTPUT_NOT_INIT);
+
+		/*
+		 * we increment the frontier, as this buffer
+		 * was processed obviously
+		 */
+		atomic_dec(&q->number_of_buffers_used);
+		f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
+
+		if (q->qdio_error)
+			q->error_status_flags|=
+				QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
+		q->qdio_error=SLSB_P_OUTPUT_ERROR;
+		q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
+
+		break;
+
+	/* no new buffers */
+	default:
+		QDIO_DBF_TEXT5(0,trace,"outpni");
+	}
+out:
+	return (q->first_to_check=f_mod_no);
+}
+
+/* all buffers are processed */
+inline static int
+qdio_is_outbound_q_done(struct qdio_q *q)
+{
+	int no_used;
+#ifdef CONFIG_QDIO_DEBUG
+	char dbf_text[15];
+#endif
+
+	no_used=atomic_read(&q->number_of_buffers_used);
+
+#ifdef CONFIG_QDIO_DEBUG
+	if (no_used) {
+		sprintf(dbf_text,"oqisnt%02x",no_used);
+		QDIO_DBF_TEXT4(0,trace,dbf_text);
+	} else {
+		QDIO_DBF_TEXT4(0,trace,"oqisdone");
+	}
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+#endif /* CONFIG_QDIO_DEBUG */
+	return (no_used==0);
+}
+
+inline static int
+qdio_has_outbound_q_moved(struct qdio_q *q)
+{
+	int i;
+
+	i=qdio_get_outbound_buffer_frontier(q);
+
+	if ( (i!=GET_SAVED_FRONTIER(q)) ||
+	     (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
+		SAVE_FRONTIER(q,i);
+		QDIO_DBF_TEXT4(0,trace,"oqhasmvd");
+		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+		return 1;
+	} else {
+		QDIO_DBF_TEXT4(0,trace,"oqhsntmv");
+		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+		return 0;
+	}
+}
+
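qdio_get_outbound_buffer_frontier walks the 128-slot SLSB ring from first_to_check, advancing over buffers the adapter has emptied and stopping at the first one it still owns; the power-of-two queue size makes "mod 128" a cheap AND with 127. A simplified standalone model of that scan (states and helpers are stand-ins, not the driver's):

```c
/* Minimal model of the outbound frontier scan. */
#include <stdio.h>

#define Q_SIZE 128			/* QDIO_MAX_BUFFERS_PER_Q */
#define MASK   (Q_SIZE - 1)

enum slsb { OWNED_BY_CU, EMPTY, ERROR };	/* illustrative subset */

static int scan_frontier(enum slsb *slsb, int first_to_check, int used)
{
	int f = first_to_check;
	/* never inspect all 128 slots, mirroring the driver's
	 * first_not_to_check = f + min(used, 127) */
	int stop = f + (used < MASK ? used : MASK);

	while (f != stop) {
		int f_mod = f & MASK;	/* cheap "mod 128": power of two */
		if (slsb[f_mod] != EMPTY)
			break;		/* adapter still owns this buffer */
		f++;			/* buffer done, advance frontier */
	}
	return f & MASK;
}

int main(void)
{
	enum slsb ring[Q_SIZE] = { [0] = EMPTY, [1] = EMPTY,
				   [2] = OWNED_BY_CU };
	printf("new frontier: %d\n", scan_frontier(ring, 0, 3)); /* -> 2 */
	return 0;
}
```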
+inline static void
+qdio_kick_outbound_q(struct qdio_q *q)
+{
+	int result;
+#ifdef CONFIG_QDIO_DEBUG
+	char dbf_text[15];
+
+	QDIO_DBF_TEXT4(0,trace,"kickoutq");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+#endif /* CONFIG_QDIO_DEBUG */
+
+	if (!q->siga_out)
+		return;
+
+	/* here's the story with cc=2 and busy bit set (thanks, Rick):
+	 * VM's CP could present us cc=2 and busy bit set on SIGA-write
+	 * during reconfiguration of their Guest LAN (only in HIPERS mode,
+	 * QDIO mode is asynchronous -- cc=2 and busy bit there will take
+	 * the queues down immediately; and not being under VM we have a
+	 * problem on cc=2 and busy bit set right away).
+	 *
+	 * Therefore qdio_siga_output will try for a short time constantly,
+	 * if such a condition occurs. If it doesn't change, it will
+	 * increase the busy_siga_counter and save the timestamp, and
+	 * schedule the queue for later processing (via mark_q, using the
+	 * queue tasklet). __qdio_outbound_processing will check out the
+	 * counter. If non-zero, it will call qdio_kick_outbound_q as often
+	 * as the value of the counter. This will attempt further SIGA
+	 * instructions. For each successful SIGA, the counter is
+	 * decreased, for failing SIGAs the counter remains the same, after
+	 * all.
+	 * After some time of no movement, qdio_kick_outbound_q will
+	 * finally fail and reflect corresponding error codes to call
+	 * the upper layer module and have it take the queues down.
+	 *
+	 * Note that this is a change from the original HiperSockets design
+	 * (saying cc=2 and busy bit means take the queues down), but in
+	 * these days Guest LAN didn't exist... excessive cc=2 with busy bit
+	 * conditions will still take the queues down, but the threshold is
+	 * higher due to the Guest LAN environment.
+	 */
+
+
+	result=qdio_siga_output(q);
+
+	switch (result) {
+	case 0:
+		/* went smooth this time, reset timestamp */
+#ifdef CONFIG_QDIO_DEBUG
+		QDIO_DBF_TEXT3(0,trace,"cc2reslv");
+		sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
+			atomic_read(&q->busy_siga_counter));
+		QDIO_DBF_TEXT3(0,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+		q->timing.busy_start=0;
+		break;
+	case (2|QDIO_SIGA_ERROR_B_BIT_SET):
+		/* cc=2 and busy bit: */
+		atomic_inc(&q->busy_siga_counter);
+
+		/* if the last siga was successful, save
+		 * timestamp here */
+		if (!q->timing.busy_start)
+			q->timing.busy_start=NOW;
+
+		/* if we're in time, don't touch error_status_flags
+		 * and siga_error */
+		if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
+			qdio_mark_q(q);
+			break;
+		}
+		QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
+#ifdef CONFIG_QDIO_DEBUG
+		sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
+			atomic_read(&q->busy_siga_counter));
+		QDIO_DBF_TEXT3(0,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+		/* else fallthrough and report error */
+	default:
+		/* for plain cc=1, 2 or 3: */
+		if (q->siga_error)
+			q->error_status_flags|=
+				QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
+		q->error_status_flags|=
+			QDIO_STATUS_LOOK_FOR_ERROR;
+		q->siga_error=result;
+	}
+}
+
+inline static void
+qdio_kick_outbound_handler(struct qdio_q *q)
+{
+	int start, end, real_end, count;
+#ifdef CONFIG_QDIO_DEBUG
+	char dbf_text[15];
+#endif
+
+	start = q->first_element_to_kick;
+	/* last_move_ftc was just updated */
+	real_end = GET_SAVED_FRONTIER(q);
+	end = (real_end+QDIO_MAX_BUFFERS_PER_Q-1)&
+		(QDIO_MAX_BUFFERS_PER_Q-1);
+	count = (end+QDIO_MAX_BUFFERS_PER_Q+1-start)&
+		(QDIO_MAX_BUFFERS_PER_Q-1);
+
+#ifdef CONFIG_QDIO_DEBUG
+	QDIO_DBF_TEXT4(0,trace,"kickouth");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	sprintf(dbf_text,"s=%2xc=%2x",start,count);
+	QDIO_DBF_TEXT4(0,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+
+	if (q->state==QDIO_IRQ_STATE_ACTIVE)
+		q->handler(q->cdev,QDIO_STATUS_OUTBOUND_INT|
+			   q->error_status_flags,
+			   q->qdio_error,q->siga_error,q->q_no,start,count,
+			   q->int_parm);
+
+	/* for the next time: */
+	q->first_element_to_kick=real_end;
+	q->qdio_error=0;
+	q->siga_error=0;
+	q->error_status_flags=0;
+}
+
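The cc=2/busy-bit strategy spelled out in the comment above has two timescales: qdio_siga_output retries briefly (QDIO_BUSY_BIT_PATIENCE), and qdio_kick_outbound_q then counts deferred attempts until QDIO_BUSY_BIT_GIVE_UP elapses before reporting an error. A toy simulation of the outer, give-up logic; the clock, constants and fake_siga() are invented for illustration:

```c
/* Sketch of the busy-bit retry/give-up decision, not driver code. */
#include <stdio.h>

#define BUSY		2
#define GIVE_UP_AFTER	100	/* stands in for QDIO_BUSY_BIT_GIVE_UP */

static long now;		/* fake clock (the driver uses the TOD clock) */
static long busy_start;		/* like q->timing.busy_start */
static int busy_counter;	/* like q->busy_siga_counter */

static int fake_siga(void) { return now < 150 ? BUSY : 0; }

static void kick_outbound(void)
{
	int cc = fake_siga();

	if (cc == 0) {
		busy_start = 0;		/* went smooth, reset timestamp */
		printf("t=%ld: ok\n", now);
	} else if (cc == BUSY) {
		busy_counter++;
		if (!busy_start)
			busy_start = now;
		if (now - busy_start < GIVE_UP_AFTER) {
			printf("t=%ld: busy, retry later\n", now);
			return;		/* requeue, like qdio_mark_q() */
		}
		printf("t=%ld: busy too long, report error\n", now);
	}
}

int main(void)
{
	/* busy at t=1 and t=51, give up at t=101, recovered afterwards */
	for (now = 1; now <= 201; now += 50)
		kick_outbound();
	return 0;
}
```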
+static inline void
+__qdio_outbound_processing(struct qdio_q *q)
+{
+	int siga_attempts;
+
+	QDIO_DBF_TEXT4(0,trace,"qoutproc");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	if (unlikely(qdio_reserve_q(q))) {
+		qdio_release_q(q);
+#ifdef QDIO_PERFORMANCE_STATS
+		o_p_c++;
+#endif /* QDIO_PERFORMANCE_STATS */
+		/* as we're sissies, we'll check next time */
+		if (likely(!atomic_read(&q->is_in_shutdown))) {
+			qdio_mark_q(q);
+			QDIO_DBF_TEXT4(0,trace,"busy,agn");
+		}
+		return;
+	}
+#ifdef QDIO_PERFORMANCE_STATS
+	o_p_nc++;
+	perf_stats.tl_runs++;
+#endif /* QDIO_PERFORMANCE_STATS */
+
+	/* see comment in qdio_kick_outbound_q */
+	siga_attempts=atomic_read(&q->busy_siga_counter);
+	while (siga_attempts) {
+		atomic_dec(&q->busy_siga_counter);
+		qdio_kick_outbound_q(q);
+		siga_attempts--;
+	}
+
+	if (qdio_has_outbound_q_moved(q))
+		qdio_kick_outbound_handler(q);
+
+	if (q->is_iqdio_q) {
+		/*
+		 * for asynchronous queues, we better check, if the fill
+		 * level is too high. for synchronous queues, the fill
+		 * level will never be that high.
+		 */
+		if (atomic_read(&q->number_of_buffers_used)>
+		    IQDIO_FILL_LEVEL_TO_POLL)
+			qdio_mark_q(q);
+
+	} else if (!q->hydra_gives_outbound_pcis)
+		if (!qdio_is_outbound_q_done(q))
+			qdio_mark_q(q);
+
+	qdio_release_q(q);
+}
+
+static void
+qdio_outbound_processing(struct qdio_q *q)
+{
+	__qdio_outbound_processing(q);
+}
+
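At the end of __qdio_outbound_processing the queue reschedules itself in two cases: an asynchronous (iqdio) queue polls when its fill level is too high, while a synchronous queue polls whenever buffers remain outstanding, unless the adapter raises outbound PCIs. The decision, condensed into a standalone predicate with an illustrative constant:

```c
/* Sketch of the rescheduling decision; the constant is made up. */
#include <stdbool.h>
#include <stdio.h>

#define FILL_LEVEL_TO_POLL 32	/* stands in for IQDIO_FILL_LEVEL_TO_POLL */

static bool must_reschedule(bool is_iqdio, bool outbound_pcis, int used)
{
	if (is_iqdio)			/* async: PCI-less, watch fill level */
		return used > FILL_LEVEL_TO_POLL;
	if (!outbound_pcis)		/* sync without PCIs: poll until empty */
		return used != 0;
	return false;			/* adapter will interrupt us */
}

int main(void)
{
	printf("%d %d %d\n",
	       must_reschedule(true, false, 40),	/* 1: fill too high */
	       must_reschedule(false, false, 5),	/* 1: not done yet  */
	       must_reschedule(false, true, 5));	/* 0: PCI will come */
	return 0;
}
```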
+/************************* INBOUND ROUTINES *******************************/
+
+
+inline static int
+qdio_get_inbound_buffer_frontier(struct qdio_q *q)
+{
+	int f,f_mod_no;
+	volatile char *slsb;
+	int first_not_to_check;
+#ifdef CONFIG_QDIO_DEBUG
+	char dbf_text[15];
+#endif /* CONFIG_QDIO_DEBUG */
+#ifdef QDIO_USE_PROCESSING_STATE
+	int last_position=-1;
+#endif /* QDIO_USE_PROCESSING_STATE */
+
+	QDIO_DBF_TEXT4(0,trace,"getibfro");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	slsb=&q->slsb.acc.val[0];
+	f_mod_no=f=q->first_to_check;
+	/*
+	 * we don't check 128 buffers, as otherwise qdio_has_inbound_q_moved
+	 * would return 0
+	 */
+	first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
+				      (QDIO_MAX_BUFFERS_PER_Q-1));
+
+	/*
+	 * we don't use this one, as a PCI or we after a thin interrupt
+	 * will sync the queues
+	 */
+	/* SYNC_MEMORY;*/
+
+check_next:
+	f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
+	if (f==first_not_to_check)
+		goto out;
+	switch (slsb[f_mod_no]) {
+
+	/* CU_EMPTY means frontier is reached */
+	case SLSB_CU_INPUT_EMPTY:
+		QDIO_DBF_TEXT5(0,trace,"inptempt");
+		break;
+
+	/* P_PRIMED means set slsb to P_PROCESSING and move on */
+	case SLSB_P_INPUT_PRIMED:
+		QDIO_DBF_TEXT5(0,trace,"inptprim");
+
+#ifdef QDIO_USE_PROCESSING_STATE
+		/*
+		 * as soon as running under VM, polling the input queues will
+		 * kill VM in terms of CP overhead
+		 */
+		if (q->siga_sync) {
+			set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
+		} else {
+			/* set the previous buffer to NOT_INIT. The current
+			 * buffer will be set to PROCESSING at the end of
+			 * this function to avoid further interrupts. */
+			if (last_position>=0)
+				set_slsb(&slsb[last_position],
+					 SLSB_P_INPUT_NOT_INIT);
+			atomic_set(&q->polling,1);
+			last_position=f_mod_no;
+		}
+#else /* QDIO_USE_PROCESSING_STATE */
+		set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
+#endif /* QDIO_USE_PROCESSING_STATE */
+		/*
+		 * not needed, as the inbound queue will be synced on the next
+		 * siga-r, resp. tiqdio_is_inbound_q_done will do the siga-s
+		 */
+		/*SYNC_MEMORY;*/
+		f++;
+		atomic_dec(&q->number_of_buffers_used);
+		goto check_next;
+
+	case SLSB_P_INPUT_NOT_INIT:
+	case SLSB_P_INPUT_PROCESSING:
+		QDIO_DBF_TEXT5(0,trace,"inpnipro");
+		break;
+
+	/* P_ERROR means frontier is reached, break and report error */
+	case SLSB_P_INPUT_ERROR:
+#ifdef CONFIG_QDIO_DEBUG
+		sprintf(dbf_text,"inperr%2x",f_mod_no);
+		QDIO_DBF_TEXT3(1,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+		QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
+
+		/* kind of process the buffer */
+		set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
+
+		if (q->qdio_error)
+			q->error_status_flags|=
+				QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
+		q->qdio_error=SLSB_P_INPUT_ERROR;
+		q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
+
+		/* we increment the frontier, as this buffer
+		 * was processed obviously */
+		f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
+		atomic_dec(&q->number_of_buffers_used);
+
+#ifdef QDIO_USE_PROCESSING_STATE
+		last_position=-1;
+#endif /* QDIO_USE_PROCESSING_STATE */
+
+		break;
+
+	/* everything else means frontier not changed (HALTED or so) */
+	default:
+		break;
+	}
+out:
+	q->first_to_check=f_mod_no;
+
+#ifdef QDIO_USE_PROCESSING_STATE
+	if (last_position>=0)
+		set_slsb(&slsb[last_position],SLSB_P_INPUT_PROCESSING);
+#endif /* QDIO_USE_PROCESSING_STATE */
+
+	QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
+
+	return q->first_to_check;
+}
+
+inline static int
+qdio_has_inbound_q_moved(struct qdio_q *q)
+{
+	int i;
+
+#ifdef QDIO_PERFORMANCE_STATS
+	static int old_pcis=0;
+	static int old_thinints=0;
+
+	if ((old_pcis==perf_stats.pcis)&&(old_thinints==perf_stats.thinints))
+		perf_stats.start_time_inbound=NOW;
+	else
+		old_pcis=perf_stats.pcis;
+#endif /* QDIO_PERFORMANCE_STATS */
+
+	i=qdio_get_inbound_buffer_frontier(q);
+	if ( (i!=GET_SAVED_FRONTIER(q)) ||
+	     (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
+		SAVE_FRONTIER(q,i);
+		if ((!q->siga_sync)&&(!q->hydra_gives_outbound_pcis))
+			SAVE_TIMESTAMP(q);
+
+		QDIO_DBF_TEXT4(0,trace,"inhasmvd");
+		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+		return 1;
+	} else {
+		QDIO_DBF_TEXT4(0,trace,"inhsntmv");
+		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+		return 0;
+	}
+}
+
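Under QDIO_USE_PROCESSING_STATE, the frontier scan above parks the most recently seen PRIMED buffer in the PROCESSING state so the adapter raises no further interrupts for it, and retires the previously parked buffer to NOT_INIT. A simplified model of that bookkeeping (states and names are stand-ins):

```c
/* Sketch of the PROCESSING-state polling idea, not driver code. */
#include <stdio.h>

#define Q_SIZE 128
#define MASK   (Q_SIZE - 1)

enum slsb { NOT_INIT, PRIMED, PROCESSING };

static int scan_input(enum slsb *slsb, int f, int used)
{
	int last = -1;				/* last_position in the driver */
	int stop = f + (used < MASK ? used : MASK);

	while (f != stop && slsb[f & MASK] == PRIMED) {
		if (last >= 0)
			slsb[last] = NOT_INIT;	/* retire previous buffer */
		last = f & MASK;
		f++;
	}
	if (last >= 0)
		slsb[last] = PROCESSING;	/* park newest, suppress irqs */
	return f & MASK;
}

int main(void)
{
	enum slsb ring[Q_SIZE] = { PRIMED, PRIMED, PRIMED };
	int frontier = scan_input(ring, 0, 3);

	/* buffers 0 and 1 retired, buffer 2 parked in PROCESSING */
	printf("frontier=%d states=%d,%d,%d\n",
	       frontier, ring[0], ring[1], ring[2]);
	return 0;
}
```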
+/* means, no more buffers to be filled */
+inline static int
+tiqdio_is_inbound_q_done(struct qdio_q *q)
+{
+	int no_used;
+#ifdef CONFIG_QDIO_DEBUG
+	char dbf_text[15];
+#endif
+
+	no_used=atomic_read(&q->number_of_buffers_used);
+
+	/* propagate the change from 82 to 80 through VM */
+	SYNC_MEMORY;
+
+#ifdef CONFIG_QDIO_DEBUG
+	if (no_used) {
+		sprintf(dbf_text,"iqisnt%02x",no_used);
+		QDIO_DBF_TEXT4(0,trace,dbf_text);
+	} else {
+		QDIO_DBF_TEXT4(0,trace,"iniqisdo");
+	}
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+#endif /* CONFIG_QDIO_DEBUG */
+
+	if (!no_used)
+		return 1;
+
+	if (!q->siga_sync)
+		/* we'll check for more primed buffers in qeth_stop_polling */
+		return 0;
+
+	if (q->slsb.acc.val[q->first_to_check]!=SLSB_P_INPUT_PRIMED)
+		/*
+		 * nothing more to do, if next buffer is not PRIMED.
+		 * note that we did a SYNC_MEMORY before, that there
+		 * has been a synchronization.
+		 * we will return 0 below, as there is nothing to do
+		 * (stop_polling not necessary, as we have not been
+		 * using the PROCESSING state
+		 */
+		return 0;
+
+	/*
+	 * ok, the next input buffer is primed. that means, that device state
+	 * change indicator and adapter local summary are set, so we will find
+	 * it next time.
+	 * we will return 0 below, as there is nothing to do, except scheduling
+	 * ourselves for the next time.
+	 */
+	tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
+	tiqdio_sched_tl();
+	return 0;
+}
+
+inline static int
+qdio_is_inbound_q_done(struct qdio_q *q)
+{
+	int no_used;
+#ifdef CONFIG_QDIO_DEBUG
+	char dbf_text[15];
+#endif
+
+	no_used=atomic_read(&q->number_of_buffers_used);
+
+	/*
+	 * we need that one for synchronization with the adapter, as it
+	 * does a kind of PCI avoidance
+	 */
+	SYNC_MEMORY;
+
+	if (!no_used) {
+		QDIO_DBF_TEXT4(0,trace,"inqisdnA");
+		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+		QDIO_DBF_TEXT4(0,trace,dbf_text);
+		return 1;
+	}
+
+	if (q->slsb.acc.val[q->first_to_check]==SLSB_P_INPUT_PRIMED) {
+		/* we got something to do */
+		QDIO_DBF_TEXT4(0,trace,"inqisntA");
+		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+		return 0;
+	}
+
+	/* on VM, we don't poll, so the q is always done here */
+	if (q->siga_sync)
+		return 1;
+	if (q->hydra_gives_outbound_pcis)
+		return 1;
+
+	/*
+	 * at this point we know, that inbound first_to_check
+	 * has (probably) not moved (see qdio_inbound_processing)
+	 */
+	if (NOW>GET_SAVED_TIMESTAMP(q)+q->timing.threshold) {
+#ifdef CONFIG_QDIO_DEBUG
+		QDIO_DBF_TEXT4(0,trace,"inqisdon");
+		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+		sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
+		QDIO_DBF_TEXT4(0,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+		return 1;
+	} else {
+#ifdef CONFIG_QDIO_DEBUG
+		QDIO_DBF_TEXT4(0,trace,"inqisntd");
+		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+		sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
+		QDIO_DBF_TEXT4(0,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+		return 0;
+	}
+}
+
+inline static void
+qdio_kick_inbound_handler(struct qdio_q *q)
+{
+	int count, start, end, real_end, i;
+#ifdef CONFIG_QDIO_DEBUG
+	char dbf_text[15];
+#endif
+
+	QDIO_DBF_TEXT4(0,trace,"kickinh");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	start=q->first_element_to_kick;
+	real_end=q->first_to_check;
+	end=(real_end+QDIO_MAX_BUFFERS_PER_Q-1)&(QDIO_MAX_BUFFERS_PER_Q-1);
+
+	i=start;
+	count=0;
+	while (1) {
+		count++;
+		if (i==end)
+			break;
+		i=(i+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
+	}
+
+#ifdef CONFIG_QDIO_DEBUG
+	sprintf(dbf_text,"s=%2xc=%2x",start,count);
+	QDIO_DBF_TEXT4(0,trace,dbf_text);
+#endif /* CONFIG_QDIO_DEBUG */
+
+	if (likely(q->state==QDIO_IRQ_STATE_ACTIVE))
+		q->handler(q->cdev,
+			   QDIO_STATUS_INBOUND_INT|q->error_status_flags,
+			   q->qdio_error,q->siga_error,q->q_no,start,count,
+			   q->int_parm);
+
+	/* for the next time: */
+	q->first_element_to_kick=real_end;
+	q->qdio_error=0;
+	q->siga_error=0;
+	q->error_status_flags=0;
+
+#ifdef QDIO_PERFORMANCE_STATS
+	perf_stats.inbound_time+=NOW-perf_stats.start_time_inbound;
+	perf_stats.inbound_cnt++;
+#endif /* QDIO_PERFORMANCE_STATS */
+}
+
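The counting loop in qdio_kick_inbound_handler (and the masked arithmetic in its outbound twin) measures the inclusive ring distance from start to end. On a power-of-two ring this collapses to a closed form; the sketch below checks the equivalence exhaustively:

```c
/* Ring-distance equivalence check; standalone, not driver code. */
#include <assert.h>
#include <stdio.h>

#define Q_SIZE 128
#define MASK   (Q_SIZE - 1)

static int count_loop(int start, int end)	/* as in the driver */
{
	int i = start, count = 0;

	while (1) {
		count++;
		if (i == end)
			break;
		i = (i + 1) & MASK;
	}
	return count;
}

static int count_closed(int start, int end)	/* closed form */
{
	return ((end - start) & MASK) + 1;
}

int main(void)
{
	int s, e;

	for (s = 0; s < Q_SIZE; s++)
		for (e = 0; e < Q_SIZE; e++)
			assert(count_loop(s, e) == count_closed(s, e));
	printf("count(126, 1) = %d\n", count_closed(126, 1)); /* wraps: 4 */
	return 0;
}
```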
+static inline void
+__tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
+{
+	struct qdio_irq *irq_ptr;
+	struct qdio_q *oq;
+	int i;
+
+	QDIO_DBF_TEXT4(0,trace,"iqinproc");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	/*
+	 * we first want to reserve the q, so that we know, that we don't
+	 * interrupt ourselves and call qdio_unmark_q, as is_in_shutdown might
+	 * be set
+	 */
+	if (unlikely(qdio_reserve_q(q))) {
+		qdio_release_q(q);
+#ifdef QDIO_PERFORMANCE_STATS
+		ii_p_c++;
+#endif /* QDIO_PERFORMANCE_STATS */
+		/*
+		 * as we might just be about to stop polling, we make
+		 * sure that we check again at least once more
+		 */
+		tiqdio_sched_tl();
+		return;
+	}
+#ifdef QDIO_PERFORMANCE_STATS
+	ii_p_nc++;
+#endif /* QDIO_PERFORMANCE_STATS */
+	if (unlikely(atomic_read(&q->is_in_shutdown))) {
+		qdio_unmark_q(q);
+		goto out;
+	}
+
+	/*
+	 * we reset spare_ind_was_set, when the queue does not use the
+	 * spare indicator
+	 */
+	if (spare_ind_was_set)
+		spare_ind_was_set = (q->dev_st_chg_ind == &spare_indicator);
+
+	if (!(*(q->dev_st_chg_ind)) && !spare_ind_was_set)
+		goto out;
+	/*
+	 * q->dev_st_chg_ind is the indicator, be it shared or not.
+	 * only clear it, if indicator is non-shared
+	 */
+	if (!spare_ind_was_set)
+		tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind);
+
+	if (q->hydra_gives_outbound_pcis) {
+		if (!q->siga_sync_done_on_thinints) {
+			SYNC_MEMORY_ALL;
+		} else if ((!q->siga_sync_done_on_outb_tis)&&
+			   (q->hydra_gives_outbound_pcis)) {
+			SYNC_MEMORY_ALL_OUTB;
+		}
+	} else {
+		SYNC_MEMORY;
+	}
+	/*
+	 * maybe we have to do work on our outbound queues... at least
+	 * we have to check the outbound-int-capable thinint-capable
+	 * queues
+	 */
+	if (q->hydra_gives_outbound_pcis) {
+		irq_ptr = (struct qdio_irq*)q->irq_ptr;
+		for (i=0;i<irq_ptr->no_output_qs;i++) {
+			oq = irq_ptr->output_qs[i];
+#ifdef QDIO_PERFORMANCE_STATS
+			perf_stats.tl_runs--;
+#endif /* QDIO_PERFORMANCE_STATS */
+			if (!qdio_is_outbound_q_done(oq))
+				__qdio_outbound_processing(oq);
+		}
+	}
+
+	if (!qdio_has_inbound_q_moved(q))
+		goto out;
+
+	qdio_kick_inbound_handler(q);
+	if (tiqdio_is_inbound_q_done(q))
+		if (!qdio_stop_polling(q)) {
+			/*
+			 * we set the flags to get into the stuff next time,
+			 * see also comment in qdio_stop_polling
+			 */
+			tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
+			tiqdio_sched_tl();
+		}
+out:
+	qdio_release_q(q);
+}
+
+static void
+tiqdio_inbound_processing(struct qdio_q *q)
+{
+	__tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount));
+}
+
+static inline void
+__qdio_inbound_processing(struct qdio_q *q)
+{
+	int q_laps=0;
+
+	QDIO_DBF_TEXT4(0,trace,"qinproc");
+	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
+
+	if (unlikely(qdio_reserve_q(q))) {
+		qdio_release_q(q);
+#ifdef QDIO_PERFORMANCE_STATS
+		i_p_c++;
+#endif /* QDIO_PERFORMANCE_STATS */
+		/* as we're sissies, we'll check next time */
+		if (likely(!atomic_read(&q->is_in_shutdown))) {
+			qdio_mark_q(q);
+			QDIO_DBF_TEXT4(0,trace,"busy,agn");
+		}
+		return;
+	}
+#ifdef QDIO_PERFORMANCE_STATS
+	i_p_nc++;
+	perf_stats.tl_runs++;
+#endif /* QDIO_PERFORMANCE_STATS */
+
+again:
+	if (qdio_has_inbound_q_moved(q)) {
+		qdio_kick_inbound_handler(q);
+		if (!qdio_stop_polling(q)) {
+			q_laps++;
+			if (q_laps<QDIO_Q_LAPS)
+				goto again;
+		}
+		qdio_mark_q(q);
+	} else {
+		if (!qdio_is_inbound_q_done(q))