diff --git a/sys/dev/netif/iwn/if_iwn.c b/sys/dev/netif/iwn/if_iwn.c index cda8e15..8615536 100644 --- a/sys/dev/netif/iwn/if_iwn.c +++ b/sys/dev/netif/iwn/if_iwn.c @@ -3417,7 +3417,7 @@ iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *ucred) case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (!(ifp->if_flags & IFF_RUNNING)) { - iwn_init_locked(sc); + iwn_init(sc); if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL) startall = 1; else @@ -3425,7 +3425,7 @@ iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *ucred) } } else { if (ifp->if_flags & IFF_RUNNING) - iwn_stop_locked(sc); + iwn_stop(sc); } if (startall) ieee80211_start_all(ic); @@ -6180,7 +6180,9 @@ iwn_init(void *arg) struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; + wlan_serialize_enter(); iwn_init_locked(sc); + wlan_serialize_exit(); if (ifp->if_flags & IFF_RUNNING) ieee80211_start_all(ic); @@ -6202,7 +6204,9 @@ iwn_stop_locked(struct iwn_softc *sc) static void iwn_stop(struct iwn_softc *sc) { + wlan_serialize_enter(); iwn_stop_locked(sc); + wlan_serialize_exit(); } /* @@ -6328,8 +6332,8 @@ iwn_hw_reset_task(void *arg0, int pending) wlan_serialize_enter(); ifp = sc->sc_ifp; ic = ifp->if_l2com; - iwn_stop(sc); - iwn_init(sc); + iwn_stop_locked(sc); + iwn_init_locked(sc); ieee80211_notify_radio(ic, 1); wlan_serialize_exit(); } @@ -6347,7 +6351,7 @@ iwn_radio_on_task(void *arg0, int pending) ic = ifp->if_l2com; vap = TAILQ_FIRST(&ic->ic_vaps); if (vap != NULL) { - iwn_init(sc); + iwn_init_locked(sc); ieee80211_init(vap); } wlan_serialize_exit(); @@ -6365,7 +6369,7 @@ iwn_radio_off_task(void *arg0, int pending) ifp = sc->sc_ifp; ic = ifp->if_l2com; vap = TAILQ_FIRST(&ic->ic_vaps); - iwn_stop(sc); + iwn_stop_locked(sc); if (vap != NULL) ieee80211_stop(vap); @@ -6416,7 +6420,7 @@ iwn_pci_suspend(device_t dev) struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); wlan_serialize_enter(); - iwn_stop(sc); + iwn_stop_locked(sc); if (vap != NULL) 
ieee80211_stop(vap); wlan_serialize_exit(); @@ -6440,7 +6444,7 @@ iwn_pci_resume(device_t dev) pci_write_config(dev, 0x41, 0, 1); if (ifp->if_flags & IFF_UP) { - iwn_init(sc); + iwn_init_locked(sc); if (vap != NULL) ieee80211_init(vap); if (ifp->if_flags & IFF_RUNNING) diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c index 415c578..8e9a7e4 100644 --- a/sys/kern/uipc_mbuf.c +++ b/sys/kern/uipc_mbuf.c @@ -255,6 +255,7 @@ int m_defragrandomfailures; struct objcache *mbuf_cache, *mbufphdr_cache; struct objcache *mclmeta_cache; struct objcache *mbufcluster_cache, *mbufphdrcluster_cache; +struct objcache *mbufjcluster_cache; int nmbclusters; int nmbufs; @@ -471,7 +472,7 @@ mclmeta_dtor(void *obj, void *private) } static void -linkcluster(struct mbuf *m, struct mbcluster *cl) +linkjcluster(struct mbuf *m, struct mbcluster *cl, uint size) { /* * Add the cluster to the mbuf. The caller will detect that the @@ -481,13 +482,19 @@ linkcluster(struct mbuf *m, struct mbcluster *cl) m->m_ext.ext_buf = cl->mcl_data; m->m_ext.ext_ref = m_mclref; m->m_ext.ext_free = m_mclfree; - m->m_ext.ext_size = MCLBYTES; + m->m_ext.ext_size = size; atomic_add_int(&cl->mcl_refs, 1); m->m_data = m->m_ext.ext_buf; m->m_flags |= M_EXT | M_EXT_CLUSTER; } +static void +linkcluster(struct mbuf *m, struct mbcluster *cl) +{ + linkjcluster(m, cl, MCLBYTES); +} + static boolean_t mbufphdrcluster_ctor(void *obj, void *private, int ocflags) { @@ -522,6 +529,23 @@ mbufcluster_ctor(void *obj, void *private, int ocflags) return (TRUE); } +static boolean_t +mbufjcluster_ctor(void *obj, void *private, int ocflags) +{ + struct mbuf *m = obj; + struct mbcluster *cl; + + mbuf_ctor(obj, private, ocflags); + cl = objcache_get(mclmeta_cache, ocflags); + if (cl == NULL) { + ++mbstat[mycpu->gd_cpuid].m_drops; + return (FALSE); + } + m->m_flags |= M_CLCACHE; + linkjcluster(m, cl, MJUMPAGESIZE); + return (TRUE); +} + /* * Used for both the cluster and cluster PHDR caches. 
* @@ -560,6 +584,7 @@ mbinit(void *dummy) */ for (i = 0; i < ncpus; i++) { atomic_set_long_nonlocked(&mbstat[i].m_msize, MSIZE); + /* XXX: fix to depend on cluster size. JAT */ atomic_set_long_nonlocked(&mbstat[i].m_mclbytes, MCLBYTES); atomic_set_long_nonlocked(&mbstat[i].m_minclsize, MINCLSIZE); atomic_set_long_nonlocked(&mbstat[i].m_mlen, MLEN); @@ -602,6 +627,12 @@ mbinit(void *dummy) objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args); mb_limit += limit; + limit = nmbclusters; + mbufjcluster_cache = objcache_create("mbuf + jcluster", &limit, 0, + mbufjcluster_ctor, mbufcluster_dtor, NULL, + objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args); + mb_limit += limit; + /* * Adjust backing kmalloc pools' limit * @@ -611,6 +642,7 @@ mbinit(void *dummy) cl_limit += cl_limit / 8; kmalloc_raise_limit(mclmeta_malloc_args.mtype, mclmeta_malloc_args.objsize * cl_limit); + /* XXX: should be largest m_bufcluster size. JAT */ kmalloc_raise_limit(M_MBUFCL, MCLBYTES * cl_limit); mb_limit += mb_limit / 8; @@ -775,6 +807,18 @@ m_getclr(int how, int type) return (m); } +struct mbuf * +m_getjcl(int how, short type, int flags, size_t size) +{ + struct mbuf *m = NULL; +#if 0 + int ocflags = MBTOM(how); + int ntries = 0; +#endif /* if 0 */ + + return (m); +} + /* * Returns an mbuf with an attached cluster. * Because many network drivers use this kind of buffers a lot, it is @@ -1402,6 +1446,7 @@ m_dup_data(struct mbuf *m, int how) /* * Optimize the mbuf allocation but do not get too carried away. */ + /* XXX: fix to depend on cluster size. 
JAT */ if (m->m_next || m->m_len > MLEN) gsize = MCLBYTES; else diff --git a/sys/sys/mbuf.h b/sys/sys/mbuf.h index 35e448e..6446f3a 100644 --- a/sys/sys/mbuf.h +++ b/sys/sys/mbuf.h @@ -497,6 +497,7 @@ void m_freem(struct mbuf *); struct mbuf *m_get(int, int); struct mbuf *m_getc(int len, int how, int type); struct mbuf *m_getcl(int how, short type, int flags); +struct mbuf *m_getjcl(int how, short type, int flags, size_t size); struct mbuf *m_getclr(int, int); struct mbuf *m_gethdr(int, int); struct mbuf *m_getm(struct mbuf *, int, int, int);