Bug #1964 » iwn_serializer.patch

**sys/dev/netif/iwn/if_iwn.c**

```diff
 	case SIOCSIFFLAGS:
 		if (ifp->if_flags & IFF_UP) {
 			if (!(ifp->if_flags & IFF_RUNNING)) {
-				iwn_init_locked(sc);
+				iwn_init(sc);
 				if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)
 					startall = 1;
 				else
@@ ... @@
 			}
 		} else {
 			if (ifp->if_flags & IFF_RUNNING)
-				iwn_stop_locked(sc);
+				iwn_stop(sc);
 		}
 		if (startall)
 			ieee80211_start_all(ic);
```
```diff
 	struct ifnet *ifp = sc->sc_ifp;
 	struct ieee80211com *ic = ifp->if_l2com;
 
+	wlan_serialize_enter();
 	iwn_init_locked(sc);
+	wlan_serialize_exit();
 
 	if (ifp->if_flags & IFF_RUNNING)
 		ieee80211_start_all(ic);
@@ ... @@
 static void
 iwn_stop(struct iwn_softc *sc)
 {
+	wlan_serialize_enter();
 	iwn_stop_locked(sc);
+	wlan_serialize_exit();
 }
 
 /*
```
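The convention these wrappers establish: code paths that do not yet hold the wlan serializer call `iwn_init()`/`iwn_stop()`, while paths that already hold it (or take it explicitly) call the `_locked` variants directly. A minimal sketch of a caller on each side of that convention; both functions are hypothetical and only illustrate the pairing:

```c
/* Hypothetical callers; not part of the patch. */
static void
example_unserialized_path(struct iwn_softc *sc)
{
	/* Serializer not held here: use the wrappers. */
	iwn_stop(sc);
	iwn_init(sc);
}

static void
example_serialized_task(struct iwn_softc *sc)
{
	/* Body runs with the serializer taken explicitly. */
	wlan_serialize_enter();
	iwn_stop_locked(sc);
	iwn_init_locked(sc);
	wlan_serialize_exit();
}
```

This matches the hunks in this patch: the ioctl path above moves to the wrappers, while the paths below, which bracket their bodies with `wlan_serialize_enter()`/`wlan_serialize_exit()`, move to the `_locked` variants.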
```diff
+	wlan_serialize_enter();
 	ifp = sc->sc_ifp;
 	ic = ifp->if_l2com;
-	iwn_stop(sc);
-	iwn_init(sc);
+	iwn_stop_locked(sc);
+	iwn_init_locked(sc);
 	ieee80211_notify_radio(ic, 1);
+	wlan_serialize_exit();
 }
@@ ... @@
 	ic = ifp->if_l2com;
 	vap = TAILQ_FIRST(&ic->ic_vaps);
 	if (vap != NULL) {
-		iwn_init(sc);
+		iwn_init_locked(sc);
 		ieee80211_init(vap);
 	}
+	wlan_serialize_exit();
@@ ... @@
 	ifp = sc->sc_ifp;
 	ic = ifp->if_l2com;
 	vap = TAILQ_FIRST(&ic->ic_vaps);
-	iwn_stop(sc);
+	iwn_stop_locked(sc);
 	if (vap != NULL)
 		ieee80211_stop(vap);
@@ ... @@
 	struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
 
+	wlan_serialize_enter();
-	iwn_stop(sc);
+	iwn_stop_locked(sc);
 	if (vap != NULL)
 		ieee80211_stop(vap);
+	wlan_serialize_exit();
@@ ... @@
 	pci_write_config(dev, 0x41, 0, 1);
 
 	if (ifp->if_flags & IFF_UP) {
-		iwn_init(sc);
+		iwn_init_locked(sc);
 		if (vap != NULL)
 			ieee80211_init(vap);
 		if (ifp->if_flags & IFF_RUNNING)
```
**sys/kern/uipc_mbuf.c**

```diff
 struct objcache *mbuf_cache, *mbufphdr_cache;
 struct objcache *mclmeta_cache;
 struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;
+struct objcache *mbufjcluster_cache;
 int nmbclusters;
 int nmbufs;
@@ ... @@
 }
 
 static void
-linkcluster(struct mbuf *m, struct mbcluster *cl)
+linkjcluster(struct mbuf *m, struct mbcluster *cl, uint size)
 {
 	/*
 	 * Add the cluster to the mbuf.  The caller will detect that the
@@ ... @@
 	m->m_ext.ext_buf = cl->mcl_data;
 	m->m_ext.ext_ref = m_mclref;
 	m->m_ext.ext_free = m_mclfree;
-	m->m_ext.ext_size = MCLBYTES;
+	m->m_ext.ext_size = size;
 	atomic_add_int(&cl->mcl_refs, 1);
 
 	m->m_data = m->m_ext.ext_buf;
 	m->m_flags |= M_EXT | M_EXT_CLUSTER;
 }
 
+static void
+linkcluster(struct mbuf *m, struct mbcluster *cl)
+{
+	linkjcluster(m, cl, MCLBYTES);
+}
+
 static boolean_t
 mbufphdrcluster_ctor(void *obj, void *private, int ocflags)
 {
```
```diff
 	return (TRUE);
 }
 
+static boolean_t
+mbufjcluster_ctor(void *obj, void *private, int ocflags)
+{
+	struct mbuf *m = obj;
+	struct mbcluster *cl;
+
+	mbuf_ctor(obj, private, ocflags);
+	cl = objcache_get(mclmeta_cache, ocflags);
+	if (cl == NULL) {
+		++mbstat[mycpu->gd_cpuid].m_drops;
+		return (FALSE);
+	}
+	m->m_flags |= M_CLCACHE;
+	linkjcluster(m, cl, MJUMPAGESIZE);
+	return (TRUE);
+}
+
 /*
  * Used for both the cluster and cluster PHDR caches.
  *
@@ ... @@
 	 */
 	for (i = 0; i < ncpus; i++) {
 		atomic_set_long_nonlocked(&mbstat[i].m_msize, MSIZE);
+		/* XXX: fix to depend on cluster size. JAT */
 		atomic_set_long_nonlocked(&mbstat[i].m_mclbytes, MCLBYTES);
 		atomic_set_long_nonlocked(&mbstat[i].m_minclsize, MINCLSIZE);
 		atomic_set_long_nonlocked(&mbstat[i].m_mlen, MLEN);
@@ ... @@
 	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
 	mb_limit += limit;
 
+	limit = nmbclusters;
+	mbufjcluster_cache = objcache_create("mbuf + jcluster", &limit, 0,
+	    mbufjcluster_ctor, mbufcluster_dtor, NULL,
+	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
+	mb_limit += limit;
+
 	/*
 	 * Adjust backing kmalloc pools' limit
 	 *
@@ ... @@
 	cl_limit += cl_limit / 8;
 	kmalloc_raise_limit(mclmeta_malloc_args.mtype,
 	    mclmeta_malloc_args.objsize * cl_limit);
+	/* XXX: should be largest m_bufcluster size. JAT */
 	kmalloc_raise_limit(M_MBUFCL, MCLBYTES * cl_limit);
 
 	mb_limit += mb_limit / 8;
@@ ... @@
 	return (m);
 }
 
+struct mbuf *
+m_getjcl(int how, short type, int flags, size_t size)
+{
+	struct mbuf *m = NULL;
+#if 0
+	int ocflags = MBTOM(how);
+	int ntries = 0;
+#endif /* if 0 */
+
+	return (m);
+}
+
 /*
  * Returns an mbuf with an attached cluster.
  * Because many network drivers use this kind of buffers a lot, it is
```
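In this revision the `m_getjcl()` body is stubbed out with `#if 0`, so it always returns `NULL`. A minimal sketch of what the allocation path might look like once wired to the `mbufjcluster_cache` created above; the size check and the packet-header restriction are assumptions, not part of the attachment:

```c
/*
 * Hypothetical m_getjcl() body, modeled on the jcluster objcache this
 * patch creates.  Not part of the attachment.
 */
struct mbuf *
m_getjcl(int how, short type, int flags, size_t size)
{
	struct mbuf *m;
	int ocflags = MBTOM(how);

	/* Only page-sized jumbo clusters have a backing cache here. */
	if (size != MJUMPAGESIZE)
		return (NULL);

	/*
	 * A packet-header variant would need its own objcache, as the
	 * regular clusters have (mbufphdrcluster_cache); not sketched.
	 */
	if (flags & M_PKTHDR)
		return (NULL);

	m = objcache_get(mbufjcluster_cache, ocflags);
	if (m == NULL) {
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}
	m->m_type = type;
	return (m);
}
```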
```diff
 	/*
 	 * Optimize the mbuf allocation but do not get too carried away.
 	 */
+	/* XXX: fix to depend on cluster size. JAT */
 	if (m->m_next || m->m_len > MLEN)
 		gsize = MCLBYTES;
 	else
```
**sys/sys/mbuf.h**

```diff
 struct mbuf	*m_get(int, int);
 struct mbuf	*m_getc(int len, int how, int type);
 struct mbuf	*m_getcl(int how, short type, int flags);
+struct mbuf	*m_getjcl(int how, short type, int flags, uint size);
 struct mbuf	*m_getclr(int, int);
 struct mbuf	*m_gethdr(int, int);
 struct mbuf	*m_getm(struct mbuf *, int, int, int);
```
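As posted, this prototype takes `uint size` while the definition in uipc_mbuf.c takes `size_t size`; the two presumably want to agree. Once the stub is filled in, a driver receive path would request a page-sized jumbo mbuf roughly as below (hypothetical usage, following the existing `m_getcl()` call style; FreeBSD's `m_getjcl()`, which this mirrors, accepts `M_PKTHDR` here):

```c
/* Hypothetical RX-buffer allocation using the new interface. */
static int
example_rx_newbuf(struct mbuf **mp)
{
	struct mbuf *m;

	m = m_getjcl(MB_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MJUMPAGESIZE;
	*mp = m;
	return (0);
}
```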