*
* Michael Callahan <callahan@maths.ox.ac.uk>
* Al Longyear <longyear@netcom.com>
+ * Paul Mackerras <Paul.Mackerras@cs.anu.edu.au>
*
* Dynamic PPP devices by Jim Freeman <jfree@caldera.com>.
* ppp_tty_receive ``noisy-raise-bug'' fixed by Ove Ewerlid <ewerlid@syscon.uu.se>
*
- * ==FILEVERSION 970626==
+ * ==FILEVERSION 980319==
*
* NOTE TO MAINTAINERS:
* If you modify this file at all, please set the number above to the
#define CHECK_CHARACTERS 1
#define PPP_COMPRESS 1
-#ifndef PPP_MAX_DEV
-#define PPP_MAX_DEV 256
-#endif
-
-/* $Id: ppp.c,v 1.13 1997/07/14 03:50:50 paulus Exp $
- * Added dynamic allocation of channels to eliminate
- * compiled-in limits on the number of channels.
- *
- * Dynamic channel allocation code Copyright 1995 Caldera, Inc.,
- * released under the GNU General Public License Version 2.
- */
+/* $Id: ppp.c,v 1.17 1998/03/24 23:54:59 paulus Exp $ */
#include <linux/version.h>
+#include <linux/config.h> /* for CONFIG_KERNELD */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
+
+#if LINUX_VERSION_CODE >= VERSION(2,1,68)
+#include <linux/rtnetlink.h>
+#endif
+
#include <linux/inet.h>
#include <linux/ioctl.h>
#include <linux/if_pppvar.h>
#include <linux/ppp-comp.h>
+#ifdef CONFIG_KERNELD
+#include <linux/kerneld.h>
+#endif
+
#ifndef PPP_IPX
#define PPP_IPX 0x2b /* IPX protocol over PPP */
#endif
#define test_and_set_bit(nr, addr) set_bit(nr, addr)
#endif
+#if LINUX_VERSION_CODE < VERSION(2,1,57)
+#define signal_pending(p) ((p)->signal & ~(p)->blocked)
+#endif
+
+#if LINUX_VERSION_CODE < VERSION(2,1,25)
+#define net_device_stats enet_statistics
+#endif
+
+#if LINUX_VERSION_CODE < VERSION(2,1,60)
+typedef int rw_ret_t;
+typedef unsigned int rw_count_t;
+#else
+typedef ssize_t rw_ret_t;
+typedef size_t rw_count_t;
+#endif
+
static int ppp_register_compressor (struct compressor *cp);
static void ppp_unregister_compressor (struct compressor *cp);
register __u8 chr);
extern inline int lock_buffer (register struct ppp_buffer *buf);
static int ppp_dev_xmit_ip (struct ppp *ppp, struct ppp_buffer *buf,
- __u8 *data, int len);
+ __u8 *data, int len, enum NPmode npmode);
static int rcv_proto_ip (struct ppp *, __u16, __u8 *, int);
static int rcv_proto_ipx (struct ppp *, __u16, __u8 *, int);
#define OPTIMIZE_FLAG_TIME 0
#endif
-#ifndef PPP_MAX_DEV
-#define PPP_MAX_DEV 256
-#endif
-
/*
* Parameters which may be changed via insmod.
*/
static int flag_time = OPTIMIZE_FLAG_TIME;
-static int max_dev = PPP_MAX_DEV;
#if LINUX_VERSION_CODE >= VERSION(2,1,19)
MODULE_PARM(flag_time, "i");
-MODULE_PARM(max_dev, "i");
#endif
/*
static int ppp_dev_ioctl (struct device *dev, struct ifreq *ifr, int cmd);
static int ppp_dev_close (struct device *);
static int ppp_dev_xmit (sk_buff *, struct device *);
-static struct enet_statistics *ppp_dev_stats (struct device *);
+static struct net_device_stats *ppp_dev_stats (struct device *);
#if LINUX_VERSION_CODE < VERSION(2,1,15)
static int ppp_dev_header (sk_buff *, struct device *, __u16,
* TTY callbacks
*/
-static int ppp_tty_read (struct tty_struct *, struct file *, __u8 *,
- unsigned int);
-static int ppp_tty_write (struct tty_struct *, struct file *, const __u8 *,
- unsigned int);
+static rw_ret_t ppp_tty_read(struct tty_struct *, struct file *, __u8 *,
+ rw_count_t);
+static rw_ret_t ppp_tty_write(struct tty_struct *, struct file *, const __u8 *,
+ rw_count_t);
static int ppp_tty_ioctl (struct tty_struct *, struct file *, unsigned int,
unsigned long);
#if LINUX_VERSION_CODE < VERSION(2,1,23)
static int ppp_tty_select (struct tty_struct *tty, struct inode *inode,
struct file *filp, int sel_type, select_table * wait);
#else
-static unsigned int ppp_tty_poll (struct tty_struct *tty, struct file *filp, poll_table * wait);
+static unsigned int ppp_tty_poll (struct tty_struct *tty, struct file *filp,
+ poll_table * wait);
#endif
static int ppp_tty_open (struct tty_struct *);
static void ppp_tty_close (struct tty_struct *);
*/
(void) memset (&ppp_ldisc, 0, sizeof (ppp_ldisc));
ppp_ldisc.magic = TTY_LDISC_MAGIC;
+#if LINUX_VERSION_CODE >= VERSION(2,1,28)
+ ppp_ldisc.name = "ppp";
+#endif
ppp_ldisc.open = ppp_tty_open;
ppp_ldisc.close = ppp_tty_close;
ppp_ldisc.read = ppp_tty_read;
dev->rebuild_header = ppp_dev_rebuild;
#endif
- dev->hard_header_len = PPP_HARD_HDR_LEN;
+ dev->hard_header_len = PPP_HDRLEN;
/* device INFO */
dev->mtu = PPP_MTU;
/* New-style flags */
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+#if LINUX_VERSION_CODE < VERSION(2,1,67)
dev->family = AF_INET;
dev->pa_addr = 0;
dev->pa_brdaddr = 0;
dev->pa_mask = 0;
dev->pa_alen = 4; /* sizeof (__u32) */
+#endif
return 0;
}
ppp->read_wait = NULL;
ppp->write_wait = NULL;
ppp->last_xmit = jiffies - flag_time;
+ ppp->last_recv = jiffies;
/* clear statistics */
memset(&ppp->stats, 0, sizeof (struct pppstat));
- memset(&ppp->estats, 0, sizeof(struct enet_statistics));
-
- /* Reset the demand dial information */
- ppp->ddinfo.xmit_idle= /* time since last NP packet sent */
- ppp->ddinfo.recv_idle=jiffies; /* time since last NP packet received */
+ memset(&ppp->estats, 0, sizeof(ppp->estats));
/* PPP compression data */
ppp->sc_xc_state =
extern inline int
lock_buffer (register struct ppp_buffer *buf)
{
- register int state;
+ unsigned long state;
unsigned long flags;
/*
* Save the current state and if free then set it to the "busy" state
*/
if (new_wbuf == NULL || new_tbuf == NULL ||
new_rbuf == NULL || new_cbuf == NULL) {
- if (ppp->flags & SC_DEBUG)
- printk (KERN_ERR
- "ppp: failed to allocate new buffers\n");
+ printk (KERN_ERR "ppp: failed to allocate new buffers\n");
ppp_free_buf (new_wbuf);
ppp_free_buf (new_tbuf);
dev = ppp2dev (ppp);
if (ppp->flags & SC_DEBUG)
- printk(KERN_DEBUG "ppp%s released\n", ppp->name);
+ printk(KERN_DEBUG "%s released\n", ppp->name);
ppp_ccp_closed (ppp);
if (tty != NULL && tty->disc_data == ppp)
tty->disc_data = NULL; /* Break the tty->ppp link */
- if (dev && dev->flags & IFF_UP) {
- dev->flags &= ~IFF_UP; /* prevent recursion */
- dev_close (dev); /* close the device properly */
- }
-
ppp_free_buf (ppp->rbuf);
ppp_free_buf (ppp->wbuf);
ppp_free_buf (ppp->cbuf);
* There should not be an existing table for this slot.
*/
if (ppp) {
- if (ppp->flags & SC_DEBUG)
- printk (KERN_ERR
+ printk (KERN_ERR
"ppp_tty_open: gack! tty already associated to %s!\n",
ppp->magic == PPP_MAGIC ? ppp2dev(ppp)->name
: "unknown");
} else {
ppp = ppp_alloc();
if (ppp == NULL) {
- if (ppp->flags & SC_DEBUG)
- printk (KERN_ERR "ppp_alloc failed\n");
+ printk (KERN_ERR "ppp_alloc failed\n");
return -ENFILE;
}
/*
*/
ppp->slcomp = slhc_init (16, 16);
if (ppp->slcomp == NULL) {
- if (ppp->flags & SC_DEBUG)
- printk (KERN_ERR "ppp_tty_open: "
- "no space for compression buffers!\n");
+ printk (KERN_ERR "ppp_tty_open: "
+ "no space for compression buffers!\n");
ppp_release (ppp);
return -ENOMEM;
}
*/
ppp->ubuf = ppp_alloc_buf (RBUFSIZE, BUFFER_TYPE_TTY_RD);
if (ppp->ubuf == NULL) {
- if (ppp->flags & SC_DEBUG)
- printk (KERN_ERR "ppp_tty_open: "
- "no space for user receive buffer\n");
+ printk (KERN_ERR "ppp_tty_open: "
+ "no space for user receive buffer\n");
ppp_release (ppp);
return -ENOMEM;
}
* transmission block.
*/
ppp2dev (ppp)->tbusy = 0;
- if (ppp2dev (ppp) -> flags & IFF_UP) {
- mark_bh (NET_BH);
- }
+ mark_bh (NET_BH);
/*
* Wake up the transmission queue for all completion events.
*/
*/
if (ppp->flags & SC_LOG_RAWIN)
ppp_print_buffer ("receive buffer", data, count);
+
/*
* Collect the character and error condition for the character. Set the toss
* flag for the first character error.
}
++flags;
}
+
/*
* Set the flags for d7 being 0/1 and parity being even/odd so that
* the normal processing would have all flags set at the end of the
CHECK_PPP(0);
CHECK_BUF_MAGIC(ppp->rbuf);
+
/*
* If there is a pending error from the receiver then log it and discard
* the damaged frame.
*/
new_data = kmalloc (ppp->mru + PPP_HDRLEN, GFP_ATOMIC);
if (new_data == NULL) {
- if (ppp->flags & SC_DEBUG)
- printk (KERN_ERR
- "ppp_doframe: no memory\n");
+ printk (KERN_ERR "ppp_doframe: no memory\n");
new_count = DECOMP_ERROR;
} else {
new_count = (*ppp->sc_rcomp->decompress)
case DECOMP_FATALERROR:
ppp->flags |= SC_DC_FERROR;
- if (ppp->flags & SC_DEBUG)
- printk(KERN_ERR "ppp: fatal decomp error\n");
+ printk(KERN_ERR "ppp: fatal decomp error\n");
break;
}
/*
(*ppp->sc_rcomp->incomp) (ppp->sc_rc_state,
data, count);
}
+ } else if (proto == PPP_COMP && (ppp->flags & SC_DEBUG)) {
+ printk(KERN_DEBUG "ppp: frame not decompressed: "
+ "flags=%x, count=%d, sc_rc_state=%p\n",
+ ppp->flags, count, ppp->sc_rc_state);
}
/*
* Process the uncompressed frame.
skb->free = 1;
#endif
- ppp->ddinfo.recv_idle = jiffies;
+ ppp->last_recv = jiffies;
netif_rx (skb);
return 1;
}
}
break;
}
+ if (ppp->flags & SC_DEBUG)
+ printk(KERN_DEBUG "ppp_proto_ccp: %s code %d, flags=%x\n",
+ (rcvd? "rcvd": "sent"), CCP_CODE(dp), ppp->flags);
restore_flags(flags);
}
waiting if necessary
*/
-static int
+static rw_ret_t
ppp_tty_read (struct tty_struct *tty, struct file *file, __u8 * buf,
- unsigned int nr)
+ rw_count_t nr)
{
struct ppp *ppp = tty2ppp (tty);
__u8 c;
- int len, ret;
int error;
+ rw_ret_t len, ret;
#define GETC(c) \
{ \
if (!ppp)
return -EIO;
- if (ppp->magic != PPP_MAGIC)
- return -EIO;
+ /* if (ppp->magic != PPP_MAGIC)
+ return -EIO; */
CHECK_PPP (-ENXIO);
current->state = TASK_INTERRUPTIBLE;
schedule ();
- if (current->signal & ~current->blocked)
+ if (signal_pending(current))
return -EINTR;
continue;
}
"ppp_tty_read: sleeping(read_wait)\n");
#endif
interruptible_sleep_on (&ppp->read_wait);
- if (current->signal & ~current->blocked)
+ if (signal_pending(current))
return -EINTR;
}
if (ppp->flags & SC_DEBUG)
printk (KERN_DEBUG
- "ppp: read of %u bytes too small for %d "
- "frame\n", nr, len + 2);
+ "ppp: read of %lu bytes too small for %ld "
+ "frame\n", (unsigned long) nr, (long) len + 2);
ppp->stats.ppp_ierrors++;
error = -EOVERFLOW;
goto out;
}
clear_bit (0, &ppp->ubuf->locked);
-#if 0
- if (ppp->flags & SC_DEBUG)
- printk (KERN_DEBUG "ppp_tty_read: passing %d bytes up\n", ret);
-#endif
return ret;
out:
if (non_ip || flag_time == 0)
ins_char (buf, PPP_FLAG);
else {
- if (jiffies - ppp->last_xmit > flag_time)
+ if (jiffies - ppp->last_xmit >= flag_time)
ins_char (buf, PPP_FLAG);
}
ppp->last_xmit = jiffies;
(control == PPP_UI) &&
(proto != PPP_LCP) &&
(proto != PPP_CCP)) {
- new_data = kmalloc (ppp->mtu, GFP_ATOMIC);
+ new_data = kmalloc (ppp->mtu + PPP_HDRLEN, GFP_ATOMIC);
if (new_data == NULL) {
- if (ppp->flags & SC_DEBUG)
- printk (KERN_ERR
- "ppp_dev_xmit_frame: no memory\n");
+ printk (KERN_ERR "ppp_dev_xmit_frame: no memory\n");
return 1;
}
new_count = (*ppp->sc_xcomp->compress)
- (ppp->sc_xc_state, data, new_data, count, ppp->mtu);
+ (ppp->sc_xc_state, data, new_data, count,
+ ppp->mtu + PPP_HDRLEN);
if (new_count > 0 && (ppp->flags & SC_CCP_UP)) {
ppp_dev_xmit_lower (ppp, buf, new_data, new_count, 0);
* we have to put the FCS field on ourselves
*/
-static int
+static rw_ret_t
ppp_tty_write (struct tty_struct *tty, struct file *file, const __u8 * data,
- unsigned int count)
+ rw_count_t count)
{
struct ppp *ppp = tty2ppp (tty);
__u8 *new_data;
- int proto;
+ int error;
+ struct wait_queue wait = {current, NULL};
/*
* Verify the pointers.
if (ppp->flags & SC_DEBUG)
printk (KERN_WARNING
"ppp_tty_write: truncating user packet "
- "from %u to mtu %d\n", count,
+ "from %lu to mtu %d\n", (unsigned long) count,
PPP_MTU + PPP_HDRLEN);
count = PPP_MTU + PPP_HDRLEN;
}
*/
new_data = kmalloc (count, GFP_KERNEL);
if (new_data == NULL) {
- if (ppp->flags & SC_DEBUG)
- printk (KERN_ERR
- "ppp_tty_write: no memory\n");
+ printk (KERN_ERR "ppp_tty_write: no memory\n");
return 0;
}
/*
* Retrieve the user's buffer
*/
- if (copy_from_user (new_data, data, count)) {
- kfree (new_data);
- return -EFAULT;
- }
+ COPY_FROM_USER (error, new_data, data, count);
+ if (error)
+ goto out_free;
/*
- * lock this PPP unit so we will be the only writer;
- * sleep if necessary
+ * Lock this PPP unit so we will be the only writer,
+ * sleeping if necessary.
+ *
+ * Note that we add our task to the wait queue before
+ * attempting to lock, as the lock flag may be cleared
+ * from an interrupt.
*/
- while (lock_buffer (ppp->tbuf) != 0) {
+ add_wait_queue(&ppp->write_wait, &wait);
+ while (1) {
+ error = 0;
current->timeout = 0;
-#if 0
- if (ppp->flags & SC_DEBUG)
- printk (KERN_DEBUG "ppp_tty_write: sleeping\n");
-#endif
- interruptible_sleep_on (&ppp->write_wait);
+ current->state = TASK_INTERRUPTIBLE;
+ if (lock_buffer(ppp->tbuf) == 0)
+ break;
+ schedule();
+ error = -EIO;
ppp = tty2ppp (tty);
- if (!ppp || ppp->magic != PPP_MAGIC || !ppp->inuse
- || tty != ppp->tty) {
- kfree (new_data);
- return 0;
- }
-
- if (current->signal & ~current->blocked) {
- kfree (new_data);
- return -EINTR;
+ if (!ppp || ppp->magic != PPP_MAGIC ||
+ !ppp->inuse || tty != ppp->tty) {
+ printk("ppp_tty_write: %p invalid after wait!\n", ppp);
+ break;
}
+ error = -EINTR;
+ if (signal_pending(current))
+ break;
}
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&ppp->write_wait, &wait);
+ if (error)
+ goto out_free;
+
/*
* Change the LQR frame
*/
/*
* Send the data
*/
- if (proto == PPP_IP) {
+ if (PPP_PROTOCOL(new_data) == PPP_IP) {
/*
* IP frames can be sent by pppd when we're doing
* demand-dialling. We send them via ppp_dev_xmit_ip
* to make sure that VJ compression happens properly.
*/
ppp_dev_xmit_ip(ppp, ppp->tbuf, new_data + PPP_HDRLEN,
- count - PPP_HDRLEN);
+ count - PPP_HDRLEN, NPMODE_PASS);
} else {
ppp_dev_xmit_frame (ppp, ppp->tbuf, new_data, count);
}
+ error = count;
+out_free:
kfree (new_data);
- return (int) count;
+ return error;
}
/*
- * Process the BSD compression IOCTL event for the tty device.
+ * Process the set-compression ioctl.
*/
static int
ppp_set_compression (struct ppp *ppp, struct ppp_option_data *odp)
{
struct compressor *cp;
- struct ppp_option_data data;
- int error;
- int nb;
+ int error, nb;
+ unsigned long flags;
__u8 *ptr;
__u8 ccp_option[CCP_MAX_OPTION_LENGTH];
- unsigned long flags;
+ struct ppp_option_data data;
/*
* Fetch the compression parameters
*/
- COPY_FROM_USER (error,
- &data,
- odp,
- sizeof (data));
-
+ COPY_FROM_USER (error, &data, odp, sizeof (data));
if (error != 0)
return error;
if ((__u32) nb >= (__u32)CCP_MAX_OPTION_LENGTH)
nb = CCP_MAX_OPTION_LENGTH;
- COPY_FROM_USER (error,
- ccp_option,
- ptr,
- nb);
-
+ COPY_FROM_USER (error, ccp_option, ptr, nb);
if (error != 0)
return error;
save_flags(flags);
cli();
- ppp->flags &= ~(SC_COMP_RUN | SC_DECOMP_RUN);
+ ppp->flags &= ~(data.transmit? SC_COMP_RUN: SC_DECOMP_RUN);
restore_flags(flags);
cp = find_compressor (ccp_option[0]);
- if (cp != (struct compressor *) 0) {
- /*
- * Found a handler for the protocol - try to allocate
- * a compressor or decompressor.
- */
- error = 0;
- if (data.transmit) {
- if (ppp->sc_xc_state != NULL)
- (*ppp->sc_xcomp->comp_free)(ppp->sc_xc_state);
+#ifdef CONFIG_KERNELD
+ if (cp == NULL) {
+ char modname[32];
+ sprintf(modname, "ppp-compress-%d", ccp_option[0]);
+ request_module(modname);
+ cp = find_compressor(ccp_option[0]);
+ }
+#endif /* CONFIG_KERNELD */
- ppp->sc_xcomp = cp;
- ppp->sc_xc_state = cp->comp_alloc(ccp_option, nb);
+ if (cp == NULL)
+ goto out_no_comp;
+ /*
+ * Found a handler for the protocol - try to allocate
+ * a compressor or decompressor.
+ */
+ error = 0;
+ if (data.transmit) {
+ if (ppp->sc_xc_state != NULL)
+ (*ppp->sc_xcomp->comp_free)(ppp->sc_xc_state);
+ ppp->sc_xc_state = NULL;
- if (ppp->sc_xc_state == NULL) {
- if (ppp->flags & SC_DEBUG)
- printk(KERN_DEBUG "%s: comp_alloc failed\n",
- ppp->name);
- error = -ENOBUFS;
- } else {
- if (ppp->flags & SC_DEBUG)
- printk(KERN_DEBUG "%s: comp_alloc -> %p\n",
- ppp->name, ppp->sc_xc_state);
- }
- } else {
- if (ppp->sc_rc_state != NULL)
- (*ppp->sc_rcomp->decomp_free)(ppp->sc_rc_state);
- ppp->sc_rcomp = cp;
- ppp->sc_rc_state = cp->decomp_alloc(ccp_option, nb);
- if (ppp->sc_rc_state == NULL) {
- if (ppp->flags & SC_DEBUG)
- printk(KERN_DEBUG "%s: decomp_alloc failed\n",
- ppp->name);
- error = -ENOBUFS;
- } else {
- if (ppp->flags & SC_DEBUG)
- printk(KERN_DEBUG "%s: decomp_alloc -> %p\n",
- ppp->name, ppp->sc_rc_state);
- }
- }
- return (error);
+ ppp->sc_xcomp = cp;
+ ppp->sc_xc_state = cp->comp_alloc(ccp_option, nb);
+ if (ppp->sc_xc_state == NULL) {
+ printk(KERN_WARNING "%s: comp_alloc failed\n",
+ ppp->name);
+ error = -ENOBUFS;
+ } else if (ppp->flags & SC_DEBUG)
+ printk(KERN_DEBUG "%s: comp_alloc -> %p\n",
+ ppp->name, ppp->sc_xc_state);
+ } else {
+ if (ppp->sc_rc_state != NULL)
+ (*ppp->sc_rcomp->decomp_free)(ppp->sc_rc_state);
+ ppp->sc_rc_state = NULL;
+
+ ppp->sc_rcomp = cp;
+ ppp->sc_rc_state = cp->decomp_alloc(ccp_option, nb);
+ if (ppp->sc_rc_state == NULL) {
+ printk(KERN_WARNING "%s: decomp_alloc failed\n",
+ ppp->name);
+ error = -ENOBUFS;
+ } else if (ppp->flags & SC_DEBUG)
+ printk(KERN_DEBUG "%s: decomp_alloc -> %p\n",
+ ppp->name, ppp->sc_rc_state);
}
+ return error;
+out_no_comp:
if (ppp->flags & SC_DEBUG)
printk(KERN_DEBUG "%s: no compressor for [%x %x %x], %x\n",
ppp->name, ccp_option[0], ccp_option[1],
unsigned int param2, unsigned long param3)
{
struct ppp *ppp = tty2ppp (tty);
- register int temp_i = 0;
+ register int temp_i = 0, oldflags;
int error = 0;
+ unsigned long flags;
/*
* Verify the status of the PPP device.
*/
if (error != 0)
break;
temp_i &= SC_MASK;
- temp_i |= (ppp->flags & ~SC_MASK);
- if ((ppp->flags & SC_CCP_OPEN) &&
- (temp_i & SC_CCP_OPEN) == 0)
- ppp_ccp_closed (ppp);
+ if ((ppp->flags & SC_CCP_OPEN) && (temp_i & SC_CCP_OPEN) == 0)
+ ppp_ccp_closed(ppp);
- if ((ppp->flags | temp_i) & SC_DEBUG)
+ save_flags(flags);
+ cli();
+ oldflags = ppp->flags;
+ ppp->flags = temp_i |= (ppp->flags & ~SC_MASK);
+ restore_flags(flags);
+ if ((oldflags | temp_i) & SC_DEBUG)
printk (KERN_INFO
"ppp_tty_ioctl: set flags to %x\n", temp_i);
- ppp->flags = temp_i;
break;
/*
* Set the compression mode
case PPPIOCGIDLE:
{
struct ppp_idle cur_ddinfo;
- __u32 cur_jiffies = jiffies;
/* change absolute times to relative times. */
- cur_ddinfo.xmit_idle = (cur_jiffies - ppp->ddinfo.xmit_idle) / HZ;
- cur_ddinfo.recv_idle = (cur_jiffies - ppp->ddinfo.recv_idle) / HZ;
- COPY_TO_USER (error,
- (void *) param3,
- &cur_ddinfo,
+ cur_ddinfo.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
+ cur_ddinfo.recv_idle = (jiffies - ppp->last_recv) / HZ;
+ COPY_TO_USER (error, (void *) param3, &cur_ddinfo,
sizeof (cur_ddinfo));
}
break;
* Retrieve the extended async map
*/
case PPPIOCGXASYNCMAP:
- COPY_TO_USER (error,
- (void *) param3,
- ppp->xmit_async_map,
+ COPY_TO_USER (error, (void *) param3, ppp->xmit_async_map,
sizeof (ppp->xmit_async_map));
break;
/*
{
__u32 temp_tbl[8];
- COPY_FROM_USER (error,
- temp_tbl,
- (void *) param3,
+ COPY_FROM_USER (error, temp_tbl, (void *) param3,
sizeof (temp_tbl));
-
if (error != 0)
break;
+
temp_tbl[1] = 0x00000000;
temp_tbl[2] &= ~0x40000000;
temp_tbl[3] |= 0x60000000;
temp_i = (temp_i & 255) + 1;
if (ppp->flags & SC_DEBUG)
printk (KERN_INFO
- "ppp_tty_ioctl: set maxcid to %d\n",
- temp_i);
+ "ppp_tty_ioctl: set maxcid to %d\n", temp_i);
if (ppp->slcomp != NULL)
slhc_free (ppp->slcomp);
- ppp->slcomp = slhc_init (16, temp_i);
+ ppp->slcomp = NULL;
+ ppp->slcomp = slhc_init (16, temp_i);
if (ppp->slcomp == NULL) {
- if (ppp->flags & SC_DEBUG)
- printk (KERN_ERR
- "ppp: no space for compression buffers!\n");
+ printk (KERN_ERR "ppp_tty_ioctl: "
+ "no space for compression buffers!\n");
ppp_release (ppp);
error = -ENOMEM;
}
{
struct npioctl npi;
- COPY_FROM_USER (error,
- &npi,
- (void *) param3,
+ COPY_FROM_USER (error, &npi, (void *) param3,
sizeof (npi));
if (error != 0)
break;
default:
if (ppp->flags & SC_DEBUG)
printk(KERN_DEBUG "pppioc[gs]npmode: "
- "invalid proto %d\n", npi.protocol);
+ "invalid protocol %d\n",
+ npi.protocol);
error = -EINVAL;
}
if (param2 == PPPIOCGNPMODE) {
npi.mode = ppp->sc_npmode[npi.protocol];
- COPY_TO_USER (error,
- (void *) param3,
- &npi,
+ COPY_TO_USER (error, (void *) param3, &npi,
sizeof (npi));
break;
}
if (ppp->flags & SC_DEBUG)
printk(KERN_DEBUG "ppp: set np %d to %d\n",
npi.protocol, npi.mode);
+ ppp2dev(ppp)->tbusy = 0;
+ mark_bh(NET_BH);
}
break;
/*
*/
default:
if (ppp->flags & SC_DEBUG)
- printk (KERN_ERR
- "ppp_tty_ioctl: invalid ioctl: %x, addr %lx\n",
- param2,
- param3);
-
+ printk (KERN_WARNING "ppp_tty_ioctl: "
+ "invalid ioctl=%x, addr=%lx\n", param2, param3);
error = -ENOIOCTLCMD;
break;
}
if (ppp && ppp->magic == PPP_MAGIC && tty == ppp->tty) {
CHECK_PPP (0);
+#if LINUX_VERSION_CODE < VERSION(2,1,89)
poll_wait(&ppp->read_wait, wait);
poll_wait(&ppp->write_wait, wait);
+#else
+ poll_wait(filp, &ppp->read_wait, wait);
+ poll_wait(filp, &ppp->write_wait, wait);
+#endif
/* Must lock the user buffer area while checking. */
+ CHECK_BUF_MAGIC(ppp->ubuf);
if(test_and_set_bit(0, &ppp->ubuf->locked) == 0) {
if(ppp->ubuf->head != ppp->ubuf->tail)
mask |= POLLIN | POLLRDNORM;
{
struct ppp *ppp = dev2ppp (dev);
- /* reset POINTOPOINT every time, since dev_close zaps it! */
- dev->flags |= IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
-
if (ppp2tty (ppp) == NULL) {
- if (ppp->flags & SC_DEBUG)
- printk (KERN_ERR
+ printk (KERN_ERR
"ppp: %s not connected to a TTY! can't go open!\n",
dev->name);
return -ENXIO;
/*
* Move the version data
*/
- COPY_TO_USER (error,
- result,
- szVersion,
- len);
+ COPY_TO_USER (error, result, szVersion, len);
return error;
}
result = (struct ppp_stats *) ifr->ifr_ifru.ifru_data;
- COPY_TO_USER (error,
- result,
- &temp,
- sizeof (temp));
+ COPY_TO_USER (error, result, &temp, sizeof (temp));
return error;
}
*/
result = (struct ppp_comp_stats *) ifr->ifr_ifru.ifru_data;
- COPY_TO_USER (error,
- result,
- &temp,
- sizeof (temp));
+ COPY_TO_USER (error, result, &temp, sizeof (temp));
return error;
}
static int
ppp_dev_xmit_ip (struct ppp *ppp, struct ppp_buffer *buf,
- __u8 *data, int len)
+ __u8 *data, int len, enum NPmode npmode)
{
int proto = PPP_IP;
__u8 *hdr;
/*
* Branch on the type of processing for the IP frame.
*/
- switch (ppp->sc_npmode[NP_IP]) {
+ switch (npmode) {
case NPMODE_PASS:
break;
hdr[2] = 0;
hdr[3] = proto;
- return ppp_dev_xmit_frame (ppp, ppp->wbuf, hdr, len);
+ return ppp_dev_xmit_frame (ppp, buf, hdr, len);
}
/*
/*
* Send a frame to the remote.
*/
+#if LINUX_VERSION_CODE < VERSION(2,1,86)
+#define FREE_SKB(skb) dev_kfree_skb(skb)
+#else
+#define FREE_SKB(skb) dev_kfree_skb(skb, FREE_WRITE)
+#endif
static int
ppp_dev_xmit (sk_buff *skb, struct device *dev)
* Avoid timing problem should tty hangup while data is queued to be sent
*/
if (!ppp->inuse) {
- dev_kfree_skb (skb, FREE_WRITE);
+ FREE_SKB (skb);
return 0;
}
/*
printk (KERN_ERR
"ppp_dev_xmit: %s not connected to a TTY!\n",
dev->name);
- dev_kfree_skb (skb, FREE_WRITE);
+ FREE_SKB (skb);
return 0;
}
/*
if (ppp->flags & SC_DEBUG)
printk (KERN_CRIT "ppp_dev_xmit: %s Null skb data\n",
dev->name);
- dev_kfree_skb (skb, FREE_WRITE);
+ FREE_SKB (skb);
return 0;
}
/*
if (ppp->tbuf->locked || lock_buffer (ppp->wbuf) != 0) {
dev->tbusy = 1;
if (ppp->flags & SC_DEBUG)
- printk(KERN_DEBUG "dev_xmit blocked, t=%d w=%d\n",
+ printk(KERN_DEBUG "dev_xmit blocked, t=%lu w=%lu\n",
ppp->tbuf->locked, ppp->wbuf->locked);
return 1;
}
break;
case ETH_P_IP:
- answer = ppp_dev_xmit_ip (ppp, ppp->wbuf, data, len);
+ answer = ppp_dev_xmit_ip (ppp, ppp->wbuf, data, len,
+ ppp->sc_npmode[NP_IP]);
break;
default: /* All others have no support at this time. */
- dev_kfree_skb (skb, FREE_WRITE);
+ FREE_SKB (skb);
return 0;
}
/*
*/
if (answer == 0) {
/* packet queued OK */
- dev_kfree_skb (skb, FREE_WRITE);
- ppp->ddinfo.xmit_idle = jiffies;
+ FREE_SKB (skb);
} else {
ppp->wbuf->locked = 0;
if (answer < 0) {
/* packet should be dropped */
- dev_kfree_skb (skb, FREE_WRITE);
+ FREE_SKB (skb);
answer = 0;
} else {
/* packet should be queued for later */
* Generate the statistic information for the /proc/net/dev listing.
*/
-static struct enet_statistics *
+static struct net_device_stats *
ppp_dev_stats (struct device *dev)
{
struct ppp *ppp = dev2ppp (dev);
+ /* Mirror the driver-private counters (struct pppstat) into the
+ generic per-device statistics structure returned to the kernel. */
- ppp->estats.rx_packets = ppp->stats.ppp_ipackets;
- ppp->estats.rx_errors = ppp->stats.ppp_ierrors;
- ppp->estats.tx_packets = ppp->stats.ppp_opackets;
- ppp->estats.tx_errors = ppp->stats.ppp_oerrors;
+ ppp->estats.rx_packets = ppp->stats.ppp_ipackets;
+ ppp->estats.rx_errors = ppp->stats.ppp_ierrors;
+ ppp->estats.tx_packets = ppp->stats.ppp_opackets;
+ ppp->estats.tx_errors = ppp->stats.ppp_oerrors;
+#if LINUX_VERSION_CODE >= VERSION(2,1,25)
+ /* rx_bytes/tx_bytes are only copied on 2.1.25+ kernels --
+ presumably the fields do not exist earlier; confirm against
+ the target kernel's struct net_device_stats. */
+ ppp->estats.rx_bytes = ppp->stats.ppp_ibytes;
+ ppp->estats.tx_bytes = ppp->stats.ppp_obytes;
+#endif
return &ppp->estats;
}
return ppp;
}
+/*
+ * Collect hung-up channels: walk ppp_list and, for every unit that is
+ * no longer in use (ppp->inuse clear) but whose network device is
+ * still marked IFF_UP, close the device. Called from ppp_alloc()
+ * before it searches for a free unit, so stale interfaces are reaped
+ * before a channel is handed out again.
+ */
+
+static void ppp_sync(void)
+{
+ struct device *dev;
+ struct ppp *ppp;
+
+#if LINUX_VERSION_CODE >= VERSION(2,1,68)
+ /* NOTE(review): presumably dev_close() must be serialized under
+ the rtnetlink lock on 2.1.68+ kernels -- confirm. */
+ rtnl_lock();
+#endif
+ for (ppp = ppp_list; ppp != 0; ppp = ppp->next) {
+ if (!ppp->inuse) {
+ dev = ppp2dev(ppp);
+ if (dev->flags & IFF_UP)
+ dev_close(dev);
+ }
+ }
+#if LINUX_VERSION_CODE >= VERSION(2,1,68)
+ rtnl_unlock();
+#endif
+}
+
+
/* allocate or create a PPP channel */
static struct ppp *
ppp_alloc (void)
struct device *dev;
struct ppp *ppp;
+ ppp_sync();
+
/* try to find an free device */
if_num = 0;
for (ppp = ppp_list; ppp != 0; ppp = ppp->next) {
- if (!set_bit(0, &ppp->inuse))
+ if (!test_and_set_bit(0, &ppp->inuse)) {
+
+ /* Reregister device */
+
+ dev = ppp2dev(ppp);
+ unregister_netdev (dev);
+
+ if (register_netdev (dev)) {
+ printk(KERN_DEBUG "cannot reregister ppp device\n");
+ return NULL;
+ }
return ppp;
+ }
++if_num;
}
/*