patch-2.4.22 linux-2.4.22/net/sunrpc/xdr.c
- Lines: 277
- Date: 2003-08-25 04:44:44.000000000 -0700
- Orig file: linux-2.4.21/net/sunrpc/xdr.c
- Orig date: 2003-06-13 07:51:39.000000000 -0700
diff -urN linux-2.4.21/net/sunrpc/xdr.c linux-2.4.22/net/sunrpc/xdr.c
@@ -180,7 +180,8 @@
{
struct iovec *iov = iov_base;
struct page **ppage = xdr->pages;
- unsigned int len, pglen = xdr->page_len, first_kmap;
+ struct page **first_kmap = NULL;
+ unsigned int len, pglen = xdr->page_len;
len = xdr->head[0].iov_len;
if (base < len) {
@@ -203,16 +204,15 @@
ppage += base >> PAGE_CACHE_SHIFT;
base &= ~PAGE_CACHE_MASK;
}
- first_kmap = 1;
do {
len = PAGE_CACHE_SIZE;
- if (first_kmap) {
- first_kmap = 0;
+ if (!first_kmap) {
+ first_kmap = ppage;
iov->iov_base = kmap(*ppage);
} else {
iov->iov_base = kmap_nonblock(*ppage);
if (!iov->iov_base)
- goto out;
+ goto out_err;
}
if (base) {
iov->iov_base += base;
@@ -231,8 +231,11 @@
iov->iov_base = (char *)xdr->tail[0].iov_base + base;
iov++;
}
- out:
return (iov - iov_base);
+out_err:
+ for (; first_kmap != ppage; first_kmap++)
+ kunmap(*first_kmap);
+ return 0;
}
void xdr_kunmap(struct xdr_buf *xdr, unsigned int base, int niov)
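
The first two hunks above rework xdr_kmap()'s failure path: instead of returning a partial iovec count when kmap_nonblock() fails, the function now remembers the first page it mapped (first_kmap) and, at out_err, unmaps everything mapped so far before returning 0. A minimal user-space sketch of that unwind pattern follows; map_page(), unmap_page() and the simulated failure are hypothetical stand-ins for kmap(), kmap_nonblock() and kunmap(), not the kernel API.

/*
 * User-space model of the cleanup-on-failure pattern added to xdr_kmap():
 * remember where the successfully mapped run of pages starts, and on a
 * mid-loop failure unwind exactly the pages mapped so far, reporting 0.
 * map_page()/unmap_page() are hypothetical stand-ins, not kmap()/kunmap().
 */
#include <stdio.h>

struct page { int id; };

/* Stand-in for kmap()/kmap_nonblock(); the non-blocking variant may fail. */
static void *map_page(struct page *p, int may_fail)
{
	if (may_fail && p->id == 3)	/* simulate kmap_nonblock() failing */
		return NULL;
	printf("  mapped page %d\n", p->id);
	return p;
}

/* Stand-in for kunmap(). */
static void unmap_page(struct page *p)
{
	printf("  unmapped page %d\n", p->id);
}

/* Returns the number of pages mapped, or 0 after unwinding on failure. */
static int map_pages(struct page **pages, int npages)
{
	struct page **first_mapped = NULL;	/* plays the role of first_kmap */
	struct page **pp = pages;
	int n;

	for (n = 0; n < npages; n++, pp++) {
		if (!first_mapped) {
			first_mapped = pp;	/* first page: blocking map */
			map_page(*pp, 0);
		} else if (map_page(*pp, 1) == NULL) {
			/* later pages use the non-blocking map and may fail */
			goto out_err;
		}
	}
	return n;

out_err:
	/* Unmap every page from the first one mapped up to, but not
	 * including, the one whose mapping just failed. */
	for (; first_mapped != pp; first_mapped++)
		unmap_page(*first_mapped);
	return 0;
}

int main(void)
{
	struct page p[5] = { {0}, {1}, {2}, {3}, {4} };
	struct page *v[5] = { &p[0], &p[1], &p[2], &p[3], &p[4] };

	printf("mapped %d pages\n", map_pages(v, 5));
	return 0;
}

Returning 0 rather than a partial count means the caller never has to guess how many pages are still mapped after a failure.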
@@ -333,26 +336,219 @@
copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len);
}
-void
-xdr_shift_buf(struct xdr_buf *xdr, size_t len)
+/*
+ * Helper routines for doing 'memmove' like operations on a struct xdr_buf
+ *
+ * _shift_data_right_pages
+ * @pages: vector of pages containing both the source and dest memory area.
+ * @pgto_base: page vector address of destination
+ * @pgfrom_base: page vector address of source
+ * @len: number of bytes to copy
+ *
+ * Note: the addresses pgto_base and pgfrom_base are both calculated in
+ * the same way:
+ * if a memory area starts at byte 'base' in page 'pages[i]',
+ * then its address is given as (i << PAGE_CACHE_SHIFT) + base
+ * Also note: pgfrom_base must be < pgto_base, but the memory areas
+ * they point to may overlap.
+ */
+static void
+_shift_data_right_pages(struct page **pages, size_t pgto_base,
+ size_t pgfrom_base, size_t len)
{
- struct iovec iov[MAX_IOVEC];
- unsigned int nr, len_part, n, skip;
+ struct page **pgfrom, **pgto;
+ char *vfrom, *vto;
+ size_t copy;
+
+ BUG_ON(pgto_base <= pgfrom_base);
+
+ pgto_base += len;
+ pgfrom_base += len;
+
+ pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
+ pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);
+
+ pgto_base &= ~PAGE_CACHE_MASK;
+ pgfrom_base &= ~PAGE_CACHE_MASK;
- skip = 0;
do {
+ /* Are any pointers crossing a page boundary? */
+ if (pgto_base == 0) {
+ pgto_base = PAGE_CACHE_SIZE;
+ pgto--;
+ }
+ if (pgfrom_base == 0) {
+ pgfrom_base = PAGE_CACHE_SIZE;
+ pgfrom--;
+ }
- nr = xdr_kmap(iov, xdr, skip);
+ copy = len;
+ if (copy > pgto_base)
+ copy = pgto_base;
+ if (copy > pgfrom_base)
+ copy = pgfrom_base;
+ pgto_base -= copy;
+ pgfrom_base -= copy;
+
+ vto = kmap_atomic(*pgto, KM_USER0);
+ vfrom = kmap_atomic(*pgfrom, KM_USER1);
+ memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
+ kunmap_atomic(vfrom, KM_USER1);
+ kunmap_atomic(vto, KM_USER0);
- len_part = 0;
- for (n = 0; n < nr; n++)
- len_part += iov[n].iov_len;
+ } while ((len -= copy) != 0);
+}
- xdr_shift_iovec(iov, nr, len_part);
+/*
+ * _copy_to_pages
+ * @pages: array of pages
+ * @pgbase: page vector address of destination
+ * @p: pointer to source data
+ * @len: length
+ *
+ * Copies data from an arbitrary memory location into an array of pages
+ * The copy is assumed to be non-overlapping.
+ */
+static void
+_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
+{
+ struct page **pgto;
+ char *vto;
+ size_t copy;
+
+ pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
+ pgbase &= ~PAGE_CACHE_MASK;
+
+ do {
+ copy = PAGE_CACHE_SIZE - pgbase;
+ if (copy > len)
+ copy = len;
+
+ vto = kmap_atomic(*pgto, KM_USER0);
+ memcpy(vto + pgbase, p, copy);
+ kunmap_atomic(vto, KM_USER0);
+
+ pgbase += copy;
+ if (pgbase == PAGE_CACHE_SIZE) {
+ pgbase = 0;
+ pgto++;
+ }
+ p += copy;
+
+ } while ((len -= copy) != 0);
+}
- xdr_kunmap(xdr, skip, nr);
+/*
+ * _copy_from_pages
+ * @p: pointer to destination
+ * @pages: array of pages
+ * @pgbase: offset of source data
+ * @len: length
+ *
+ * Copies data into an arbitrary memory location from an array of pages
+ * The copy is assumed to be non-overlapping.
+ */
+static void
+_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
+{
+ struct page **pgfrom;
+ char *vfrom;
+ size_t copy;
- skip += len_part;
- len -= len_part;
- } while (len);
+ pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
+ pgbase &= ~PAGE_CACHE_MASK;
+
+ do {
+ copy = PAGE_CACHE_SIZE - pgbase;
+ if (copy > len)
+ copy = len;
+
+ vfrom = kmap_atomic(*pgfrom, KM_USER0);
+ memcpy(p, vfrom + pgbase, copy);
+ kunmap_atomic(vfrom, KM_USER0);
+
+ pgbase += copy;
+ if (pgbase == PAGE_CACHE_SIZE) {
+ pgbase = 0;
+ pgfrom++;
+ }
+ p += copy;
+
+ } while ((len -= copy) != 0);
+}
+
+/*
+ * xdr_shrink_bufhead
+ * @buf: xdr_buf
+ * @len: bytes to remove from buf->head[0]
+ *
+ * Shrinks XDR buffer's header iovec buf->head[0] by
+ * 'len' bytes. The extra data is not lost, but is instead
+ * moved into the inlined pages and/or the tail.
+ */
+void
+xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
+{
+ struct iovec *head, *tail;
+ size_t copy, offs;
+ unsigned int pglen = buf->page_len;
+
+ tail = buf->tail;
+ head = buf->head;
+ BUG_ON (len > head->iov_len);
+
+ /* Shift the tail first */
+ if (tail->iov_len != 0) {
+ if (tail->iov_len > len) {
+ copy = tail->iov_len - len;
+ memmove((char *)tail->iov_base + len,
+ tail->iov_base, copy);
+ }
+ /* Copy from the inlined pages into the tail */
+ copy = len;
+ if (copy > pglen)
+ copy = pglen;
+ offs = len - copy;
+ if (offs >= tail->iov_len)
+ copy = 0;
+ else if (copy > tail->iov_len - offs)
+ copy = tail->iov_len - offs;
+ if (copy != 0)
+ _copy_from_pages((char *)tail->iov_base + offs,
+ buf->pages,
+ buf->page_base + pglen + offs - len,
+ copy);
+ /* Do we also need to copy data from the head into the tail ? */
+ if (len > pglen) {
+ offs = copy = len - pglen;
+ if (copy > tail->iov_len)
+ copy = tail->iov_len;
+ memcpy(tail->iov_base,
+ (char *)head->iov_base +
+ head->iov_len - offs,
+ copy);
+ }
+ }
+ /* Now handle pages */
+ if (pglen != 0) {
+ if (pglen > len)
+ _shift_data_right_pages(buf->pages,
+ buf->page_base + len,
+ buf->page_base,
+ pglen - len);
+ copy = len;
+ if (len > pglen)
+ copy = pglen;
+ _copy_to_pages(buf->pages, buf->page_base,
+ (char *)head->iov_base + head->iov_len - len,
+ copy);
+ }
+ head->iov_len -= len;
+ buf->len -= len;
+}
+
+void
+xdr_shift_buf(struct xdr_buf *buf, size_t len)
+{
+ xdr_shrink_bufhead(buf, len);
}
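
The new helpers all address bytes in the page vector the way the comment above _shift_data_right_pages() describes: a byte at offset base inside pages[i] has the linear address (i << PAGE_CACHE_SHIFT) + base. The sketch below models that addressing and the end-to-start, chunk-per-page copy of _shift_data_right_pages() in plain user-space C with a tiny fake page size; PAGE_SZ, page_addr() and the char arrays are illustrative stand-ins for struct page ** plus kmap_atomic(), not kernel interfaces.

/*
 * User-space model of _shift_data_right_pages(): data spanning a vector of
 * fixed-size pages is moved towards higher addresses, copying from the end
 * backwards so that overlapping source/destination areas stay safe.
 * PAGE_SZ and the char[] "pages" are illustrative; the kernel operates on
 * struct page ** and maps each page with kmap_atomic() instead.
 */
#include <stdio.h>
#include <string.h>
#include <assert.h>

#define PAGE_SZ 8	/* tiny page size so the example is easy to follow */

/* Linear address (i * PAGE_SZ + base) -> pointer into page i. */
static char *page_addr(char **pages, size_t linear)
{
	return pages[linear / PAGE_SZ] + (linear % PAGE_SZ);
}

static void shift_data_right_pages(char **pages, size_t to, size_t from, size_t len)
{
	assert(to > from);	/* mirrors BUG_ON(pgto_base <= pgfrom_base) */

	/* Walk backwards from the end of both areas. */
	to += len;
	from += len;
	while (len) {
		/* Largest chunk that stays inside one page on both sides. */
		size_t copy = len;
		size_t to_off = to % PAGE_SZ ? to % PAGE_SZ : PAGE_SZ;
		size_t from_off = from % PAGE_SZ ? from % PAGE_SZ : PAGE_SZ;

		if (copy > to_off)
			copy = to_off;
		if (copy > from_off)
			copy = from_off;

		to -= copy;
		from -= copy;
		memmove(page_addr(pages, to), page_addr(pages, from), copy);
		len -= copy;
	}
}

int main(void)
{
	char p0[PAGE_SZ + 1] = "ABCDEFGH", p1[PAGE_SZ + 1] = "IJKLMNOP";
	char *pages[2] = { p0, p1 };

	/* Shift the 10 bytes at linear offset 2 right to linear offset 5. */
	shift_data_right_pages(pages, 5, 2, 10);
	printf("%.*s%.*s\n", PAGE_SZ, p0, PAGE_SZ, p1);	/* ABCDECDEFGHIJKLP */
	return 0;
}

Copying from the end backwards is what allows the source and destination ranges to overlap, which xdr_shrink_bufhead() relies on when it slides page data right to make room for the bytes it moves out of head[0].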