/*
 *  linux/net/iucv/af_iucv.c
 *
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright 2006 IBM Corporation
 *
 *  Author(s): Jennifer Hunt <jenhunt@us.ibm.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>

#define CONFIG_IUCV_SOCK_DEBUG 1

#define IPRMDATA 0x80
#define VERSION "1.0"

static char iucv_userid[80];

static struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

/* Callback functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
	.lock = RW_LOCK_UNLOCKED,
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}
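
/*
 * IUCV connection user data is 16 bytes: high_nmcpy() fills bytes 0-7
 * and low_nmcpy() fills bytes 8-15 with an 8-character application name
 * each; callers convert the combined buffer to EBCDIC with ASCEBC()
 * before handing it to the IUCV base layer.
 */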

static void iucv_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	bh_lock_sock(sk);
	sk->sk_err = ETIMEDOUT;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);

	iucv_sock_kill(sk);
	sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

static void iucv_sock_init_timer(struct sock *sk)
{
	init_timer(&sk->sk_timer);
	sk->sk_timer.function = iucv_sock_timeout;
	sk->sk_timer.data = (unsigned long)sk;
}

static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}

/* Kill socket */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
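
/*
 * A socket is only torn down here once it is both zapped (marked for
 * destruction by iucv_sock_close()) and orphaned (no longer attached
 * to a struct socket); sock_put() then drops the final reference.
 */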

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;
	unsigned long timeo;

	iucv_sock_clear_timer(sk);
	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
	case IUCV_DISCONN:
		err = 0;

		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
		}

		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			err = iucv_path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}

	release_sock(sk);
	iucv_sock_kill(sk);
}
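
/*
 * Closing a connected socket first enters IUCV_CLOSING and, while
 * unacknowledged messages remain on send_skb_q, lingers (for the
 * SO_LINGER time or IUCV_DISCONN_TIMEOUT) until iucv_callback_txdone()
 * drains the queue and moves the state to IUCV_CLOSED; only then is
 * the IUCV path severed and freed.
 */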

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(PF_IUCV, prio, &iucv_proto, 1);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
	iucv_sk(sk)->send_tag = 0;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	iucv_sock_init_timer(sk);

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}
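
/*
 * sk_allocation is GFP_DMA because IUCV passes data buffers to z/VM by
 * real address, which must lie below 2 GB; on s390 that is ZONE_DMA.
 */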

/* Create an IUCV socket */
static int iucv_sock_create(struct socket *sock, int protocol)
{
	struct sock *sk;

	if (sock->type != SOCK_STREAM)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;
	sock->ops = &iucv_sock_ops;

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}
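
/*
 * Userspace sketch (field names as declared in <net/iucv/af_iucv.h>;
 * the 8-byte name is blank-padded, not NUL-terminated):
 *
 *	struct sockaddr_iucv sa;
 *	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *	memset(&sa, 0, sizeof(sa));
 *	sa.siucv_family = AF_IUCV;
 *	memcpy(sa.siucv_name, "APPLNAME", 8);
 *	bind(fd, (struct sockaddr *) &sa, sizeof(sa));
 *
 * Any socket type other than SOCK_STREAM is rejected above with
 * -ESOCKTNOSUPPORT.
 */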

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	sock_hold(sk);
	list_add_tail(&iucv_sk(sk)->accept_q, &iucv_sk(parent)->accept_q);
	iucv_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}

void iucv_accept_unlink(struct sock *sk)
{
	list_del_init(&iucv_sk(sk)->accept_q);
	iucv_sk(sk)->parent->sk_ack_backlog--;
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			release_sock(sk);
			iucv_accept_unlink(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}
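
/*
 * A child whose path was severed while still sitting on the accept
 * queue is handed out in IUCV_DISCONN state, so the accepting process
 * sees end-of-data from recvmsg() instead of a connected socket.
 */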

int iucv_sock_wait_state(struct sock *sk, int state, int state2,
			 unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	add_wait_queue(sk->sk_sleep, &wait);
	while (sk->sk_state != state && sk->sk_state != state2) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);
	return err;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err;

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}

	/* Bind the socket */
	memcpy(iucv->src_name, sa->siucv_name, 8);

	/* Copy the user id */
	memcpy(iucv->src_user_id, iucv_userid, 8);
	sk->sk_state = IUCV_BOUND;
	err = 0;

done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}
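
/*
 * The name lookup and the assignment of src_name happen under both the
 * socket lock and the global list lock; otherwise two concurrent binds
 * could claim the same 8-byte application name.
 */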

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* Set the userid and name */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
	if (unlikely(err))
		return -EPROTO;

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	unsigned char user_data[16];
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_type != SOCK_STREAM)
		return -EINVAL;

	iucv = iucv_sk(sk);

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv_sk(sk)->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
				     IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = iucv_path_connect(iucv->path, &af_iucv_handler,
				sa->siucv_user_id, NULL, user_data, sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		err = -ECONNREFUSED;
		goto done;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
					   sock_sndtimeo(sk, flags & O_NONBLOCK));
	}

	if (sk->sk_state == IUCV_DISCONN) {
		release_sock(sk);
		return -ECONNREFUSED;
	}
done:
	release_sock(sk);
	return err;
}
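
/*
 * Connect handshake: iucv_path_connect() triggers the peer's
 * path_pending callback; this side then sleeps in iucv_sock_wait_state()
 * until iucv_callback_connack() (path_complete) sets IUCV_CONNECTED, or
 * a severed path leaves the socket in IUCV_DISCONN.
 */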

/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}
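
/*
 * lock_sock_nested(..., SINGLE_DEPTH_NESTING) is used because
 * iucv_accept_dequeue() takes the child socket's lock while the
 * listening socket's lock is held; the annotation keeps lockdep from
 * reporting this parent/child nesting as a self-deadlock.
 */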

static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
		memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}

static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	int err;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	if (sk->sk_state == IUCV_CONNECTED) {
		if (!(skb = sock_alloc_send_skb(sk, len,
						msg->msg_flags & MSG_DONTWAIT,
						&err)))
			goto out;

		if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
			err = -EFAULT;
			goto fail;
		}

		txmsg.class = 0;
		txmsg.tag = iucv->send_tag++;
		memcpy(skb->cb, &txmsg.tag, 4);
		skb_queue_tail(&iucv->send_skb_q, skb);
		err = iucv_message_send(iucv->path, &txmsg, 0, 0,
					(void *) skb->data, skb->len);
		if (err) {
			if (err == 3)
				printk(KERN_ERR "AF_IUCV msg limit exceeded\n");
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}
	} else {
		err = -ENOTCONN;
		goto out;
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}
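
/*
 * Each outbound message carries a 32-bit tag (also stashed in skb->cb),
 * and the skb stays on send_skb_q until iucv_callback_txdone() matches
 * the tag of the completed IUCV message and frees it; iucv_sock_close()
 * lingers on exactly this queue.
 */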

static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int target, copied = 0;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	copied = min_t(unsigned int, skb->len, len);

	cskb = skb;
	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
		skb_queue_head(&sk->sk_receive_queue, skb);
		if (copied == 0)
			return -EFAULT;
		goto done;
	}

	len -= copied;

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {
		skb_pull(skb, copied);

		if (skb->len) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			goto done;
		}

		kfree_skb(skb);

		/* Queue backlog skbs */
		rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv_sk(sk)->backlog_skb_q,
					       rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
			}
		}
	} else
		skb_queue_head(&sk->sk_receive_queue, skb);

done:
	return err ? : copied;
}
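
/*
 * backlog_skb_q holds data that arrived while sk_receive_queue was over
 * its limit (sock_queue_rcv_skb() failed); each successful read moves
 * as much of the backlog as now fits back onto the receive queue.
 */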

static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	poll_wait(file, sk->sk_sleep, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
		mask |= POLLIN;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;
	u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;

	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;
		err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
					(void *) prmmsg, 8);
		if (err)
			/* 2 = connection severed by peer; else not connected */
			err = (err == 2) ? -ECONNRESET : -ENOTCONN;
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
		if (err)
			err = -ENOTCONN;

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}
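
/*
 * "how" is incremented on entry to map SHUT_RD/SHUT_WR/SHUT_RDWR
 * (0/1/2) onto RCV_SHUTDOWN/SEND_SHUTDOWN/SHUTDOWN_MASK (1/2/3). The
 * shutdown notification is sent with IUCV_IPRMDATA, i.e. the 8-byte
 * prmmsg pattern travels in the IUCV parameter list rather than in a
 * separate data buffer.
 */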

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		iucv_path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
	if (!nsk) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	path->msglim = IUCV_QUEUELEN_DEFAULT;
	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = iucv_path_sever(path, user_data);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}
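
/*
 * path_pending runs from the IUCV base layer's tasklet, hence
 * GFP_ATOMIC for the child socket and bh_lock_sock() instead of
 * lock_sock(). A nonzero return tells the base layer this path does
 * not belong to af_iucv, so it may be offered to another handler (and
 * is severed if nobody accepts it).
 */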

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len,
			     struct sk_buff_head *fragmented_skb_q)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		memcpy(nskb->data, skb->data + copied, size);
		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(fragmented_skb_q, nskb);
	}

	return 0;
}
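
/*
 * Inbound messages bigger than a quarter of the receive buffer are cut
 * into sk_rcvbuf/4 sized pieces so that every piece can pass
 * sock_queue_rcv_skb()'s receive-buffer check; an unfragmented skb
 * larger than the buffer could never be queued at all.
 */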

static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb, *fskb;
	struct sk_buff_head fragmented_skb_q;
	int rc;

	skb_queue_head_init(&fragmented_skb_q);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return;

	skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
	if (!skb) {
		iucv_path_sever(path, NULL);
		return;
	}

	if (msg->flags & IPRMDATA) {
		skb->data = NULL;
		skb->len = 0;
	} else {
		rc = iucv_message_receive(path, msg, 0, skb->data,
					  msg->length, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		if (skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, msg->length,
					       &fragmented_skb_q);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				iucv_path_sever(path, NULL);
				return;
			}
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = msg->length;
		}
	}

	/* Queue the fragmented skb */
	fskb = skb_dequeue(&fragmented_skb_q);
	while (fskb) {
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			skb_queue_tail(&iucv->backlog_skb_q, fskb);
		else if (sock_queue_rcv_skb(sk, fskb))
			skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, fskb);
		fskb = skb_dequeue(&fragmented_skb_q);
	}

	/* Queue the original skb if it exists (was not fragmented) */
	if (skb) {
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
		else if (sock_queue_rcv_skb(sk, skb))
			skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
	}
}

static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);

	do {
		this = list_skb;
		list_skb = list_skb->next;
	} while (memcmp(&msg->tag, this->cb, 4) && list_skb);

	spin_unlock_irqrestore(&list->lock, flags);

	skb_unlink(this, &iucv_sk(sk)->send_skb_q);
	kfree_skb(this);

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (!list_empty(&iucv_sk(sk)->accept_q))
		sk->sk_state = IUCV_SEVERED;
	else
		sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
}

static struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt
};

static struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};

static int __init afiucv_init(void)
{
	int err;

	if (!MACHINE_IS_VM) {
		printk(KERN_ERR "AF_IUCV connection needs VM as base\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}
	cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
	if (unlikely(err)) {
		printk(KERN_ERR "AF_IUCV needs the VM userid\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}

	err = iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out_iucv;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;
	printk(KERN_INFO "AF_IUCV lowlevel driver initialized\n");
	return 0;

out_proto:
	proto_unregister(&iucv_proto);
out_iucv:
	iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}
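
/*
 * Error unwinding mirrors registration order; the socket family is
 * registered last, so no AF_IUCV socket can be created before the
 * protocol and the IUCV path handler are in place.
 */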

static void __exit afiucv_exit(void)
{
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
	iucv_unregister(&af_iucv_handler, 0);

	printk(KERN_INFO "AF_IUCV lowlevel driver unloaded\n");
}

module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);