+ return -1;
+ }
+
+ freeaddrinfo(result);
+
+ memset(&hints, 0, sizeof(hints));
+
+ if (s->ipv6) {
+ hints.ai_family = AF_INET6;
+ } else {
+ hints.ai_family = AF_INET;
+ }
+ if (s->udp) {
+ hints.ai_socktype = SOCK_DGRAM;
+ hints.ai_protocol = 0;
+ } else {
+ hints.ai_socktype = SOCK_RAW;
+ hints.ai_protocol = IPPROTO_L2TP;
Hang on, this is bogus. This is a *userspace* L2TP implementation!
We don't want a kernel L2TP driver to handle this socket. Luckily this
never happens anyway since net/l2tp/l2tp_ip.c only registers its socket
type for <AF_INET, SOCK_DGRAM, IPPROTO_L2TP> and <AF_INET6, SOCK_DGRAM,
IPPROTO_L2TP>.
When we create this socket with <AF_INET, SOCK_RAW, IPPROTO_L2TP> what
really happens is that the kernel falls back to the IPv4 raw socket
driver due to a wildcard match.
In other words, we shouldn't use IPPROTO_L2TP. Just use 0.
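For illustration (not part of the original patch), a small probe along these lines shows what the kernel does with each triple; IPPROTO_L2TP is assumed here to be the IANA-assigned protocol number 115, and the SOCK_RAW case needs root/CAP_NET_RAW:

/* Hypothetical probe: try the two socket triples discussed above. */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPPROTO_L2TP
#define IPPROTO_L2TP 115    /* IANA-assigned L2TP IP protocol number (assumed) */
#endif

static void try_socket(int type, const char *label)
{
    int fd = socket(AF_INET, type, IPPROTO_L2TP);

    if (fd < 0) {
        printf("%s: socket() failed: %s\n", label, strerror(errno));
        return;
    }
    printf("%s: socket() succeeded (fd=%d)\n", label, fd);
    close(fd);
}

int main(void)
{
    /* Handled by net/l2tp/l2tp_ip.c when that driver is available. */
    try_socket(SOCK_DGRAM, "<AF_INET, SOCK_DGRAM, IPPROTO_L2TP>");
    /* Falls through to the generic IPv4 raw socket code via the wildcard match. */
    try_socket(SOCK_RAW, "<AF_INET, SOCK_RAW, IPPROTO_L2TP>");
    return 0;
}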
This is not passed to socket() directly - both setups share a call to
getaddrinfo() after that and use whatever it returns.
If you pass <family, SOCK_RAW, 0> to getaddrinfo() it returns <family,
SOCK_DGRAM, 0>, so when you use that later on the socket is set up
incorrectly.
If you pass <SOCK_RAW, PROTO_L2TPV3> it returns the correct values to
set up the socket. Bug for bug, canceling each other out :(
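A quick way to check this on a given libc is a probe along these lines (hypothetical, not part of the patch); it simply prints what getaddrinfo() hands back for each hint combination:

/* Hypothetical probe: show what getaddrinfo() returns for the two
 * hint combinations discussed above. */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netdb.h>

#ifndef IPPROTO_L2TP
#define IPPROTO_L2TP 115    /* IANA-assigned L2TP IP protocol number (assumed) */
#endif

static void probe(int socktype, int protocol, const char *label)
{
    struct addrinfo hints, *res;
    int err;

    memset(&hints, 0, sizeof(hints));
    hints.ai_family = AF_INET;
    hints.ai_socktype = socktype;
    hints.ai_protocol = protocol;
    hints.ai_flags = AI_NUMERICHOST;

    err = getaddrinfo("127.0.0.1", NULL, &hints, &res);
    if (err) {
        printf("%s: getaddrinfo: %s\n", label, gai_strerror(err));
        return;
    }
    printf("%s: ai_socktype=%d ai_protocol=%d\n",
           label, res->ai_socktype, res->ai_protocol);
    freeaddrinfo(res);
}

int main(void)
{
    probe(SOCK_RAW, 0, "hints <SOCK_RAW, 0>");
    probe(SOCK_RAW, IPPROTO_L2TP, "hints <SOCK_RAW, IPPROTO_L2TP>");
    return 0;
}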
Otherwise, as far as the kernel is concerned, you are absolutely correct
- I looked at the code, and this is passed through to userspace if it
hits the wildcard match.
I have added some comments at the PROTO_L2TPV3 definition to address
this, so that it is not "fixed" by mistake later on.
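For illustration only, the definition-site comment meant here might look roughly like this (the exact macro name, value and wording in the actual patch may differ):

/*
 * IANA-assigned IP protocol number for L2TPv3 (115, 0x73).
 * Do not "fix" this to 0: the value is never handed to socket()
 * directly, it is only used as a getaddrinfo() hint, and getaddrinfo()
 * only returns values suitable for the raw-socket setup when the
 * protocol is given here.
 */
#define IPPROTO_L2TPV3 115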
I have addressed all other comments and added some examples to the .hx
(tested vs openwrt barrier breaker/3.8.13).
I am going to retest the final version and if it passes all tests
resubmit sometime tomorrow.
A.