clash/transport/tuic/client.go

563 lines
12 KiB
Go
Raw Normal View History

2022-11-25 00:08:14 +00:00
package tuic
import (
"bufio"
"bytes"
"context"
"crypto/tls"
"errors"
"fmt"
2022-11-25 00:08:14 +00:00
"math/rand"
"net"
"net/netip"
2022-11-26 15:53:59 +00:00
"runtime"
2022-11-25 00:08:14 +00:00
"sync"
2022-11-25 10:32:30 +00:00
"sync/atomic"
2022-11-25 00:08:14 +00:00
"time"
"github.com/metacubex/quic-go"
2022-11-25 00:08:14 +00:00
N "github.com/Dreamacro/clash/common/net"
"github.com/Dreamacro/clash/common/pool"
2022-11-26 15:53:59 +00:00
"github.com/Dreamacro/clash/component/dialer"
2022-11-25 00:08:14 +00:00
C "github.com/Dreamacro/clash/constant"
"github.com/Dreamacro/clash/transport/tuic/congestion"
2022-11-25 00:08:14 +00:00
)
2022-11-25 10:32:30 +00:00
var (
	// ClientClosed is passed to Client.Close by the GC finalizer installed in
	// NewClient when a Client is collected.
	ClientClosed = errors.New("tuic: client closed")
	// TooManyOpenStreams is returned by DialContext and ListenPacketContext
	// once the configured MaxOpenStreams limit is reached.
	// NOTE(review): Go convention would name these ErrClientClosed /
	// ErrTooManyOpenStreams, but renaming exported vars would break callers.
	TooManyOpenStreams = errors.New("tuic: too many open streams")
)
2022-11-26 15:53:59 +00:00
// ClientOption carries the static configuration shared by every connection a
// TUIC Client opens.
type ClientOption struct {
	// DialFn supplies the underlying packet connection and the server address
	// the QUIC session is dialed over.
	DialFn func(ctx context.Context, opts ...dialer.Option) (pc net.PacketConn, addr net.Addr, err error)

	TlsConfig  *tls.Config
	QuicConfig *quic.Config
	Host       string
	Token      [32]byte
	// UdpRelayMode selects the inbound/outbound UDP relay transport:
	// "quic" uses unidirectional streams, anything else uses QUIC datagrams
	// ("native").
	UdpRelayMode string
	// CongestionController: "cubic", "new_reno" or "bbr"; any other value
	// keeps the quic-go default.
	CongestionController string
	// ReduceRtt dials with early data (DialEarlyContext) to save a round trip.
	ReduceRtt bool
	// RequestTimeout bounds the wait for the server's connect response, in
	// milliseconds; <= 0 disables the deadline.
	RequestTimeout int
	// MaxUdpRelayPacketSize is the largest UDP payload WriteTo will accept.
	MaxUdpRelayPacketSize int

	// FastOpen returns the dialed conn before the server's response arrives;
	// the response is then consumed lazily on first Read.
	FastOpen bool
	// MaxOpenStreams caps concurrently open relayed streams/associations.
	MaxOpenStreams int64
}
2022-11-26 15:53:59 +00:00
// Client multiplexes TCP relay streams and UDP associations over a single
// lazily-dialed QUIC connection, and tears the connection down on transport
// errors.
type Client struct {
	*ClientOption
	udp bool // whether the inbound UDP relay reader is started

	quicConn    quic.Connection
	connMutex   sync.Mutex   // guards quicConn
	openStreams atomic.Int64 // live relay streams + UDP associations

	// udpInputMap maps association id (uint32) -> net.Conn pipe that feeds
	// quicStreamPacketConn.ReadFrom.
	udpInputMap sync.Map

	// only ready for PoolClient
	poolRef     *PoolClient
	optionRef   any
	lastVisited time.Time
}
2022-11-26 15:53:59 +00:00
func (t *Client) getQuicConn(ctx context.Context) (quic.Connection, error) {
2022-11-25 00:08:14 +00:00
t.connMutex.Lock()
defer t.connMutex.Unlock()
if t.quicConn != nil {
return t.quicConn, nil
}
2022-11-26 15:53:59 +00:00
pc, addr, err := t.DialFn(ctx)
2022-11-25 00:08:14 +00:00
if err != nil {
return nil, err
}
var quicConn quic.Connection
if t.ReduceRtt {
quicConn, err = quic.DialEarlyContext(ctx, pc, addr, t.Host, t.TlsConfig, t.QuicConfig)
} else {
quicConn, err = quic.DialContext(ctx, pc, addr, t.Host, t.TlsConfig, t.QuicConfig)
}
if err != nil {
return nil, err
}
switch t.CongestionController {
case "cubic":
quicConn.SetCongestionControl(
congestion.NewCubicSender(
congestion.DefaultClock{},
congestion.GetMaxPacketSize(quicConn.RemoteAddr()),
false,
nil,
),
)
case "new_reno":
quicConn.SetCongestionControl(
congestion.NewCubicSender(
congestion.DefaultClock{},
congestion.GetMaxPacketSize(quicConn.RemoteAddr()),
true,
nil,
),
)
case "bbr":
quicConn.SetCongestionControl(
congestion.NewBBRSender(
congestion.DefaultClock{},
congestion.GetMaxPacketSize(quicConn.RemoteAddr()),
congestion.InitialCongestionWindow,
congestion.DefaultBBRMaxCongestionWindow,
),
)
}
2022-11-25 00:08:14 +00:00
sendAuthentication := func(quicConn quic.Connection) (err error) {
defer func() {
t.deferQuicConn(quicConn, err)
}()
stream, err := quicConn.OpenUniStream()
if err != nil {
return err
}
buf := pool.GetBuffer()
defer pool.PutBuffer(buf)
2022-11-25 00:08:14 +00:00
err = NewAuthenticate(t.Token).WriteTo(buf)
if err != nil {
return err
}
_, err = buf.WriteTo(stream)
if err != nil {
return err
}
err = stream.Close()
if err != nil {
return
}
return nil
}
2022-11-25 09:15:45 +00:00
parseUDP := func(quicConn quic.Connection) (err error) {
2022-11-25 00:08:14 +00:00
defer func() {
t.deferQuicConn(quicConn, err)
}()
switch t.UdpRelayMode {
case "quic":
for {
var stream quic.ReceiveStream
stream, err = quicConn.AcceptUniStream(context.Background())
if err != nil {
return err
}
go func() (err error) {
var assocId uint32
defer func() {
t.deferQuicConn(quicConn, err)
if err != nil && assocId != 0 {
if val, ok := t.udpInputMap.LoadAndDelete(assocId); ok {
if conn, ok := val.(net.Conn); ok {
_ = conn.Close()
}
}
}
2022-11-25 10:32:30 +00:00
stream.CancelRead(0)
2022-11-25 00:08:14 +00:00
}()
reader := bufio.NewReader(stream)
packet, err := ReadPacket(reader)
if err != nil {
return
}
assocId = packet.ASSOC_ID
if val, ok := t.udpInputMap.Load(assocId); ok {
if conn, ok := val.(net.Conn); ok {
writer := bufio.NewWriterSize(conn, packet.BytesLen())
_ = packet.WriteTo(writer)
_ = writer.Flush()
}
}
return
}()
}
default: // native
for {
var message []byte
message, err = quicConn.ReceiveMessage()
if err != nil {
return err
}
go func() (err error) {
var assocId uint32
defer func() {
t.deferQuicConn(quicConn, err)
if err != nil && assocId != 0 {
if val, ok := t.udpInputMap.LoadAndDelete(assocId); ok {
if conn, ok := val.(net.Conn); ok {
_ = conn.Close()
}
}
}
}()
buffer := bytes.NewBuffer(message)
packet, err := ReadPacket(buffer)
if err != nil {
return
}
assocId = packet.ASSOC_ID
if val, ok := t.udpInputMap.Load(assocId); ok {
if conn, ok := val.(net.Conn); ok {
_, _ = conn.Write(message)
}
}
return
}()
}
}
2022-11-25 09:15:45 +00:00
}
go sendAuthentication(quicConn)
2022-11-26 15:53:59 +00:00
if t.udp {
2022-11-25 09:15:45 +00:00
go parseUDP(quicConn)
}
2022-11-25 00:08:14 +00:00
t.quicConn = quicConn
return quicConn, nil
}
func (t *Client) deferQuicConn(quicConn quic.Connection, err error) {
var netError net.Error
if err != nil && errors.As(err, &netError) {
t.connMutex.Lock()
defer t.connMutex.Unlock()
if t.quicConn == quicConn {
2022-11-25 03:32:05 +00:00
t.Close(err)
2022-11-25 00:08:14 +00:00
}
}
}
2022-11-25 03:32:05 +00:00
// Close terminates the QUIC connection with ProtocolError and err's message,
// then closes and removes every registered UDP input pipe.
//
// err must be non-nil: err.Error() is dereferenced unconditionally.
// NOTE(review): Close is reached via deferQuicConn (connMutex held) but also
// from the GC finalizer without the lock — confirm the unlocked path cannot
// race with getQuicConn.
func (t *Client) Close(err error) {
	quicConn := t.quicConn
	if quicConn != nil {
		_ = quicConn.CloseWithError(ProtocolError, err.Error())
		// Unblock every association reader waiting on its pipe.
		t.udpInputMap.Range(func(key, value any) bool {
			if conn, ok := value.(net.Conn); ok {
				_ = conn.Close()
			}
			t.udpInputMap.Delete(key)
			return true
		})
		t.quicConn = nil
	}
}
2022-11-26 15:53:59 +00:00
func (t *Client) DialContext(ctx context.Context, metadata *C.Metadata) (net.Conn, error) {
quicConn, err := t.getQuicConn(ctx)
2022-11-25 00:08:14 +00:00
if err != nil {
return nil, err
}
2022-11-26 15:53:59 +00:00
openStreams := t.openStreams.Add(1)
if openStreams >= t.MaxOpenStreams {
2022-11-26 15:53:59 +00:00
t.openStreams.Add(-1)
2022-11-25 10:32:30 +00:00
return nil, TooManyOpenStreams
}
2022-11-25 09:15:45 +00:00
stream, err := func() (stream *quicStreamConn, err error) {
defer func() {
t.deferQuicConn(quicConn, err)
}()
buf := pool.GetBuffer()
defer pool.PutBuffer(buf)
err = NewConnect(NewAddress(metadata)).WriteTo(buf)
if err != nil {
return nil, err
}
2022-11-25 09:15:45 +00:00
quicStream, err := quicConn.OpenStream()
if err != nil {
return nil, err
}
2022-11-25 11:14:09 +00:00
stream = &quicStreamConn{
Stream: quicStream,
lAddr: quicConn.LocalAddr(),
rAddr: quicConn.RemoteAddr(),
client: t,
}
_, err = buf.WriteTo(stream)
if err != nil {
_ = stream.Close()
return nil, err
}
return stream, err
2022-11-25 00:08:14 +00:00
}()
2022-11-25 04:10:33 +00:00
if err != nil {
return nil, err
}
2022-11-27 08:38:41 +00:00
conn := &earlyConn{BufferedConn: N.NewBufferedConn(stream), RequestTimeout: t.RequestTimeout}
2022-11-26 13:14:56 +00:00
if !t.FastOpen {
err = conn.Response()
if err != nil {
return nil, err
}
}
return conn, nil
}
// earlyConn defers the TUIC connect-response read until first use so the
// dialer can hand back a writable conn immediately (fast-open). The response
// is read exactly once; its error is cached for all later calls.
type earlyConn struct {
	*N.BufferedConn
	resOnce sync.Once
	resErr  error

	// RequestTimeout bounds the response read, in milliseconds; <= 0 means no
	// deadline.
	RequestTimeout int
}
func (conn *earlyConn) response() error {
2022-11-27 08:38:41 +00:00
if conn.RequestTimeout > 0 {
_ = conn.SetReadDeadline(time.Now().Add(time.Duration(conn.RequestTimeout) * time.Millisecond))
}
2022-11-25 00:08:14 +00:00
response, err := ReadResponse(conn)
if err != nil {
2022-11-25 08:06:56 +00:00
_ = conn.Close()
2022-11-26 13:14:56 +00:00
return err
2022-11-25 00:08:14 +00:00
}
if response.IsFailed() {
2022-11-25 08:06:56 +00:00
_ = conn.Close()
2022-11-26 13:14:56 +00:00
return errors.New("connect failed")
2022-11-25 00:08:14 +00:00
}
2022-11-26 13:14:56 +00:00
_ = conn.SetReadDeadline(time.Time{})
return nil
}
// Response lazily performs the one-time response read and returns the cached
// result on every subsequent call.
func (conn *earlyConn) Response() error {
	conn.resOnce.Do(func() { conn.resErr = conn.response() })
	return conn.resErr
}
func (conn *earlyConn) Read(b []byte) (n int, err error) {
err = conn.Response()
if err != nil {
return 0, err
}
return conn.BufferedConn.Read(b)
2022-11-25 00:08:14 +00:00
}
// quicStreamConn adapts a quic.Stream to net.Conn. Writes are serialized by
// lock so close() can safely shut the write side down, and closing returns
// the owning Client's stream slot (after a delay).
type quicStreamConn struct {
	quic.Stream
	lock   sync.Mutex // serializes Write against the close sequence
	lAddr  net.Addr
	rAddr  net.Addr
	client *Client

	closeOnce sync.Once
	closeErr  error
}
2022-11-25 11:14:09 +00:00
// Write sends p on the underlying QUIC stream. The mutex keeps writers from
// racing the close sequence, which takes the same lock.
func (q *quicStreamConn) Write(p []byte) (n int, err error) {
	q.lock.Lock()
	defer q.lock.Unlock()
	n, err = q.Stream.Write(p)
	return
}
2022-11-25 08:06:56 +00:00
func (q *quicStreamConn) Close() error {
2022-11-25 12:14:05 +00:00
q.closeOnce.Do(func() {
q.closeErr = q.close()
})
return q.closeErr
}
// close shuts down both directions of the stream. The openStreams slot is
// returned only C.DefaultTCPTimeout after close finishes (the defer creates
// the AfterFunc at function return), so a new stream cannot immediately take
// this one's place while data may still be draining.
func (q *quicStreamConn) close() error {
	defer time.AfterFunc(C.DefaultTCPTimeout, func() {
		q.client.openStreams.Add(-1)
	})
	// https://github.com/cloudflare/cloudflared/commit/ed2bac026db46b239699ac5ce4fcf122d7cab2cd
	// Make sure a possible writer does not block the lock forever. We need it, so we can close the writer
	// side of the stream safely.
	_ = q.Stream.SetWriteDeadline(time.Now())
	// This lock is eventually acquired despite Write also acquiring it, because we set a deadline to writes.
	q.lock.Lock()
	defer q.lock.Unlock()
	// We have to clean up the receiving stream ourselves since the Close in the bottom does not handle that.
	q.Stream.CancelRead(0)
	return q.Stream.Close()
}
2022-11-25 00:08:14 +00:00
// LocalAddr returns the QUIC connection's local address, captured at dial
// time.
func (q *quicStreamConn) LocalAddr() net.Addr {
	return q.lAddr
}
// RemoteAddr returns the QUIC connection's remote address, captured at dial
// time.
func (q *quicStreamConn) RemoteAddr() net.Addr {
	return q.rAddr
}

// Compile-time check that quicStreamConn satisfies net.Conn.
var _ net.Conn = &quicStreamConn{}
2022-11-26 15:53:59 +00:00
func (t *Client) ListenPacketContext(ctx context.Context, metadata *C.Metadata) (net.PacketConn, error) {
quicConn, err := t.getQuicConn(ctx)
2022-11-25 00:08:14 +00:00
if err != nil {
return nil, err
}
2022-11-26 15:53:59 +00:00
openStreams := t.openStreams.Add(1)
if openStreams >= t.MaxOpenStreams {
2022-11-26 15:53:59 +00:00
t.openStreams.Add(-1)
2022-11-25 12:14:05 +00:00
return nil, TooManyOpenStreams
}
2022-11-25 00:08:14 +00:00
pipe1, pipe2 := net.Pipe()
var connId uint32
for {
connId = rand.Uint32()
_, loaded := t.udpInputMap.LoadOrStore(connId, pipe1)
if !loaded {
break
}
}
pc := &quicStreamPacketConn{
connId: connId,
quicConn: quicConn,
lAddr: quicConn.LocalAddr(),
client: t,
inputConn: N.NewBufferedConn(pipe2),
}
return pc, nil
}
// quicStreamPacketConn is the net.PacketConn for one UDP association:
// outbound packets go straight over the QUIC connection, inbound packets are
// fed through inputConn by the client's relay reader.
type quicStreamPacketConn struct {
	connId    uint32 // association id, key into client.udpInputMap
	quicConn  quic.Connection
	lAddr     net.Addr
	client    *Client
	inputConn *N.BufferedConn // receive side of the relay pipe
	closeOnce sync.Once
	closeErr  error

	// closed is read by WriteTo without synchronization.
	// NOTE(review): that read races with Close setting it — consider
	// atomic.Bool.
	closed bool
}
func (q *quicStreamPacketConn) Close() error {
q.closeOnce.Do(func() {
2022-11-25 12:14:05 +00:00
q.closed = true
2022-11-25 00:08:14 +00:00
q.closeErr = q.close()
})
return q.closeErr
}
// close releases the association: it removes the input pipe from the
// client's map, closes it, and sends a DISSOCIATE command to the server on a
// fresh unidirectional stream. The openStreams slot is returned after
// C.DefaultTCPTimeout, and any transport-level error also tears down the
// QUIC connection via deferQuicConn.
func (q *quicStreamPacketConn) close() (err error) {
	defer time.AfterFunc(C.DefaultTCPTimeout, func() {
		q.client.openStreams.Add(-1)
	})
	defer func() {
		q.client.deferQuicConn(q.quicConn, err)
	}()
	q.client.udpInputMap.Delete(q.connId)
	_ = q.inputConn.Close()
	buf := pool.GetBuffer()
	defer pool.PutBuffer(buf)
	err = NewDissociate(q.connId).WriteTo(buf)
	if err != nil {
		return
	}
	stream, err := q.quicConn.OpenUniStream()
	if err != nil {
		return
	}
	_, err = buf.WriteTo(stream)
	if err != nil {
		return
	}
	err = stream.Close()
	if err != nil {
		return
	}
	return
}
// SetDeadline implements net.PacketConn. Only the read side is armed (via
// the internal input pipe); writes go straight to the QUIC connection, which
// exposes no per-packet write deadline here. Previously this was a no-op
// stub that silently ignored the deadline entirely.
func (q *quicStreamPacketConn) SetDeadline(t time.Time) error {
	return q.inputConn.SetReadDeadline(t)
}
// SetReadDeadline bounds reads from the internal input pipe that feeds
// ReadFrom.
func (q *quicStreamPacketConn) SetReadDeadline(t time.Time) error {
	return q.inputConn.SetReadDeadline(t)
}
// SetWriteDeadline is a no-op: outbound packets are written directly to the
// QUIC connection, which exposes no per-packet write deadline here.
func (q *quicStreamPacketConn) SetWriteDeadline(t time.Time) error {
	//TODO implement me
	return nil
}
// ReadFrom decodes one relayed UDP packet from the internal pipe, returning
// its payload (truncated to len(p) if oversized, as packet conns do) and the
// packet's source address.
func (q *quicStreamPacketConn) ReadFrom(p []byte) (int, net.Addr, error) {
	packet, err := ReadPacket(q.inputConn)
	if err != nil {
		return 0, nil, err
	}
	return copy(p, packet.DATA), packet.ADDR.UDPAddr(), nil
}
func (q *quicStreamPacketConn) WriteTo(p []byte, addr net.Addr) (n int, err error) {
if len(p) > q.client.MaxUdpRelayPacketSize {
return 0, fmt.Errorf("udp packet too large(%d > %d)", len(p), q.client.MaxUdpRelayPacketSize)
}
2022-11-25 12:14:05 +00:00
if q.closed {
return 0, net.ErrClosed
}
2022-11-25 00:08:14 +00:00
defer func() {
q.client.deferQuicConn(q.quicConn, err)
}()
addr.String()
buf := pool.GetBuffer()
defer pool.PutBuffer(buf)
2022-11-25 00:08:14 +00:00
addrPort, err := netip.ParseAddrPort(addr.String())
if err != nil {
return
}
err = NewPacket(q.connId, uint16(len(p)), NewAddressAddrPort(addrPort), p).WriteTo(buf)
if err != nil {
return
}
switch q.client.UdpRelayMode {
case "quic":
var stream quic.SendStream
stream, err = q.quicConn.OpenUniStream()
if err != nil {
return
}
2022-11-25 10:32:30 +00:00
defer stream.Close()
2022-11-25 00:08:14 +00:00
_, err = buf.WriteTo(stream)
if err != nil {
return
}
default: // native
err = q.quicConn.SendMessage(buf.Bytes())
if err != nil {
return
}
}
n = len(p)
return
}
// LocalAddr returns the QUIC connection's local address, captured when the
// association was created.
func (q *quicStreamPacketConn) LocalAddr() net.Addr {
	return q.lAddr
}

// Compile-time check that quicStreamPacketConn satisfies net.PacketConn.
var _ net.PacketConn = &quicStreamPacketConn{}
2022-11-26 15:53:59 +00:00
// NewClient builds a Client from clientOption; udp enables the inbound UDP
// relay reader when the connection is dialed. A finalizer closes the QUIC
// connection if the Client is garbage collected without being closed.
func NewClient(clientOption *ClientOption, udp bool) *Client {
	c := &Client{
		ClientOption: clientOption,
		udp:          udp,
	}
	runtime.SetFinalizer(c, closeClient)
	return c
}
// closeClient is the runtime.SetFinalizer hook installed by NewClient.
func closeClient(client *Client) {
	client.Close(ClientClosed)
}