Refactor: make inbound request contextual

Dreamacro
2021-01-23 14:49:46 +08:00
parent 35925cb3da
commit f4de055aa1
19 changed files with 302 additions and 125 deletions
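The context types this commit introduces are not among the hunks shown below; only their call sites are. As a rough sketch inferred purely from those call sites (ctx.Conn(), ctx.Metadata(), ctx.Request(), and the C.PlainContext parameter of resolveMetadata), the new abstraction looks something like the following — the actual definitions added elsewhere in the commit may differ:

// Sketch only: interfaces inferred from how the handlers in this diff use
// their ctx arguments, not the code committed to the constant package.
package constant

import "net"

// PlainContext is the bare per-request context accepted by resolveMetadata;
// none of its methods are exercised in the hunks shown here.
type PlainContext interface{}

// ConnContext is what TCP inbounds now enqueue instead of a ServerAdapter:
// the accepted connection and its Metadata travel together. The HTTP flavour
// (*context.HTTPContext) additionally exposes the parsed Request().
type ConnContext interface {
	PlainContext
	Metadata() *Metadata
	Conn() net.Conn
}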

View File

@ -13,13 +13,15 @@ import (
"github.com/Dreamacro/clash/common/pool"
"github.com/Dreamacro/clash/component/resolver"
C "github.com/Dreamacro/clash/constant"
"github.com/Dreamacro/clash/context"
)
func handleHTTP(request *inbound.HTTPAdapter, outbound net.Conn) {
req := request.R
func handleHTTP(ctx *context.HTTPContext, outbound net.Conn) {
req := ctx.Request()
conn := ctx.Conn()
host := req.Host
inboundReader := bufio.NewReader(request)
inboundReader := bufio.NewReader(conn)
outboundReader := bufio.NewReader(outbound)
for {
@ -43,7 +45,7 @@ func handleHTTP(request *inbound.HTTPAdapter, outbound net.Conn) {
 		inbound.RemoveHopByHopHeaders(resp.Header)
 
 		if resp.StatusCode == http.StatusContinue {
-			err = resp.Write(request)
+			err = resp.Write(conn)
 			if err != nil {
 				break
 			}
@ -58,14 +60,14 @@ func handleHTTP(request *inbound.HTTPAdapter, outbound net.Conn) {
 		} else {
 			resp.Close = true
 		}
-		err = resp.Write(request)
+		err = resp.Write(conn)
 		if err != nil || resp.Close {
 			break
 		}
 
 		// even if resp.Write write body to the connection, but some http request have to Copy to close it
 		buf := pool.Get(pool.RelayBufferSize)
-		_, err = io.CopyBuffer(request, resp.Body, buf)
+		_, err = io.CopyBuffer(conn, resp.Body, buf)
 		pool.Put(buf)
 		if err != nil && err != io.EOF {
 			break
@ -129,8 +131,8 @@ func handleUDPToLocal(packet C.UDPPacket, pc net.PacketConn, key string, fAddr n
 	}
 }
 
-func handleSocket(request C.ServerAdapter, outbound net.Conn) {
-	relay(request, outbound)
+func handleSocket(ctx C.ConnContext, outbound net.Conn) {
+	relay(ctx.Conn(), outbound)
 }
 
 // relay copies between left and right bidirectionally.

View File

@ -1,4 +1,4 @@
-package tunnel
+package statistic
 
 import (
 	"sync"

View File

@ -1,4 +1,4 @@
-package tunnel
+package statistic
 
 import (
 	"net"
@ -57,7 +57,7 @@ func (tt *tcpTracker) Close() error {
 	return tt.Conn.Close()
 }
 
-func newTCPTracker(conn C.Conn, manager *Manager, metadata *C.Metadata, rule C.Rule) *tcpTracker {
+func NewTCPTracker(conn C.Conn, manager *Manager, metadata *C.Metadata, rule C.Rule) *tcpTracker {
 	uuid, _ := uuid.NewV4()
 
 	t := &tcpTracker{
@ -114,7 +114,7 @@ func (ut *udpTracker) Close() error {
 	return ut.PacketConn.Close()
 }
 
-func newUDPTracker(conn C.PacketConn, manager *Manager, metadata *C.Metadata, rule C.Rule) *udpTracker {
+func NewUDPTracker(conn C.PacketConn, manager *Manager, metadata *C.Metadata, rule C.Rule) *udpTracker {
 	uuid, _ := uuid.NewV4()
 
 	ut := &udpTracker{

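With the trackers moved out of package tunnel into the new statistic package, the constructors have to be exported so the tunnel package can still wrap connections across the package boundary, as the tunnel.go hunks below do. A minimal sketch of such a call site follows; the track helper is hypothetical and not part of this commit:

// Sketch: cross-package use of the now-exported constructor; the track
// helper below is illustrative only.
package tunnel

import (
	C "github.com/Dreamacro/clash/constant"
	"github.com/Dreamacro/clash/tunnel/statistic"
)

func track(conn C.Conn, metadata *C.Metadata, rule C.Rule) C.Conn {
	// statistic.DefaultManager aggregates traffic for all tracked connections.
	return statistic.NewTCPTracker(conn, statistic.DefaultManager, metadata, rule)
}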
View File

@ -12,11 +12,13 @@ import (
"github.com/Dreamacro/clash/component/nat"
"github.com/Dreamacro/clash/component/resolver"
C "github.com/Dreamacro/clash/constant"
"github.com/Dreamacro/clash/context"
"github.com/Dreamacro/clash/log"
"github.com/Dreamacro/clash/tunnel/statistic"
)
var (
tcpQueue = make(chan C.ServerAdapter, 200)
tcpQueue = make(chan C.ConnContext, 200)
udpQueue = make(chan *inbound.PacketAdapter, 200)
natTable = nat.New()
rules []C.Rule
@ -36,8 +38,8 @@ func init() {
 }
 
 // Add request to queue
-func Add(req C.ServerAdapter) {
-	tcpQueue <- req
+func Add(ctx C.ConnContext) {
+	tcpQueue <- ctx
 }
 
 // AddPacket add udp Packet to queue
@ -141,9 +143,7 @@ func preHandleMetadata(metadata *C.Metadata) error {
 	return nil
 }
 
-func resolveMetadata(metadata *C.Metadata) (C.Proxy, C.Rule, error) {
-	var proxy C.Proxy
-	var rule C.Rule
+func resolveMetadata(ctx C.PlainContext, metadata *C.Metadata) (proxy C.Proxy, rule C.Rule, err error) {
 	switch mode {
 	case Direct:
 		proxy = proxies["DIRECT"]
@ -151,13 +151,9 @@ func resolveMetadata(metadata *C.Metadata) (C.Proxy, C.Rule, error) {
 		proxy = proxies["GLOBAL"]
 	// Rule
 	default:
-		var err error
 		proxy, rule, err = match(metadata)
-		if err != nil {
-			return nil, nil, err
-		}
 	}
-	return proxy, rule, nil
+	return
 }
 
 func handleUDPConn(packet *inbound.PacketAdapter) {
@ -210,7 +206,8 @@ func handleUDPConn(packet *inbound.PacketAdapter) {
 			cond.Broadcast()
 		}()
 
-		proxy, rule, err := resolveMetadata(metadata)
+		ctx := context.NewPacketConnContext(metadata)
+		proxy, rule, err := resolveMetadata(ctx, metadata)
 		if err != nil {
 			log.Warnln("[UDP] Parse metadata failed: %s", err.Error())
 			return
@ -225,7 +222,8 @@ func handleUDPConn(packet *inbound.PacketAdapter) {
 			}
 			return
 		}
-		pc := newUDPTracker(rawPc, DefaultManager, metadata, rule)
+		ctx.InjectPacketConn(rawPc)
+		pc := statistic.NewUDPTracker(rawPc, statistic.DefaultManager, metadata, rule)
 
 		switch true {
 		case rule != nil:
@ -245,10 +243,10 @@ func handleUDPConn(packet *inbound.PacketAdapter) {
 	}()
 }
 
-func handleTCPConn(localConn C.ServerAdapter) {
-	defer localConn.Close()
+func handleTCPConn(ctx C.ConnContext) {
+	defer ctx.Conn().Close()
 
-	metadata := localConn.Metadata()
+	metadata := ctx.Metadata()
 	if !metadata.Valid() {
 		log.Warnln("[Metadata] not valid: %#v", metadata)
 		return
@ -259,7 +257,7 @@ func handleTCPConn(localConn C.ServerAdapter) {
 		return
 	}
 
-	proxy, rule, err := resolveMetadata(metadata)
+	proxy, rule, err := resolveMetadata(ctx, metadata)
 	if err != nil {
 		log.Warnln("[Metadata] parse failed: %s", err.Error())
 		return
@ -274,7 +272,7 @@ func handleTCPConn(localConn C.ServerAdapter) {
 		}
 		return
 	}
-	remoteConn = newTCPTracker(remoteConn, DefaultManager, metadata, rule)
+	remoteConn = statistic.NewTCPTracker(remoteConn, statistic.DefaultManager, metadata, rule)
 	defer remoteConn.Close()
 
 	switch true {
@ -288,11 +286,11 @@ func handleTCPConn(localConn C.ServerAdapter) {
 		log.Infoln("[TCP] %s --> %v doesn't match any rule using DIRECT", metadata.SourceAddress(), metadata.String())
 	}
 
-	switch adapter := localConn.(type) {
-	case *inbound.HTTPAdapter:
-		handleHTTP(adapter, remoteConn)
-	case *inbound.SocketAdapter:
-		handleSocket(adapter, remoteConn)
+	switch c := ctx.(type) {
+	case *context.HTTPContext:
+		handleHTTP(c, remoteConn)
+	default:
+		handleSocket(ctx, remoteConn)
 	}
 }
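For UDP, handleUDPConn above builds the context with context.NewPacketConnContext(metadata) and later calls ctx.InjectPacketConn(rawPc) once the outbound has been dialed. A sketch of a PacketConnContext shaped after those two calls — the committed type may carry more, such as a request ID:

// Sketch only: shaped after the NewPacketConnContext/InjectPacketConn call
// sites in handleUDPConn, not the actual type added by this commit.
package context

import (
	C "github.com/Dreamacro/clash/constant"
)

type PacketConnContext struct {
	metadata   *C.Metadata
	packetConn C.PacketConn
}

// NewPacketConnContext wraps the inbound packet's metadata before the proxy
// has been resolved.
func NewPacketConnContext(metadata *C.Metadata) *PacketConnContext {
	return &PacketConnContext{metadata: metadata}
}

// InjectPacketConn attaches the dialed outbound PacketConn afterwards, so the
// connection and its metadata stay associated for observers.
func (p *PacketConnContext) InjectPacketConn(pc C.PacketConn) {
	p.packetConn = pc
}

func (p *PacketConnContext) Metadata() *C.Metadata { return p.metadata }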