gRPC in Go: Client-Side Source Code Analysis (Part 3)
- newClientStream
- newAttemptLocked
The previous article covered grpc.Dial; this one continues with the code that comes after it:
```go
// create the RPC client
client := pb.NewGreetsClient(conn)
// set a timeout
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
reply, err := client.SayHello(ctx, &pb.HelloRequest{Name: "小超", Message: "回来吃饭吗"})
if err != nil {
	log.Fatalf("could not greet: %v", err)
	return
}
log.Println(reply.Name, reply.Message)
```
Next, let's look at pb.NewGreetsClient and the generated SayHello method.
```go
func NewGreetsClient(cc grpc.ClientConnInterface) GreetsClient {
	return &greetsClient{cc}
}

func (c *greetsClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) {
	out := new(HelloReply)
	err := c.cc.Invoke(ctx, "/proto.Greets/SayHello", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
```
As you can see, the core is the call to ClientConn's Invoke method. Let's look at the concrete implementation:
```go
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
	return cc.Invoke(ctx, method, args, reply, opts...)
}

var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false}

func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
	cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...)
	if err != nil {
		return err
	}
	if err := cs.SendMsg(req); err != nil {
		return err
	}
	return cs.RecvMsg(reply)
}
```
So a unary call boils down to the three calls above: newClientStream, SendMsg, and RecvMsg.
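As a quick aside: since the generated SayHello is only a thin wrapper, the same unary call can also be issued directly through ClientConn.Invoke. A minimal sketch reusing the example proto (the import path of the generated package is made up):

```go
package greetsdemo

import (
	"context"
	"log"

	"google.golang.org/grpc"

	pb "example.com/greets/proto" // hypothetical import path for the generated code
)

// callDirectly issues the same unary RPC as the generated SayHello stub,
// but goes straight through ClientConn.Invoke.
func callDirectly(conn *grpc.ClientConn) {
	out := new(pb.HelloReply)
	err := conn.Invoke(context.Background(), "/proto.Greets/SayHello",
		&pb.HelloRequest{Name: "小超", Message: "回来吃饭吗"}, out)
	if err != nil {
		log.Fatalf("invoke failed: %v", err)
	}
	log.Println(out.Name, out.Message)
}
```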
newClientStream
```go
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
	if channelz.IsOn() {
		cc.incrCallsStarted()
		defer func() {
			if err != nil {
				cc.incrCallsFailed()
			}
		}()
	}
	// Provide an opportunity for the first RPC to see the first service config
	// provided by the resolver.
	if err := cc.waitForResolvedAddrs(ctx); err != nil {
		return nil, err
	}

	var mc serviceconfig.MethodConfig
	var onCommit func()
	var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
		return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...)
	}

	rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method}
	rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo)
	if err != nil {
		return nil, toRPCErr(err)
	}

	return newStream(ctx, func() {})
}
```
As you can see, this method essentially delegates to newClientStreamWithParams.
```go
func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) {
	c := defaultCallInfo()
	if mc.WaitForReady != nil {
		c.failFast = !*mc.WaitForReady
	}

	// Possible context leak:
	// The cancel function for the child context we create will only be called
	// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
	// an error is generated by SendMsg.
	// https://github.com/grpc/grpc-go/issues/1818.
	var cancel context.CancelFunc
	if mc.Timeout != nil && *mc.Timeout >= 0 {
		ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
	} else {
		ctx, cancel = context.WithCancel(ctx)
	}
	defer func() {
		if err != nil {
			cancel()
		}
	}()

	for _, o := range opts {
		if err := o.before(c); err != nil {
			return nil, toRPCErr(err)
		}
	}
	c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
	c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
	if err := setCallInfoCodec(c); err != nil {
		return nil, err
	}

	callHdr := &transport.CallHdr{
		Host:           cc.authority,
		Method:         method,
		ContentSubtype: c.contentSubtype,
		DoneFunc:       doneFunc,
	}

	// Set our outgoing compression according to the UseCompressor CallOption, if
	// set. In that case, also find the compressor from the encoding package.
	// Otherwise, use the compressor configured by the WithCompressor DialOption,
	// if set.
	var cp Compressor
	var comp encoding.Compressor
	if ct := c.compressorType; ct != "" {
		callHdr.SendCompress = ct
		if ct != encoding.Identity {
			comp = encoding.GetCompressor(ct)
			if comp == nil {
				return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
			}
		}
	} else if cc.dopts.cp != nil {
		callHdr.SendCompress = cc.dopts.cp.Type()
		cp = cc.dopts.cp
	}
	if c.creds != nil {
		callHdr.Creds = c.creds
	}

	cs := &clientStream{
		callHdr:      callHdr,
		ctx:          ctx,
		methodConfig: &mc,
		opts:         opts,
		callInfo:     c,
		cc:           cc,
		desc:         desc,
		codec:        c.codec,
		cp:           cp,
		comp:         comp,
		cancel:       cancel,
		firstAttempt: true,
		onCommit:     onCommit,
	}
	if !cc.dopts.disableRetry {
		cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
	}
	cs.binlog = binarylog.GetMethodLogger(method)

	if err := cs.newAttemptLocked(false /* isTransparent */); err != nil {
		cs.finish(err)
		return nil, err
	}

	op := func(a *csAttempt) error { return a.newStream() }
	if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
		cs.finish(err)
		return nil, err
	}

	if cs.binlog != nil {
		md, _ := metadata.FromOutgoingContext(ctx)
		logEntry := &binarylog.ClientHeader{
			OnClientSide: true,
			Header:       md,
			MethodName:   method,
			Authority:    cs.cc.authority,
		}
		if deadline, ok := ctx.Deadline(); ok {
			logEntry.Timeout = time.Until(deadline)
			if logEntry.Timeout < 0 {
				logEntry.Timeout = 0
			}
		}
		cs.binlog.Log(logEntry)
	}

	if desc != unaryStreamDesc {
		// Listen on cc and stream contexts to cleanup when the user closes the
		// ClientConn or cancels the stream context. In all other cases, an error
		// should already be injected into the recv buffer by the transport, which
		// the client will eventually receive, and then we will cancel the stream's
		// context in clientStream.finish.
		go func() {
			select {
			case <-cc.ctx.Done():
				cs.finish(ErrClientConnClosing)
			case <-ctx.Done():
				cs.finish(toRPCErr(ctx.Err()))
			}
		}()
	}
	return cs, nil
}
```
As you can see, this function initializes the clientStream struct and then calls the newAttemptLocked method.
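One aside before moving on to newAttemptLocked: the failFast field set from mc.WaitForReady above is the same knob a caller can flip per RPC with the WaitForReady call option. A minimal sketch of that usage (the server address and the generated package's import path are placeholders):

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"

	pb "example.com/greets/proto" // hypothetical import path for the generated code
)

func main() {
	conn, err := grpc.Dial("localhost:8080", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// WaitForReady(true) turns off failFast for this call: instead of failing
	// immediately when no transport is READY, the RPC blocks inside pick()
	// until a connection becomes ready or the context deadline expires.
	client := pb.NewGreetsClient(conn)
	reply, err := client.SayHello(ctx, &pb.HelloRequest{Name: "小超", Message: "回来吃饭吗"}, grpc.WaitForReady(true))
	if err != nil {
		log.Fatalf("SayHello failed: %v", err)
	}
	log.Println(reply.Name, reply.Message)
}
```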
newAttemptLocked
```go
// newAttemptLocked creates a new attempt with a transport.
// If it succeeds, then it replaces clientStream's attempt with this new attempt.
func (cs *clientStream) newAttemptLocked(isTransparent bool) (retErr error) {
	ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp)
	method := cs.callHdr.Method
	sh := cs.cc.dopts.copts.StatsHandler
	var beginTime time.Time
	if sh != nil {
		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast})
		beginTime = time.Now()
		begin := &stats.Begin{
			Client:                    true,
			BeginTime:                 beginTime,
			FailFast:                  cs.callInfo.failFast,
			IsClientStream:            cs.desc.ClientStreams,
			IsServerStream:            cs.desc.ServerStreams,
			IsTransparentRetryAttempt: isTransparent,
		}
		sh.HandleRPC(ctx, begin)
	}

	var trInfo *traceInfo
	if EnableTracing {
		trInfo = &traceInfo{
			tr: trace.New("grpc.Sent."+methodFamily(method), method),
			firstLine: firstLine{
				client: true,
			},
		}
		if deadline, ok := ctx.Deadline(); ok {
			trInfo.firstLine.deadline = time.Until(deadline)
		}
		trInfo.tr.LazyLog(&trInfo.firstLine, false)
		ctx = trace.NewContext(ctx, trInfo.tr)
	}

	newAttempt := &csAttempt{
		ctx:          ctx,
		beginTime:    beginTime,
		cs:           cs,
		dc:           cs.cc.dopts.dc,
		statsHandler: sh,
		trInfo:       trInfo,
	}
	defer func() {
		if retErr != nil {
			// This attempt is not set in the clientStream, so it's finish won't
			// be called. Call it here for stats and trace in case they are not
			// nil.
			newAttempt.finish(retErr)
		}
	}()

	if err := ctx.Err(); err != nil {
		return toRPCErr(err)
	}

	if cs.cc.parsedTarget.Scheme == "xds" {
		// Add extra metadata (metadata that will be added by transport) to context
		// so the balancer can see them.
		ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(
			"content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
		))
	}
	t, done, err := cs.cc.getTransport(ctx, cs.callInfo.failFast, cs.callHdr.Method)
	if err != nil {
		return err
	}
	if trInfo != nil {
		trInfo.firstLine.SetRemoteAddr(t.RemoteAddr())
	}
	newAttempt.t = t
	newAttempt.done = done
	cs.attempt = newAttempt
	return nil
}
```
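A quick note on the StatsHandler used above: it is whatever handler was registered with the WithStatsHandler dial option, and TagRPC/HandleRPC are exactly the calls made in newAttemptLocked. A minimal sketch of such a handler (loggingStats and the address are made up for illustration):

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// loggingStats is a minimal stats.Handler; newAttemptLocked calls TagRPC and
// then HandleRPC with a *stats.Begin on whatever handler was registered here.
type loggingStats struct{}

func (loggingStats) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
	log.Printf("TagRPC: %s (failfast=%v)", info.FullMethodName, info.FailFast)
	return ctx
}

func (loggingStats) HandleRPC(ctx context.Context, s stats.RPCStats) {
	if b, ok := s.(*stats.Begin); ok {
		log.Printf("RPC begin at %v, client stream=%v, server stream=%v", b.BeginTime, b.IsClientStream, b.IsServerStream)
	}
}

func (loggingStats) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
	return ctx
}

func (loggingStats) HandleConn(context.Context, stats.ConnStats) {}

func main() {
	// Register the handler at dial time; it then sees the events emitted per attempt.
	conn, err := grpc.Dial("localhost:8080", grpc.WithInsecure(), grpc.WithStatsHandler(loggingStats{}))
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}
```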
Now take a look at the getTransport method used here.
```go
func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
	t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
		Ctx:            ctx,
		FullMethodName: method,
	})
	if err != nil {
		return nil, nil, toRPCErr(err)
	}
	return t, done, nil
}
```
Note cc.blockingpicker.pick here. It should look familiar: it is the pickerWrapper that was set up when the ClientConn was created, which we covered in the earlier articles. Now let's look at the pick method:
```go
// pick returns the transport that will be used for the RPC.
// It may block in the following cases:
// - there's no picker
// - the current picker returns ErrNoSubConnAvailable
// - the current picker returns other errors and failfast is false.
// - the subConn returned by the current picker is not READY
// When one of these situations happens, pick blocks until the picker gets updated.
func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) {
	var ch chan struct{}

	var lastPickErr error
	for {
		pw.mu.Lock()
		if pw.done {
			pw.mu.Unlock()
			return nil, nil, ErrClientConnClosing
		}

		if pw.picker == nil {
			ch = pw.blockingCh
		}
		//fmt.Printf(" pw.picker:%v nil:%v ch == pw.blockingCh:%v\n", pw.picker, pw.picker == nil, ch == pw.blockingCh)
		if ch == pw.blockingCh {
			// This could happen when either:
			// - pw.picker is nil (the previous if condition), or
			// - has called pick on the current picker.
			pw.mu.Unlock()
			select {
			case <-ctx.Done():
				var errStr string
				if lastPickErr != nil {
					errStr = "latest balancer error: " + lastPickErr.Error()
				} else {
					errStr = ctx.Err().Error()
				}
				switch ctx.Err() {
				case context.DeadlineExceeded:
					return nil, nil, status.Error(codes.DeadlineExceeded, errStr)
				case context.Canceled:
					return nil, nil, status.Error(codes.Canceled, errStr)
				}
			case <-ch:
			}
			continue
		}

		ch = pw.blockingCh
		p := pw.picker
		pw.mu.Unlock()

		pickResult, err := p.Pick(info)
		if err != nil {
			if err == balancer.ErrNoSubConnAvailable {
				continue
			}
			if _, ok := status.FromError(err); ok {
				// Status error: end the RPC unconditionally with this status.
				return nil, nil, err
			}
			// For all other errors, wait for ready RPCs should block and other
			// RPCs should fail with unavailable.
			if !failfast {
				lastPickErr = err
				continue
			}
			return nil, nil, status.Error(codes.Unavailable, err.Error())
		}

		acw, ok := pickResult.SubConn.(*acBalancerWrapper)
		if !ok {
			logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn)
			continue
		}
		if t := acw.getAddrConn().getReadyTransport(); t != nil {
			if channelz.IsOn() {
				return t, doneChannelzWrapper(acw, pickResult.Done), nil
			}
			return t, pickResult.Done, nil
		}
		if pickResult.Done != nil {
			// Calling done with nil error, no bytes sent and no bytes received.
			// DoneInfo with default value works.
			pickResult.Done(balancer.DoneInfo{})
		}
		logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
		// If ok == false, ac.state is not READY.
		// A valid picker always returns READY subConn. This means the state of ac
		// just changed, and picker will be updated shortly.
		// continue back to the beginning of the for loop to repick.
	}
}
```
Note p.Pick here: this is the picker that the pickfirst balancer installs when it handles a state update, as shown below.
```go
func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) {
	if logger.V(2) {
		logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s)
	}
	if b.sc != sc {
		if logger.V(2) {
			logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
		}
		return
	}
	b.state = s.ConnectivityState
	if s.ConnectivityState == connectivity.Shutdown {
		b.sc = nil
		return
	}

	switch s.ConnectivityState {
	case connectivity.Ready:
		b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}})
	case connectivity.Connecting:
		b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}})
	case connectivity.Idle:
		b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &idlePicker{sc: sc}})
	case connectivity.TransientFailure:
		b.cc.UpdateState(balancer.State{
			ConnectivityState: s.ConnectivityState,
			Picker:            &picker{err: s.ConnectionError},
		})
	}
}
```
When the connection is Ready, the picker installed above carries a balancer.PickResult with the SubConn filled in; when it is not usable, the picker carries an error instead (balancer.ErrNoSubConnAvailable while connecting, or the connection error on failure). So when cc.blockingpicker.pick calls p.Pick, it gets back either a usable SubConn or a concrete failure.
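For reference, the picker type installed above is tiny; the following is a sketch along the lines of the pickfirst implementation (struct and field names taken from the snippet above), just to show where the result or error actually comes from:

```go
package pickfirstsketch

import "google.golang.org/grpc/balancer"

// picker mirrors the value that pickfirst hands to UpdateState above:
// Pick simply returns whatever result or error was captured when the
// SubConn changed state, so blockingpicker.pick sees either a ready
// SubConn or an error such as balancer.ErrNoSubConnAvailable.
type picker struct {
	result balancer.PickResult
	err    error
}

func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return p.result, p.err
}
```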
This closes the loop on the whole flow.
The SendMsg and RecvMsg that follow are nothing new compared with what was covered before: on top of HTTP/2 they add HPACK header compression and protobuf serialization, so I won't go through them again here.
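For completeness, here is a minimal sketch of the length-prefixed framing that each serialized message gets before it is written onto the HTTP/2 stream (a 1-byte compressed flag plus a 4-byte big-endian length, per the gRPC-over-HTTP/2 wire format); encodeGRPCFrame is a made-up helper name, not a function from grpc-go:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeGRPCFrame prefixes a serialized message with the 5-byte gRPC header:
// 1 byte compressed flag followed by a 4-byte big-endian payload length.
func encodeGRPCFrame(payload []byte, compressed bool) []byte {
	frame := make([]byte, 5+len(payload))
	if compressed {
		frame[0] = 1
	}
	binary.BigEndian.PutUint32(frame[1:5], uint32(len(payload)))
	copy(frame[5:], payload)
	return frame
}

func main() {
	// In a real call the payload would be the proto-marshaled HelloRequest.
	frame := encodeGRPCFrame([]byte("hello"), false)
	fmt.Printf("% x\n", frame)
}
```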