// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package pointer
import (
	"bytes"
	"fmt"

	"code.google.com/p/go.tools/go/types"
)
14
// CanPoint reports whether the type T is pointerlike,
15
// for the purposes of this analysis.
16
func CanPoint(T types.Type) bool {
17
switch T := T.(type) {
19
if obj := T.Obj(); obj.Name() == "Value" && obj.Pkg().Path() == "reflect" {
20
return true // treat reflect.Value like interface{}
22
return CanPoint(T.Underlying())
24
case *types.Pointer, *types.Interface, *types.Map, *types.Chan, *types.Signature, *types.Slice:
28
return false // array struct tuple builtin basic
31
// CanHaveDynamicTypes reports whether the type T can "hold" dynamic types,
32
// i.e. is an interface (incl. reflect.Type) or a reflect.Value.
34
func CanHaveDynamicTypes(T types.Type) bool {
35
switch T := T.(type) {
37
if obj := T.Obj(); obj.Name() == "Value" && obj.Pkg().Path() == "reflect" {
38
return true // reflect.Value
40
return CanHaveDynamicTypes(T.Underlying())
41
case *types.Interface:
47
// isInterface reports whether T is an interface type.
48
func isInterface(T types.Type) bool {
49
_, ok := T.Underlying().(*types.Interface)
53
// mustDeref returns the element type of its argument, which must be a
54
// pointer; panic ensues otherwise.
55
func mustDeref(typ types.Type) types.Type {
56
return typ.Underlying().(*types.Pointer).Elem()
59
// deref returns a pointer's element type; otherwise it returns typ.
60
func deref(typ types.Type) types.Type {
61
if p, ok := typ.Underlying().(*types.Pointer); ok {
67
// A fieldInfo describes one subelement (node) of the flattening-out
68
// of a type T: the subelement's type and its path from the root of T.
70
// For example, for this type:
71
// type line struct{ points []struct{x, y int} }
72
// flatten() of the inner struct yields the following []fieldInfo:
73
// struct{ x, y int } ""
76
// and flatten(line) yields:
77
// struct{ points []struct{x, y int} } ""
78
// struct{ x, y int } ".points[*]"
82
type fieldInfo struct {
85
// op and tail describe the path to the element (e.g. ".a#2.b[*].c").
86
op interface{} // *Array: true; *Tuple: int; *Struct: *types.Var; *Named: nil
90
// path returns a user-friendly string describing the subelement path.
92
func (fi *fieldInfo) path() string {
94
for p := fi; p != nil; p = p.tail {
95
switch op := p.op.(type) {
97
fmt.Fprintf(&buf, "[*]")
99
fmt.Fprintf(&buf, "#%d", op)
101
fmt.Fprintf(&buf, ".%s", op.Name())
107
// flatten returns a list of directly contained fields in the preorder
108
// traversal of the type tree of t. The resulting elements are all
109
// scalars (basic types or pointerlike types), except for struct/array
110
// "identity" nodes, whose type is that of the aggregate.
112
// reflect.Value is considered pointerlike, similar to interface{}.
114
// Callers must not mutate the result.
116
func (a *analysis) flatten(t types.Type) []*fieldInfo {
117
fl, ok := a.flattenMemo[t]
119
switch t := t.(type) {
123
// Debuggability hack: don't remove
124
// the named type from interfaces as
125
// they're very verbose.
126
fl = append(fl, &fieldInfo{typ: t})
138
fl = append(fl, &fieldInfo{typ: t})
141
fl = append(fl, &fieldInfo{typ: t}) // identity node
142
for _, fi := range a.flatten(t.Elem()) {
143
fl = append(fl, &fieldInfo{typ: fi.typ, op: true, tail: fi})
147
fl = append(fl, &fieldInfo{typ: t}) // identity node
148
for i, n := 0, t.NumFields(); i < n; i++ {
150
for _, fi := range a.flatten(f.Type()) {
151
fl = append(fl, &fieldInfo{typ: fi.typ, op: f, tail: fi})
156
// No identity node: tuples are never address-taken.
157
for i, n := 0, t.Len(); i < n; i++ {
159
for _, fi := range a.flatten(f.Type()) {
160
fl = append(fl, &fieldInfo{typ: fi.typ, op: i, tail: fi})
168
a.flattenMemo[t] = fl
174
// sizeof returns the number of pointerlike abstractions (nodes) in the type t.
175
func (a *analysis) sizeof(t types.Type) uint32 {
176
return uint32(len(a.flatten(t)))
179
// shouldTrack reports whether object type T contains (recursively)
180
// any fields whose addresses should be tracked.
181
func (a *analysis) shouldTrack(T types.Type) bool {
182
if a.track == trackAll {
183
return true // fast path
185
track, ok := a.trackTypes[T]
187
a.trackTypes[T] = true // break cycles conservatively
188
// NB: reflect.Value, reflect.Type are pre-populated to true.
189
for _, fi := range a.flatten(T) {
190
switch ft := fi.typ.Underlying().(type) {
191
case *types.Interface, *types.Signature:
192
track = true // needed for callgraph
196
track = a.track&trackChan != 0 || a.shouldTrack(ft.Elem())
198
track = a.track&trackMap != 0 || a.shouldTrack(ft.Key()) || a.shouldTrack(ft.Elem())
200
track = a.track&trackSlice != 0 || a.shouldTrack(ft.Elem())
202
track = a.track&trackPtr != 0 || a.shouldTrack(ft.Elem())
203
case *types.Array, *types.Struct:
204
// No need to look at field types since they will follow (flattened).
206
// Includes *types.Tuple, which are never address-taken.
213
a.trackTypes[T] = track
214
if !track && a.log != nil {
215
fmt.Fprintf(a.log, "Type not tracked: %s\n", T)
221
// offsetOf returns the (abstract) offset of field index within struct
223
func (a *analysis) offsetOf(typ types.Type, index int) uint32 {
225
switch t := typ.Underlying().(type) {
227
for i := 0; i < index; i++ {
228
offset += a.sizeof(t.At(i).Type())
231
offset++ // the node for the struct itself
232
for i := 0; i < index; i++ {
233
offset += a.sizeof(t.Field(i).Type())
236
panic(fmt.Sprintf("offsetOf(%s : %T)", typ, typ))
241
// sliceToArray returns the type representing the arrays to which
242
// slice type slice points.
243
func sliceToArray(slice types.Type) *types.Array {
244
return types.NewArray(slice.Underlying().(*types.Slice).Elem(), 1)
247
// Node set -------------------------------------------------------------------
249
// NB, mutator methods are attached to *nodeset.
250
// nodeset may be a reference, but its address matters!
251
type nodeset map[nodeid]struct{}
253
// ---- Accessors ----
255
func (ns nodeset) String() string {
260
fmt.Fprintf(&buf, "%sn%d", sep, n)
267
// diff returns the set-difference x - y. nil => empty.
269
// TODO(adonovan): opt: extremely inefficient. BDDs do this in
270
// constant time. Sparse bitvectors are linear but very fast.
271
func (x nodeset) diff(y nodeset) nodeset {
274
if _, ok := y[k]; !ok {
281
// clone() returns an unaliased copy of x.
282
func (x nodeset) clone() nodeset {
286
// ---- Mutators ----
288
func (ns *nodeset) add(n nodeid) bool {
293
(*ns)[n] = struct{}{}
297
func (x *nodeset) addAll(y nodeset) bool {
311
// Constraint set -------------------------------------------------------------
313
type constraintset map[constraint]struct{}
315
func (cs *constraintset) add(c constraint) bool {
318
*cs = make(constraintset)
320
(*cs)[c] = struct{}{}
324
// Worklist -------------------------------------------------------------------
326
const empty nodeid = 1<<32 - 1
328
type worklist interface {
329
add(nodeid) // Adds a node to the set
330
take() nodeid // Takes a node from the set and returns it, or empty
333
// Simple nondeterministic worklist based on a built-in map.
334
type mapWorklist struct {
338
func (w *mapWorklist) add(n nodeid) {
339
w.set[n] = struct{}{}
342
func (w *mapWorklist) take() nodeid {
343
for k := range w.set {
350
func makeMapWorklist() worklist {
351
return &mapWorklist{make(nodeset)}