// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: config.proto
package tensorflow
import (
encoding_binary "encoding/binary"
fmt "fmt"
io "io"
math "math"
proto "github.com/gogo/protobuf/proto"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// Optimization level
type OptimizerOptions_Level int32
const (
// L1 is the default level.
// Optimization performed at L1 :
// 1. Common subexpression elimination
// 2. Constant folding
OptimizerOptions_L1 OptimizerOptions_Level = 0
// No optimizations
OptimizerOptions_L0 OptimizerOptions_Level = -1
)
var OptimizerOptions_Level_name = map[int32]string{
0: "L1",
-1: "L0",
}
var OptimizerOptions_Level_value = map[string]int32{
"L1": 0,
"L0": -1,
}
func (x OptimizerOptions_Level) String() string {
return proto.EnumName(OptimizerOptions_Level_name, int32(x))
}
func (OptimizerOptions_Level) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_3eaf2c85e69e9ea4, []int{1, 0}
}
// Control the use of the compiler/jit. Experimental.
type OptimizerOptions_GlobalJitLevel int32
const (
OptimizerOptions_DEFAULT OptimizerOptions_GlobalJitLevel = 0
OptimizerOptions_OFF OptimizerOptions_GlobalJitLevel = -1
// The following settings turn on compilation, with higher values being
// more aggressive. Higher values may reduce opportunities for parallelism
// and may use more memory. (At present, there is no distinction, but this
// is expected to change.)
OptimizerOptions_ON_1 OptimizerOptions_GlobalJitLevel = 1
OptimizerOptions_ON_2 OptimizerOptions_GlobalJitLevel = 2
)
var OptimizerOptions_GlobalJitLevel_name = map[int32]string{
0: "DEFAULT",
-1: "OFF",
1: "ON_1",
2: "ON_2",
}
var OptimizerOptions_GlobalJitLevel_value = map[string]int32{
"DEFAULT": 0,
"OFF": -1,
"ON_1": 1,
"ON_2": 2,
}
func (x OptimizerOptions_GlobalJitLevel) String() string {
return proto.EnumName(OptimizerOptions_GlobalJitLevel_name, int32(x))
}
func (OptimizerOptions_GlobalJitLevel) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_3eaf2c85e69e9ea4, []int{1, 1}
}
// TODO(pbar) Turn this into a TraceOptions proto which allows
// tracing to be controlled in a more orthogonal manner?
type RunOptions_TraceLevel int32
const (
RunOptions_NO_TRACE RunOptions_TraceLevel = 0
RunOptions_SOFTWARE_TRACE RunOptions_TraceLevel = 1
RunOptions_HARDWARE_TRACE RunOptions_TraceLevel = 2
RunOptions_FULL_TRACE RunOptions_TraceLevel = 3
)
var RunOptions_TraceLevel_name = map[int32]string{
0: "NO_TRACE",
1: "SOFTWARE_TRACE",
2: "HARDWARE_TRACE",
3: "FULL_TRACE",
}
var RunOptions_TraceLevel_value = map[string]int32{
"NO_TRACE": 0,
"SOFTWARE_TRACE": 1,
"HARDWARE_TRACE": 2,
"FULL_TRACE": 3,
}
func (x RunOptions_TraceLevel) String() string {
return proto.EnumName(RunOptions_TraceLevel_name, int32(x))
}
func (RunOptions_TraceLevel) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_3eaf2c85e69e9ea4, []int{6, 0}
}
type GPUOptions struct {
// Fraction of the available GPU memory to allocate for each process.
// 1 means to allocate all of the GPU memory, 0.5 means the process
// allocates up to ~50% of the available GPU memory.
//
// GPU memory is pre-allocated unless the allow_growth option is enabled.
//
// If greater than 1.0, uses CUDA unified memory to potentially oversubscribe
// the amount of memory available on the GPU device by using host memory as a
// swap space. Accessing memory not available on the device will be
// significantly slower as that would require memory transfer between the host
// and the device. Options to reduce the memory requirement should be
// considered before enabling this option as this may come with a negative
// performance impact. Oversubscription using the unified memory requires
// Pascal class or newer GPUs and it is currently only supported on the Linux
// operating system. See
// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-requirements
// for the detailed requirements.
PerProcessGpuMemoryFraction float64 `protobuf:"fixed64,1,opt,name=per_process_gpu_memory_fraction,json=perProcessGpuMemoryFraction,proto3" json:"per_process_gpu_memory_fraction,omitempty"`
// If true, the allocator does not pre-allocate the entire specified
// GPU memory region, instead starting small and growing as needed.
AllowGrowth bool `protobuf:"varint,4,opt,name=allow_growth,json=allowGrowth,proto3" json:"allow_growth,omitempty"`
// The type of GPU allocation strategy to use.
//
// Allowed values:
// "": The empty string (default) uses a system-chosen default
// which may change over time.
//
// "BFC": A "Best-fit with coalescing" algorithm, simplified from a
// version of dlmalloc.
AllocatorType string `protobuf:"bytes,2,opt,name=allocator_type,json=allocatorType,proto3" json:"allocator_type,omitempty"`
// Delay deletion of up to this many bytes to reduce the number of
// interactions with gpu driver code. If 0, the system chooses
// a reasonable default (several MBs).
DeferredDeletionBytes int64 `protobuf:"varint,3,opt,name=deferred_deletion_bytes,json=deferredDeletionBytes,proto3" json:"deferred_deletion_bytes,omitempty"`
// A comma-separated list of GPU ids that determines the 'visible'
// to 'virtual' mapping of GPU devices. For example, if TensorFlow
// can see 8 GPU devices in the process, and one wanted to map
// visible GPU devices 5 and 3 as "/device:GPU:0", and "/device:GPU:1",
// then one would specify this field as "5,3". This field is similar in
// spirit to the CUDA_VISIBLE_DEVICES environment variable, except
// it applies to the visible GPU devices in the process.
//
// NOTE:
// 1. The GPU driver provides the process with the visible GPUs
// in an order which is not guaranteed to have any correlation to
// the *physical* GPU id in the machine. This field is used for
// remapping "visible" to "virtual", which means this operates only
// after the process starts. Users are required to use vendor
// specific mechanisms (e.g., CUDA_VISIBLE_DEVICES) to control the
// physical to visible device mapping prior to invoking TensorFlow.
// 2. In the code, the ids in this list are also called "platform GPU id"s,
// and the 'virtual' ids of GPU devices (i.e. the ids in the device
// name "/device:GPU:<id>") are also called "TF GPU id"s. Please
// refer to third_party/tensorflow/core/common_runtime/gpu/gpu_id.h
// for more information.
VisibleDeviceList string `protobuf:"bytes,5,opt,name=visible_device_list,json=visibleDeviceList,proto3" json:"visible_device_list,omitempty"`
// In the event polling loop sleep this many microseconds between
// PollEvents calls, when the queue is not empty. If value is not
// set or set to 0, gets set to a non-zero default.
PollingActiveDelayUsecs int32 `protobuf:"varint,6,opt,name=polling_active_delay_usecs,json=pollingActiveDelayUsecs,proto3" json:"polling_active_delay_usecs,omitempty"`
// This field is deprecated and ignored.
PollingInactiveDelayMsecs int32 `protobuf:"varint,7,opt,name=polling_inactive_delay_msecs,json=pollingInactiveDelayMsecs,proto3" json:"polling_inactive_delay_msecs,omitempty"`
// Force all tensors to be gpu_compatible. On a GPU-enabled TensorFlow,
// enabling this option forces all CPU tensors to be allocated with Cuda
// pinned memory. Normally, TensorFlow will infer which tensors should be
// allocated as pinned memory. But in cases where the inference is
// incomplete, this option can significantly speed up cross-device memory
// copy performance as long as the data fits in memory.
// Note that this option is not something that should be
// enabled by default for unknown or very large models, since all Cuda pinned
// memory is unpageable; having too much pinned memory might negatively impact
// the overall host system performance.
ForceGpuCompatible bool `protobuf:"varint,8,opt,name=force_gpu_compatible,json=forceGpuCompatible,proto3" json:"force_gpu_compatible,omitempty"`
// Everything inside experimental is subject to change and is not subject
// to API stability guarantees in
// https://www.tensorflow.org/guide/version_compat.
Experimental *GPUOptions_Experimental `protobuf:"bytes,9,opt,name=experimental,proto3" json:"experimental,omitempty"`
}
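// The following example is an illustrative sketch and not part of the
// generated code: it builds a GPUOptions message that caps per-process GPU
// memory at roughly 40%, lets the allocation grow on demand, and restricts the
// process to visible GPUs 0 and 2, then serializes it with proto.Marshal.
// The concrete values are assumptions chosen only for demonstration.
func exampleGPUOptions() ([]byte, error) {
	// Populate only the fields of interest; unset fields keep their zero
	// values and fall back to the system-chosen defaults described above.
	opts := &GPUOptions{
		PerProcessGpuMemoryFraction: 0.4,
		AllowGrowth:                 true,
		VisibleDeviceList:           "0,2",
	}
	return proto.Marshal(opts)
}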
func (m *GPUOptions) Reset() { *m = GPUOptions{} }
func (m *GPUOptions) String() string { return proto.CompactTextString(m) }
func (*GPUOptions) ProtoMessage() {}
func (*GPUOptions) Descriptor() ([]byte, []int) {
return fileDescriptor_3eaf2c85e69e9ea4, []int{0}
}
func (m *GPUOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GPUOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GPUOptions.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *GPUOptions) XXX_Merge(src proto.Message) {
xxx_messageInfo_GPUOptions.Merge(m, src)
}
func (m *GPUOptions) XXX_Size() int {
return m.Size()
}
func (m *GPUOptions) XXX_DiscardUnknown() {
xxx_messageInfo_GPUOptions.DiscardUnknown(m)
}
var xxx_messageInfo_GPUOptions proto.InternalMessageInfo
func (m *GPUOptions) GetPerProcessGpuMemoryFraction() float64 {
if m != nil {
return m.PerProcessGpuMemoryFraction
}
return 0
}
func (m *GPUOptions) GetAllowGrowth() bool {
if m != nil {
return m.AllowGrowth
}
return false
}
func (m *GPUOptions) GetAllocatorType() string {
if m != nil {
return m.AllocatorType
}
return ""
}
func (m *GPUOptions) GetDeferredDeletionBytes() int64 {
if m != nil {
return m.DeferredDeletionBytes
}
return 0
}
func (m *GPUOptions) GetVisibleDeviceList() string {
if m != nil {
return m.VisibleDeviceList
}
return ""
}
func (m *GPUOptions) GetPollingActiveDelayUsecs() int32 {
if m != nil {
return m.PollingActiveDelayUsecs
}
return 0
}
func (m *GPUOptions) GetPollingInactiveDelayMsecs() int32 {
if m != nil {
return m.PollingInactiveDelayMsecs
}
return 0
}
func (m *GPUOptions) GetForceGpuCompatible() bool {
if m != nil {
return m.ForceGpuCompatible
}
return false
}
func (m *GPUOptions) GetExperimental() *GPUOptions_Experimental {
if m != nil {
return m.Experimental
}
return nil
}
type GPUOptions_Experimental struct {
// The multi virtual device settings. If empty (not set), it will create
// a single virtual device on each visible GPU, according to the settings
// in "visible_device_list" above. Otherwise, the number of elements in the
// list must be the same as the number of visible GPUs (after
// "visible_device_list" filtering if it is set), and the string represented
// device names (e.g. /device:GPU:<id>) will refer to the virtual
// devices and have the <id> field assigned sequentially starting from 0,
// according to the order they appear in this list and the "memory_limit"
// list inside each element. For example,
// visible_device_list = "1,0"
// virtual_devices { memory_limit: 1GB memory_limit: 2GB }
// virtual_devices {}
// will create three virtual devices as:
// /device:GPU:0 -> visible GPU 1 with 1GB memory
// /device:GPU:1 -> visible GPU 1 with 2GB memory
// /device:GPU:2 -> visible GPU 0 with all available memory
//
// NOTE:
// 1. It's invalid to set both this and "per_process_gpu_memory_fraction"
// at the same time.
// 2. Currently this setting is per-process, not per-session. Using
// different settings in different sessions within same process will
// result in undefined behavior.
VirtualDevices []*GPUOptions_Experimental_VirtualDevices `protobuf:"bytes,1,rep,name=virtual_devices,json=virtualDevices,proto3" json:"virtual_devices,omitempty"`
// If true, uses CUDA unified memory for memory allocations. If
// per_process_gpu_memory_fraction option is greater than 1.0, then unified
// memory is used regardless of the value for this field. See comments for
// per_process_gpu_memory_fraction field for more details and requirements
// of the unified memory. This option is useful to oversubscribe memory if
// multiple processes are sharing a single GPU while individually using less
// than 1.0 per process memory fraction.
UseUnifiedMemory bool `protobuf:"varint,2,opt,name=use_unified_memory,json=useUnifiedMemory,proto3" json:"use_unified_memory,omitempty"`
// If > 1, the number of device-to-device copy streams to create
// for each GPUDevice. Default value is 0, which is automatically
// converted to 1.
NumDevToDevCopyStreams int32 `protobuf:"varint,3,opt,name=num_dev_to_dev_copy_streams,json=numDevToDevCopyStreams,proto3" json:"num_dev_to_dev_copy_streams,omitempty"`
}
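// Illustrative sketch, not part of the generated code: splitting one visible
// GPU into two virtual devices with fixed memory limits, mirroring the
// virtual_devices example in the comments above. The 2048/4096 MB limits are
// assumptions for demonstration only.
func exampleVirtualDevices() *GPUOptions {
	return &GPUOptions{
		Experimental: &GPUOptions_Experimental{
			VirtualDevices: []*GPUOptions_Experimental_VirtualDevices{
				// Two virtual devices carved out of the first visible GPU.
				{MemoryLimitMb: []float32{2048, 4096}},
			},
		},
	}
}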
func (m *GPUOptions_Experimental) Reset() { *m = GPUOptions_Experimental{} }
func (m *GPUOptions_Experimental) String() string { return proto.CompactTextString(m) }
func (*GPUOptions_Experimental) ProtoMessage() {}
func (*GPUOptions_Experimental) Descriptor() ([]byte, []int) {
return fileDescriptor_3eaf2c85e69e9ea4, []int{0, 0}
}
func (m *GPUOptions_Experimental) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GPUOptions_Experimental) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GPUOptions_Experimental.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *GPUOptions_Experimental) XXX_Merge(src proto.Message) {
xxx_messageInfo_GPUOptions_Experimental.Merge(m, src)
}
func (m *GPUOptions_Experimental) XXX_Size() int {
return m.Size()
}
func (m *GPUOptions_Experimental) XXX_DiscardUnknown() {
xxx_messageInfo_GPUOptions_Experimental.DiscardUnknown(m)
}
var xxx_messageInfo_GPUOptions_Experimental proto.InternalMessageInfo
func (m *GPUOptions_Experimental) GetVirtualDevices() []*GPUOptions_Experimental_VirtualDevices {
if m != nil {
return m.VirtualDevices
}
return nil
}
func (m *GPUOptions_Experimental) GetUseUnifiedMemory() bool {
if m != nil {
return m.UseUnifiedMemory
}
return false
}
func (m *GPUOptions_Experimental) GetNumDevToDevCopyStreams() int32 {
if m != nil {
return m.NumDevToDevCopyStreams
}
return 0
}
// Configuration for breaking down a visible GPU into multiple "virtual"
// devices.
type GPUOptions_Experimental_VirtualDevices struct {
// Per "virtual" device memory limit, in MB. The number of elements in
// the list is the number of virtual devices to create on the
// corresponding visible GPU (see "virtual_devices" below).
// If empty, it will create a single virtual device taking all available
// memory from the device.
//
// For the concept of "visible" and "virtual" GPU, see the comments for
// "visible_device_list" above for more information.
MemoryLimitMb []float32 `protobuf:"fixed32,1,rep,packed,name=memory_limit_mb,json=memoryLimitMb,proto3" json:"memory_limit_mb,omitempty"`
}
func (m *GPUOptions_Experimental_VirtualDevices) Reset() {
*m = GPUOptions_Experimental_VirtualDevices{}
}
func (m *GPUOptions_Experimental_VirtualDevices) String() string { return proto.CompactTextString(m) }
func (*GPUOptions_Experimental_VirtualDevices) ProtoMessage() {}
func (*GPUOptions_Experimental_VirtualDevices) Descriptor() ([]byte, []int) {
return fileDescriptor_3eaf2c85e69e9ea4, []int{0, 0, 0}
}
func (m *GPUOptions_Experimental_VirtualDevices) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GPUOptions_Experimental_VirtualDevices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GPUOptions_Experimental_VirtualDevices.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *GPUOptions_Experimental_VirtualDevices) XXX_Merge(src proto.Message) {
xxx_messageInfo_GPUOptions_Experimental_VirtualDevices.Merge(m, src)
}
func (m *GPUOptions_Experimental_VirtualDevices) XXX_Size() int {
return m.Size()
}
func (m *GPUOptions_Experimental_VirtualDevices) XXX_DiscardUnknown() {
xxx_messageInfo_GPUOptions_Experimental_VirtualDevices.DiscardUnknown(m)
}
var xxx_messageInfo_GPUOptions_Experimental_VirtualDevices proto.InternalMessageInfo
func (m *GPUOptions_Experimental_VirtualDevices) GetMemoryLimitMb() []float32 {
if m != nil {
return m.MemoryLimitMb
}
return nil
}
// Options passed to the graph optimizer
type OptimizerOptions struct {
// If true, optimize the graph using common subexpression elimination.
DoCommonSubexpressionElimination bool `protobuf:"varint,1,opt,name=do_common_subexpression_elimination,json=doCommonSubexpressionElimination,proto3" json:"do_common_subexpression_elimination,omitempty"`
// If true, perform constant folding optimization on the graph.
DoConstantFolding bool `protobuf:"varint,2,opt,name=do_constant_folding,json=doConstantFolding,proto3" json:"do_constant_folding,omitempty"`
// Constant folding optimization replaces tensors whose values can be
// predetermined, with constant nodes. To avoid inserting too large constants,
// the size of each constant created can be limited. If this value is zero, a
// default limit of 10 MiB will be applied. If constant folding optimization
// is disabled, this value is ignored.
MaxFoldedConstantInBytes int64 `protobuf:"varint,6,opt,name=max_folded_constant_in_bytes,json=maxFoldedConstantInBytes,proto3" json:"max_folded_constant_in_bytes,omitempty"`
// If true, perform function inlining on the graph.
DoFunctionInlining bool `protobuf:"varint,4,opt,name=do_function_inlining,json=doFunctionInlining,proto3" json:"do_function_inlining,omitempty"`
// Overall optimization level. The actual optimizations applied will be the
// logical OR of the flags that this level implies and any flags already set.
OptLevel OptimizerOptions_Level `protobuf:"varint,3,opt,name=opt_level,json=optLevel,proto3,enum=tensorflow.OptimizerOptions_Level" json:"opt_level,omitempty"`
GlobalJitLevel OptimizerOptions_GlobalJitLevel `protobuf:"varint,5,opt,name=global_jit_level,json=globalJitLevel,proto3,enum=tensorflow.OptimizerOptions_GlobalJitLevel" json:"global_jit_level,omitempty"`
}
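// Illustrative sketch, not part of the generated code: an OptimizerOptions
// message that keeps constant folding on, drops to the L0 optimization level,
// and enables the experimental JIT at its first level. The combination is an
// assumption for demonstration only.
func exampleOptimizerOptions() *OptimizerOptions {
	return &OptimizerOptions{
		DoConstantFolding: true,
		OptLevel:          OptimizerOptions_L0,
		GlobalJitLevel:    OptimizerOptions_ON_1,
	}
}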
func (m *OptimizerOptions) Reset() { *m = OptimizerOptions{} }
func (m *OptimizerOptions) String() string { return proto.CompactTextString(m) }
func (*OptimizerOptions) ProtoMessage() {}
func (*OptimizerOptions) Descriptor() ([]byte, []int) {
return fileDescriptor_3eaf2c85e69e9ea4, []int{1}
}
func (m *OptimizerOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *OptimizerOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_OptimizerOptions.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *OptimizerOptions) XXX_Merge(src proto.Message) {
xxx_messageInfo_OptimizerOptions.Merge(m, src)
}
func (m *OptimizerOptions) XXX_Size() int {
return m.Size()
}
func (m *OptimizerOptions) XXX_DiscardUnknown() {
xxx_messageInfo_OptimizerOptions.DiscardUnknown(m)
}
var xxx_messageInfo_OptimizerOptions proto.InternalMessageInfo
func (m *OptimizerOptions) GetDoCommonSubexpressionElimination() bool {
if m != nil {
return m.DoCommonSubexpressionElimination
}
return false
}
func (m *OptimizerOptions) GetDoConstantFolding() bool {
if m != nil {
return m.DoConstantFolding
}
return false
}
func (m *OptimizerOptions) GetMaxFoldedConstantInBytes() int64 {
if m != nil {
return m.MaxFoldedConstantInBytes
}
return 0
}
func (m *OptimizerOptions) GetDoFunctionInlining() bool {
if m != nil {
return m.DoFunctionInlining
}
return false
}
func (m *OptimizerOptions) GetOptLevel() OptimizerOptions_Level {
if m != nil {
return m.OptLevel
}
return OptimizerOptions_L1
}
func (m *OptimizerOptions) GetGlobalJitLevel() OptimizerOptions_GlobalJitLevel {
if m != nil {
return m.GlobalJitLevel
}
return OptimizerOptions_DEFAULT
}
type GraphOptions struct {
// If true, use control flow to schedule the activation of Recv nodes.
// (Currently ignored.)
EnableRecvScheduling bool `protobuf:"varint,2,opt,name=enable_recv_scheduling,json=enableRecvScheduling,proto3" json:"enable_recv_scheduling,omitempty"`
// Options controlling how graph is optimized.
OptimizerOptions *OptimizerOptions `protobuf:"bytes,3,opt,name=optimizer_options,json=optimizerOptions,proto3" json:"optimizer_options,omitempty"`
// The number of steps to run before returning a cost model detailing
// the memory usage and performance of each node of the graph. 0 means
// no cost model.
BuildCostModel int64 `protobuf:"varint,4,opt,name=build_cost_model,json=buildCostModel,proto3" json:"build_cost_model,omitempty"`
// The number of steps to skip before collecting statistics for the
// cost model.
BuildCostModelAfter int64 `protobuf:"varint,9,opt,name=build_cost_model_after,json=buildCostModelAfter,proto3" json:"build_cost_model_after,omitempty"`
// Annotate each Node with Op output shape data, to the extent it can
// be statically inferred.
InferShapes bool `protobuf:"varint,5,opt,name=infer_shapes,json=inferShapes,proto3" json:"infer_shapes,omitempty"`
// Only place the subgraphs that are run, rather than the entire graph.
//
// This is useful for interactive graph building, where one might
// produce graphs that cannot be placed during the debugging
// process. In particular, it allows the client to continue work in
// a session after adding a node to a graph whose placement
// constraints are unsatisfiable.
PlacePrunedGraph bool `protobuf:"varint,6,opt,name=place_pruned_graph,json=placePrunedGraph,proto3" json:"place_pruned_graph,omitempty"`
// If true, transfer float values between processes as bfloat16.
EnableBfloat16Sendrecv bool `protobuf:"varint,7,opt,name=enable_bfloat16_sendrecv,json=enableBfloat16Sendrecv,proto3" json:"enable_bfloat16_sendrecv,omitempty"`
// If > 0, record a timeline every this many steps.
// EXPERIMENTAL: This currently has no effect in MasterSession.
TimelineStep int32 `protobuf:"varint,8,opt,name=timeline_step,json=timelineStep,proto3" json:"timeline_step,omitempty"`
// Options that control the type and amount of graph rewriting.
// Not currently configurable via the public Python API (i.e. there is no API
// stability guarantee if you import RewriterConfig explicitly).
RewriteOptions *RewriterConfig `protobuf:"bytes,10,opt,name=rewrite_options,json=rewriteOptions,proto3" json:"rewrite_options,omitempty"`
}
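// Illustrative sketch, not part of the generated code: a GraphOptions message
// that annotates nodes with inferred shapes, records a timeline every 100
// steps, and nests optimizer settings like those above. The step count is an
// assumption for demonstration only.
func exampleGraphOptions() *GraphOptions {
	return &GraphOptions{
		InferShapes:      true,
		TimelineStep:     100,
		OptimizerOptions: &OptimizerOptions{DoFunctionInlining: true},
	}
}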
func (m *GraphOptions) Reset() { *m = GraphOptions{} }
func (m *GraphOptions) String() string { return proto.CompactTextString(m) }
func (*GraphOptions) ProtoMessage() {}
func (*GraphOptions) Descriptor() ([]byte, []int) {
return fileDescriptor_3eaf2c85e69e9ea4, []int{2}
}
func (m *GraphOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *GraphOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_GraphOptions.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *GraphOptions) XXX_Merge(src proto.Message) {
xxx_messageInfo_GraphOptions.Merge(m, src)
}
func (m *GraphOptions) XXX_Size() int {
return m.Size()
}
func (m *GraphOptions) XXX_DiscardUnknown() {
xxx_messageInfo_GraphOptions.DiscardUnknown(m)
}
var xxx_messageInfo_GraphOptions proto.InternalMessageInfo
func (m *GraphOptions) GetEnableRecvScheduling() bool {
if m != nil {
return m.EnableRecvScheduling
}
return false
}
func (m *GraphOptions) GetOptimizerOptions() *OptimizerOptions {
if m != nil {
return m.OptimizerOptions
}
return nil
}
func (m *GraphOptions) GetBuildCostModel() int64 {
if m != nil {
return m.BuildCostModel
}
return 0
}
func (m *GraphOptions) GetBuildCostModelAfter() int64 {
if m != nil {
return m.BuildCostModelAfter
}
return 0
}
func (m *GraphOptions) GetInferShapes() bool {
if m != nil {
return m.InferShapes
}
return false
}
func (m *GraphOptions) GetPlacePrunedGraph() bool {
if m != nil {
return m.PlacePrunedGraph
}
return false
}
func (m *GraphOptions) GetEnableBfloat16Sendrecv() bool {
if m != nil {
return m.EnableBfloat16Sendrecv
}
return false
}
func (m *GraphOptions) GetTimelineStep() int32 {
if m != nil {
return m.TimelineStep
}
return 0
}
func (m *GraphOptions) GetRewriteOptions() *RewriterConfig {
if m != nil {
return m.RewriteOptions
}
return nil
}
type ThreadPoolOptionProto struct {
// The number of threads in the pool.
//
// 0 means the system picks a value based on where this option proto is used
// (see the declaration of the specific field for more info).
NumThreads int32 `protobuf:"varint,1,opt,name=num_threads,json=numThreads,proto3" json:"num_threads,omitempty"`
// The global name of the threadpool.
//
// If empty, then the threadpool is made and used according to the scope it's
// in - e.g., for a session threadpool, it is used by that session only.
//
// If non-empty, then:
// - a global threadpool associated with this name is looked
// up or created. This allows, for example, sharing one threadpool across
// many sessions (e.g., like the default behavior, if
// inter_op_parallelism_threads is not configured), but still partitioning
// into a large and small pool.
// - if the threadpool for this global_name already exists, then it is an
// error if the existing pool was created using a different num_threads
// value as is specified on this call.
// - threadpools created this way are never garbage collected.
GlobalName string `protobuf:"bytes,2,opt,name=global_name,json=globalName,proto3" json:"global_name,omitempty"`
}
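// Illustrative sketch, not part of the generated code: a named global thread
// pool that multiple sessions can share, as described for global_name above.
// The pool name and thread count are assumptions for demonstration only.
func exampleThreadPool() *ThreadPoolOptionProto {
	return &ThreadPoolOptionProto{
		NumThreads: 2,
		GlobalName: "low_priority_pool",
	}
}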
func (m *ThreadPoolOptionProto) Reset() { *m = ThreadPoolOptionProto{} }
func (m *ThreadPoolOptionProto) String() string { return proto.CompactTextString(m) }
func (*ThreadPoolOptionProto) ProtoMessage() {}
func (*ThreadPoolOptionProto) Descriptor() ([]byte, []int) {
return fileDescriptor_3eaf2c85e69e9ea4, []int{3}
}
func (m *ThreadPoolOptionProto) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ThreadPoolOptionProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ThreadPoolOptionProto.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ThreadPoolOptionProto) XXX_Merge(src proto.Message) {
xxx_messageInfo_ThreadPoolOptionProto.Merge(m, src)
}
func (m *ThreadPoolOptionProto) XXX_Size() int {
return m.Size()
}
func (m *ThreadPoolOptionProto) XXX_DiscardUnknown() {
xxx_messageInfo_ThreadPoolOptionProto.DiscardUnknown(m)
}
var xxx_messageInfo_ThreadPoolOptionProto proto.InternalMessageInfo
func (m *ThreadPoolOptionProto) GetNumThreads() int32 {
if m != nil {
return m.NumThreads
}
return 0
}
func (m *ThreadPoolOptionProto) GetGlobalName() string {
if m != nil {
return m.GlobalName
}
return ""
}
type RPCOptions struct {
// If true, always use RPC to contact the session target.
//
// If false (the default option), TensorFlow may use an optimized
// transport for client-master communication that avoids the RPC
// stack. This option is primarily used for testing the RPC stack.
UseRpcForInprocessMaster bool `protobuf:"varint,1,opt,name=use_rpc_for_inprocess_master,json=useRpcForInprocessMaster,proto3" json:"use_rpc_for_inprocess_master,omitempty"`
}
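// Illustrative sketch, not part of the generated code: forcing client-master
// communication through the RPC stack, e.g. when exercising the RPC path in
// tests rather than the optimized in-process transport.
func exampleRPCOptions() *RPCOptions {
	return &RPCOptions{UseRpcForInprocessMaster: true}
}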
func (m *RPCOptions) Reset() { *m = RPCOptions{} }
func (m *RPCOptions) String() string { return proto.CompactTextString(m) }
func (*RPCOptions) ProtoMessage() {}
func (*RPCOptions) Descriptor() ([]byte, []int) {
return fileDescriptor_3eaf2c85e69e9ea4, []int{4}
}
func (m *RPCOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *RPCOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_RPCOptions.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *RPCOptions) XXX_Merge(src proto.Message) {
xxx_messageInfo_RPCOptions.Merge(m, src)
}
func (m *RPCOptions) XXX_Size() int {
return m.Size()
}
func (m *RPCOptions) XXX_DiscardUnknown() {
xxx_messageInfo_RPCOptions.DiscardUnknown(m)
}
var xxx_messageInfo_RPCOptions proto.InternalMessageInfo
func (m *RPCOptions) GetUseRpcForInprocessMaster() bool {
if m != nil {
return m.UseRpcForInprocessMaster
}
return false
}
// Session configuration parameters.
// The system picks appropriate values for fields that are not set.
type ConfigProto struct {
// Map from device type name (e.g., "CPU" or "GPU") to maximum
// number of devices of that type to use. If a particular device
// type is not found in the map, the system picks an appropriate
// number.
DeviceCount map[string]int32 `protobuf:"bytes,1,rep,name=device_count,json=deviceCount,proto3" json:"device_count,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
// The execution of an individual op (for some op types) can be
// parallelized on a pool of intra_op_parallelism_threads.
// 0 means the system picks an appropriate number.
IntraOpParallelismThreads int32 `protobuf:"varint,2,opt,name=intra_op_parallelism_threads,json=intraOpParallelismThreads,proto3" json:"intra_op_parallelism_threads,omitempty"`
// Nodes that perform blocking operations are enqueued on a pool of
// inter_op_parallelism_threads available in each process.
//
// 0 means the system picks an appropriate number.
//
// Note that the first Session created in the process sets the
// number of threads for all future sessions unless use_per_session_threads is
// true or session_inter_op_thread_pool is configured.
InterOpParallelismThreads int32 `protobuf:"varint,5,opt,name=inter_op_parallelism_threads,json=interOpParallelismThreads,proto3" json:"inter_op_parallelism_threads,omitempty"`
// If true, use a new set of threads for this session rather than the global
// pool of threads. Only supported by direct sessions.
//
// If false, use the global threads created by the first session, or the
// per-session thread pools configured by session_inter_op_thread_pool.
//
// This option is deprecated. The same effect can be achieved by setting
// session_inter_op_thread_pool to have one element, whose num_threads equals
// inter_op_parallelism_threads.
UsePerSessionThreads bool `protobuf:"varint,9,opt,name=use_per_session_threads,json=usePerSessionThreads,proto3" json:"use_per_session_threads,omitempty"`
// This option is experimental - it may be replaced with a different mechanism
// in the future.
//
// Configures session thread pools. If this is configured, then RunOptions for
// a Run call can select the thread pool to use.
//
// The intended use is for when some session invocations need to run in a
// background pool limited to a small number of threads:
// - For example, a session may be configured to have one large pool (for
// regular compute) and one small pool (for periodic, low priority work);
// using the small pool is currently the mechanism for limiting the inter-op
// parallelism of the low priority work. Note that it does not limit the
// parallelism of work spawned by a single op kernel implementation.
// - Using this setting is normally not needed in training, but may help some
// serving use cases.
// - It is also generally recommended to set the global_name field of this
// proto, to avoid creating multiple large pools. It is typically better to
// run the non-low-priority work, even across sessions, in a single large
// pool.
SessionInterOpThreadPool []*ThreadPoolOptionProto `protobuf:"bytes,12,rep,name=session_inter_op_thread_pool,json=sessionInterOpThreadPool,proto3" json:"session_inter_op_thread_pool,omitempty"`
// Assignment of Nodes to Devices is recomputed every placement_period
// steps until the system warms up (at which point the recomputation
// typically slows down automatically).
PlacementPeriod int32 `protobuf:"varint,3,opt,name=placement_period,json=placementPeriod,proto3" json:"placement_period,omitempty"`
// When any filters are present, sessions will ignore all devices which do not
// match the filters. Each filter can be partially specified, e.g. "/job:ps"
// "/job:worker/replica:3", etc.
DeviceFilters []string `protobuf:"bytes,4,rep,name=device_filters,json=deviceFilters,proto3" json:"device_filters,omitempty"`
// Options that apply to all GPUs.
GpuOptions *GPUOptions `protobuf:"bytes,6,opt,name=gpu_options,json=gpuOptions,proto3" json:"gpu_options,omitempty"`
// Whether soft placement is allowed. If allow_soft_placement is true,
// an op will be placed on CPU if
// 1. there's no GPU implementation for the OP
// or
// 2. no GPU devices are known or registered
// or
// 3. need to co-locate with reftype input(s) which are from CPU.
AllowSoftPlacement bool `protobuf:"varint,7,opt,name=allow_soft_placement,json=allowSoftPlacement,proto3" json:"allow_soft_placement,omitempty"`
// Whether device placements should be logged.
LogDevicePlacement bool `protobuf:"varint,8,opt,name=log_device_placement,json=logDevicePlacement,proto3" json:"log_device_placement,omitempty"`
// Options that apply to all graphs.
GraphOptions *GraphOptions `protobuf:"bytes,10,opt,name=graph_options,json=graphOptions,proto3" json:"graph_options,omitempty"`
// Global timeout for all blocking operations in this session. If non-zero,
// and not overridden on a per-operation basis, this value will be used as the
// deadline for all blocking operations.
OperationTimeoutInMs int64 `protobuf:"varint,11,opt,name=operation_timeout_in_ms,json=operationTimeoutInMs,proto3" json:"operation_timeout_in_ms,omitempty"`
// Options that apply when this session uses the distributed runtime.
RpcOptions *RPCOptions `protobuf:"bytes,13,opt,name=rpc_options,json=rpcOptions,proto3" json:"rpc_options,omitempty"`
// Optional list of all workers to use in this session.
ClusterDef *ClusterDef `protobuf:"bytes,14,opt,name=cluster_def,json=clusterDef,proto3" json:"cluster_def,omitempty"`
// If true, any resources such as Variables used in the session will not be
// shared with other sessions.
IsolateSessionState bool `protobuf:"varint,15,opt,name=isolate_session_state,json=isolateSessionState,proto3" json:"isolate_session_state,omitempty"`
Experimental *ConfigProto_Experimental `protobuf:"bytes,16,opt,name=experimental,proto3" json:"experimental,omitempty"`
}
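// Illustrative sketch, not part of the generated code: assembling a session
// configuration from the messages above: one CPU and one GPU device, soft
// placement, a bounded intra-op thread pool, GPU options that allow memory
// growth, and a global operation timeout, then serializing it with
// proto.Marshal. All concrete values are assumptions for demonstration only.
func exampleConfigProto() ([]byte, error) {
	cfg := &ConfigProto{
		DeviceCount:               map[string]int32{"CPU": 1, "GPU": 1},
		IntraOpParallelismThreads: 4,
		AllowSoftPlacement:        true,
		GpuOptions:                &GPUOptions{AllowGrowth: true},
		OperationTimeoutInMs:      60000,
	}
	return proto.Marshal(cfg)
}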
func (m *ConfigProto) Reset() { *m = ConfigProto{} }
func (m *ConfigProto) String() string { return proto.CompactTextString(m) }
func (*ConfigProto) ProtoMessage() {}
func (*ConfigProto) Descriptor() ([]byte, []int) {
return fileDescriptor_3eaf2c85e69e9ea4, []int{5}
}
func (m *ConfigProto) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ConfigProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ConfigProto.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ConfigProto) XXX_Merge(src proto.Message) {
xxx_messageInfo_ConfigProto.Merge(m, src)
}
func (m *ConfigProto) XXX_Size() int {
return m.Size()
}
func (m *ConfigProto) XXX_DiscardUnknown() {
xxx_messageInfo_ConfigProto.DiscardUnknown(m)
}
var xxx_messageInfo_ConfigProto proto.InternalMessageInfo
func (m *ConfigProto) GetDeviceCount() map[string]int32 {
if m != nil {
return m.DeviceCount
}
return nil
}
func (m *ConfigProto) GetIntraOpParallelismThreads() int32 {
if m != nil {
return m.IntraOpParallelismThreads
}
return 0
}
func (m *ConfigProto) GetInterOpParallelismThreads() int32 {
if m != nil {
return m.InterOpParallelismThreads
}
return 0
}
func (m *ConfigProto) GetUsePerSessionThreads() bool {
if m != nil {
return m.UsePerSessionThreads
}
return false
}
func (m *ConfigProto) GetSessionInterOpThreadPool() []*ThreadPoolOptionProto {
if m != nil {
return m.SessionInterOpThreadPool
}
return nil
}
func (m *ConfigProto) GetPlacementPeriod() int32 {
if m != nil {
return m.PlacementPeriod
}
return 0
}
func (m *ConfigProto) GetDeviceFilters() []string {
if m != nil {
return m.DeviceFilters
}
return nil
}
func (m *ConfigProto) GetGpuOptions() *GPUOptions {
if m != nil {
return m.GpuOptions
}
return nil
}
func (m *ConfigProto) GetAllowSoftPlacement() bool {
if m != nil {
return m.AllowSoftPlacement
}
return false
}
func (m *ConfigProto) GetLogDevicePlacement() bool {
if m != nil {
return m.LogDevicePlacement
}
return false
}
func (m *ConfigProto) GetGraphOptions() *GraphOptions {
if m != nil {
return m.GraphOptions
}
return nil
}
func (m *ConfigProto) GetOperationTimeoutInMs() int64 {
if m != nil {
return m.OperationTimeoutInMs
}
return 0
}