Upload 4 files
.gitattributes
CHANGED
@@ -56,3 +56,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+example_dataset/1.0.0/example_dataset-train.tfrecord-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
example_dataset/.DS_Store
ADDED
Binary file (6.15 kB).
example_dataset/1.0.0/dataset_info.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "citation": "// TODO(example_dataset): BibTeX citation",
+  "description": "TODO(example_dataset): Markdown description of your dataset.\nDescription is **formatted** as markdown.\n\nIt should also contain any processing which has been applied (if any),\n(e.g. corrupted example skipped, images cropped,...):",
+  "fileFormat": "tfrecord",
+  "moduleName": "rlds_np_rollout.rlds_np_rollout_dataset_builder",
+  "name": "example_dataset",
+  "releaseNotes": {
+    "1.0.0": "Initial release."
+  },
+  "splits": [
+    {
+      "filepathTemplate": "{DATASET}-{SPLIT}.{FILEFORMAT}-{SHARD_X_OF_Y}",
+      "name": "train",
+      "numBytes": "196209009",
+      "shardLengths": [
+        "1"
+      ]
+    }
+  ],
+  "version": "1.0.0"
+}
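The split metadata above can be inspected without TensorFlow. The following is a minimal sketch (not part of this commit) that assumes the repository has been cloned locally and simply parses dataset_info.json with the standard library to print the shard layout.

```python
import json
from pathlib import Path

# Hypothetical local path; adjust to wherever the repo was cloned.
info_path = Path("example_dataset/1.0.0/dataset_info.json")
info = json.loads(info_path.read_text())

print(info["name"], info["version"])  # example_dataset 1.0.0
for split in info["splits"]:
    # numBytes and shardLengths are stored as strings in TFDS metadata.
    print(split["name"], int(split["numBytes"]), split["shardLengths"])
```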
example_dataset/1.0.0/example_dataset-train.tfrecord-00000-of-00001
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34aa73ab0f005905907699464b2a9da4bc4d57a009e683bf1ecbbff4e1291fbf
+size 196209025
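Note that what is committed here is only the Git LFS pointer (oid and size), not the shard itself. After `git lfs pull` fetches the real file, a check like the sketch below (standard library only, local path assumed) can confirm that the downloaded shard matches the recorded sha256 and byte size.

```python
import hashlib
from pathlib import Path

# Hypothetical local path; adjust to your checkout.
shard = Path("example_dataset/1.0.0/example_dataset-train.tfrecord-00000-of-00001")
expected_oid = "34aa73ab0f005905907699464b2a9da4bc4d57a009e683bf1ecbbff4e1291fbf"
expected_size = 196209025

# A size mismatch usually means the file is still an LFS pointer, not the shard.
assert shard.stat().st_size == expected_size, "size mismatch (still an LFS pointer?)"
digest = hashlib.sha256(shard.read_bytes()).hexdigest()
assert digest == expected_oid, "sha256 mismatch"
print("shard verified")
```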
example_dataset/1.0.0/features.json
ADDED
@@ -0,0 +1,139 @@
+{
+  "featuresDict": {
+    "features": {
+      "episode_metadata": {
+        "featuresDict": {
+          "features": {
+            "file_path": {
+              "description": "Path to the original data file.",
+              "pythonClassName": "tensorflow_datasets.core.features.text_feature.Text",
+              "text": {}
+            }
+          }
+        },
+        "pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict"
+      },
+      "steps": {
+        "pythonClassName": "tensorflow_datasets.core.features.dataset_feature.Dataset",
+        "sequence": {
+          "feature": {
+            "featuresDict": {
+              "features": {
+                "action": {
+                  "description": "Robot action, consists of [7x joint velocities, 2x gripper velocities, 1x terminate episode].",
+                  "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
+                  "tensor": {
+                    "dtype": "float32",
+                    "encoding": "none",
+                    "shape": {
+                      "dimensions": [
+                        "7"
+                      ]
+                    }
+                  }
+                },
+                "discount": {
+                  "description": "Discount if provided, default to 1.",
+                  "pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar",
+                  "tensor": {
+                    "dtype": "float32",
+                    "encoding": "none",
+                    "shape": {}
+                  }
+                },
+                "is_first": {
+                  "description": "True on first step of the episode.",
+                  "pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar",
+                  "tensor": {
+                    "dtype": "bool",
+                    "encoding": "none",
+                    "shape": {}
+                  }
+                },
+                "is_last": {
+                  "description": "True on last step of the episode.",
+                  "pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar",
+                  "tensor": {
+                    "dtype": "bool",
+                    "encoding": "none",
+                    "shape": {}
+                  }
+                },
+                "is_terminal": {
+                  "description": "True on last step of the episode if it is a terminal step, True for demos.",
+                  "pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar",
+                  "tensor": {
+                    "dtype": "bool",
+                    "encoding": "none",
+                    "shape": {}
+                  }
+                },
+                "language_embedding": {
+                  "description": "Kona language embedding. See https://tfhub.dev/google/universal-sentence-encoder-large/5",
+                  "pythonClassName": "tensorflow_datasets.core.features.tensor_feature.Tensor",
+                  "tensor": {
+                    "dtype": "float32",
+                    "encoding": "none",
+                    "shape": {
+                      "dimensions": [
+                        "512"
+                      ]
+                    }
+                  }
+                },
+                "language_instruction": {
+                  "description": "Language Instruction.",
+                  "pythonClassName": "tensorflow_datasets.core.features.text_feature.Text",
+                  "text": {}
+                },
+                "observation": {
+                  "featuresDict": {
+                    "features": {
+                      "image": {
+                        "description": "Main camera RGB observation.",
+                        "image": {
+                          "dtype": "uint8",
+                          "encodingFormat": "png",
+                          "shape": {
+                            "dimensions": [
+                              "224",
+                              "224",
+                              "3"
+                            ]
+                          }
+                        },
+                        "pythonClassName": "tensorflow_datasets.core.features.image_feature.Image"
+                      },
+                      "state": {
+                        "description": "Robot state, consists of [7x robot joint angles, 2x gripper position, 1x door opening angle].",
+                        "pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar",
+                        "tensor": {
+                          "dtype": "float32",
+                          "encoding": "none",
+                          "shape": {}
+                        }
+                      }
+                    }
+                  },
+                  "pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict"
+                },
+                "reward": {
+                  "description": "Reward if provided, 1 on final step for demos.",
+                  "pythonClassName": "tensorflow_datasets.core.features.scalar.Scalar",
+                  "tensor": {
+                    "dtype": "float32",
+                    "encoding": "none",
+                    "shape": {}
+                  }
+                }
+              }
+            },
+            "pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict"
+          },
+          "length": "-1"
+        }
+      }
+    }
+  },
+  "pythonClassName": "tensorflow_datasets.core.features.features_dict.FeaturesDict"
+}
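With features.json, dataset_info.json, and the tfrecord shard in place, the directory is a complete TFDS/RLDS dataset and should be loadable directly from disk. The snippet below is a rough usage sketch (not part of this commit) that assumes `tensorflow_datasets` is installed and the repository is cloned locally; it iterates the nested `steps` dataset and reads the observation and action features declared above.

```python
import tensorflow_datasets as tfds

# Load the builder straight from the versioned directory in this repo
# (hypothetical local path; adjust to your checkout).
builder = tfds.builder_from_directory("example_dataset/1.0.0")
ds = builder.as_dataset(split="train")

for episode in ds.take(1):
    print(episode["episode_metadata"]["file_path"].numpy())
    # `steps` is itself a tf.data.Dataset (RLDS convention).
    for step in episode["steps"]:
        image = step["observation"]["image"]         # (224, 224, 3) uint8
        action = step["action"]                      # (7,) float32
        instruction = step["language_instruction"]   # tf.string scalar
        print(image.shape, action.shape, instruction.numpy())
        break
```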