musiccaps
1 row where aspect_list contains "christian rap", aspect_list contains "keyboard harmony" and aspect_list contains "male singer"
This data as json, CSV (advanced)
ytid ▼ | url | caption | aspect_list | audioset_names | author_id | start_s | end_s | is_balanced_subset | is_audioset_eval | audioset_ids |
---|---|---|---|---|---|---|---|---|---|---|
dwAo0dKCyBI | A male vocalist sings this rap. The tempo is medium with the keyboard harmony, digital drum rhythm and boomy bass. The song is passionate, youthful, emotional, buoyant, insightful, intense and story telling in a rhythmic patter. This song is contemporary Rap/Hip. | ["male singer", "medium tempo", "hip-hop", "rap", "insightful", "clean lyrics", "simple lyrics", "christian rap", "youthful", "teen rap", "bouncy", "punchy", "steady bass", "beat boxing", "keyboard harmony", "drum machine", "digital drum rhythm", "rhythmic patter", "rhythmic speech", "passionate", "emotional", "impactful", "intense"] | ["Afrobeat", "Christian music", "Music", "Rapping", "Reggae", "Rhythm and blues", "Hip hop music"] | 7 | 30 | 40 | 1 | 1 | ["/m/0145m", "/m/02mscn", "/m/04rlf", "/m/06bxc", "/m/06cqb", "/m/06j6l", "/m/0glt670"] |
Advanced export
JSON shape: default, array, newline-delimited, object
-- Schema for the MusicCaps export: one row per captioned YouTube audio clip.
CREATE TABLE [musiccaps] (
    -- Primary key: YouTube video id the clip was taken from.
    [ytid] TEXT PRIMARY KEY,
    [url] TEXT,
    -- Free-text description of the audio clip.
    [caption] TEXT,
    -- JSON-encoded array of descriptive tags, serialized as TEXT
    -- (see the rendered row above: ["male singer", ...]).
    [aspect_list] TEXT,
    -- JSON-encoded array of AudioSet label names, serialized as TEXT.
    [audioset_names] TEXT,
    [author_id] TEXT,
    -- Clip start/end offsets. NOTE(review): declared TEXT although the
    -- visible values (30, 40) look numeric — confirm before adding casts.
    [start_s] TEXT,
    [end_s] TEXT,
    -- 0/1 flags for dataset-subset membership.
    [is_balanced_subset] INTEGER,
    [is_audioset_eval] INTEGER,
    -- JSON-encoded array of AudioSet MID identifiers (e.g. "/m/0145m").
    [audioset_ids] TEXT
);