[Mesa-dev] [PATCH 3/4] glsl: add ir_cache class and functions for shader serialization
Tapani Pälli
tapani.palli at intel.com
Thu Oct 24 10:28:05 CEST 2013
The patch introduces an ir_cache class that can serialize a gl_shader
to memory and back with the help of the memory_writer and memory_map
classes.

There are also utility functions to (un)serialize gl_shader and
gl_shader_program structures. This makes it possible to implement a
shader compiler cache for individual shaders as well as the
functionality required by the OES_get_program_binary extension.
Signed-off-by: Tapani Pälli <tapani.palli at intel.com>
---
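A minimal usage sketch of the new C API, assuming the caller already has
a compiled gl_shader, its parse state, a ralloc context and the Mesa
build sha; error handling and the on-disk cache lookup are omitted:

   size_t size = 0;
   char *blob = _mesa_shader_serialize(shader, state, mesa_sha, &size);

   if (blob) {
      /* expected to return NULL on errors, e.g. a differing mesa sha */
      struct gl_shader *copy =
         _mesa_shader_unserialize(mem_ctx, blob, mesa_sha, size);
      free(blob);
   }
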
src/glsl/Makefile.sources | 2 +
src/glsl/ir_cache.h | 691 +++++++++++++++++
src/glsl/ir_cache_serialize.cpp | 967 ++++++++++++++++++++++++
src/glsl/ir_cache_unserialize.cpp | 1508 +++++++++++++++++++++++++++++++++++++
4 files changed, 3168 insertions(+)
create mode 100644 src/glsl/ir_cache.h
create mode 100644 src/glsl/ir_cache_serialize.cpp
create mode 100644 src/glsl/ir_cache_unserialize.cpp
diff --git a/src/glsl/Makefile.sources b/src/glsl/Makefile.sources
index 2f7bfa1..1a3e72e 100644
--- a/src/glsl/Makefile.sources
+++ b/src/glsl/Makefile.sources
@@ -30,6 +30,8 @@ LIBGLSL_FILES = \
$(GLSL_SRCDIR)/hir_field_selection.cpp \
$(GLSL_SRCDIR)/ir_basic_block.cpp \
$(GLSL_SRCDIR)/ir_builder.cpp \
+ $(GLSL_SRCDIR)/ir_cache_serialize.cpp \
+ $(GLSL_SRCDIR)/ir_cache_unserialize.cpp \
$(GLSL_SRCDIR)/ir_clone.cpp \
$(GLSL_SRCDIR)/ir_constant_expression.cpp \
$(GLSL_SRCDIR)/ir.cpp \
diff --git a/src/glsl/ir_cache.h b/src/glsl/ir_cache.h
new file mode 100644
index 0000000..c5f2200
--- /dev/null
+++ b/src/glsl/ir_cache.h
@@ -0,0 +1,691 @@
+/* -*- c++ -*- */
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+#ifndef IR_CACHE_H
+#define IR_CACHE_H
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include "ir.h"
+#include "main/imports.h"
+#include "glsl_parser_extras.h"
+#include "program/hash_table.h"
+
+/* cache specific debug output */
+#ifdef SHADER_CACHE_DEBUG
+#define CACHE_DEBUG(fmt, args...) printf(fmt, ## args)
+#else
+#define CACHE_DEBUG(fmt, args...) do {} while (0)
+#endif
+
+/* C API for the cache */
+#ifdef __cplusplus
+#define _EXTC extern "C"
+#else
+#define _EXTC
+#endif
+
+_EXTC char *_mesa_shader_serialize(struct gl_shader *shader,
+ struct _mesa_glsl_parse_state *state,
+ const char *mesa_sha, size_t *size);
+
+_EXTC struct gl_shader *_mesa_shader_unserialize(void *mem_ctx,
+ void *blob, const char *mesa_sha, size_t size);
+
+_EXTC char *_mesa_program_serialize(struct gl_shader_program *prog,
+ size_t *size, const char *mesa_sha);
+
+_EXTC int _mesa_program_unserialize(struct gl_shader_program *prog,
+ const GLvoid *blob, size_t size, const char *mesa_sha);
+
+#ifdef __cplusplus
+
+
+/* helper class for writing data to memory */
+struct memory_writer
+{
+public:
+ memory_writer() :
+ memory(NULL),
+ memory_p(NULL),
+ curr_size(0),
+ pos(0)
+ { }
+
+   /* NOTE: there is no dtor that frees the memory, the user is responsible */
+ void free_memory()
+ {
+ if (memory)
+ free(memory);
+ }
+
+ /* realloc more memory */
+ int grow(int32_t size)
+ {
+ char *more_mem = (char *) realloc(memory, curr_size + size);
+ if (more_mem == NULL) {
+ free(memory);
+ memory = NULL;
+ return -1;
+ } else {
+ memory = more_mem;
+ memory_p = memory + pos;
+ curr_size += size;
+ return 0;
+ }
+ }
+
+ /* write functions for different types */
+#define _WRITE_TYPE(type) inline int write(type *val) {\
+ return write(val, sizeof(*val), 1, -1);\
+}
+
+ _WRITE_TYPE(uint8_t)
+ _WRITE_TYPE(int32_t)
+ _WRITE_TYPE(uint32_t)
+ _WRITE_TYPE(bool)
+ _WRITE_TYPE(gl_texture_index)
+ _WRITE_TYPE(ir_node_type)
+ _WRITE_TYPE(ir_loop_jump::jump_mode)
+ _WRITE_TYPE(ir_expression_operation)
+ _WRITE_TYPE(GLbitfield64)
+
+ int write(const void *data, int32_t size, int32_t nmemb,
+ int32_t offset = -1)
+ {
+ int32_t amount = size * nmemb;
+ int32_t extra = 8192;
+
+      /* reallocate more if the data does not fit into the current memory */
+ if (!memory || pos > (int32_t)(curr_size - amount))
+ if (grow(amount + extra))
+ return -1;
+
+      /**
+       * By default data is written to the current position memory_p.
+       * However, if an offset value >= 0 is passed, that value is used
+       * as an offset from the start of the memory blob instead.
+       */
+ char *dst = memory_p;
+
+ if (offset > -1)
+ dst = memory + offset;
+
+ memcpy(dst, data, amount);
+
+ /* if no offset given, forward the pointer */
+ if (offset == -1) {
+ memory_p += amount;
+ pos += amount;
+ }
+ return 0;
+ }
+
+ int write_string(const char *str)
+ {
+ if (!str)
+ return -1;
+ uint32_t len = strlen(str);
+ write(&len);
+ write(str, 1, len);
+ return 0;
+ }
+
+ inline int32_t position() { return pos; }
+ inline char *mem() { return memory; }
+
+private:
+ char *memory;
+ char *memory_p;
+ int32_t curr_size;
+ int32_t pos;
+};
+
+
+/* helper for string serialization */
+struct string_data
+{
+public:
+ string_data() : len(0), data(NULL) {}
+ string_data(const char *cstr) :
+ data(NULL)
+ {
+ if (cstr) {
+ len = strlen(cstr);
+ data = _mesa_strdup(cstr);
+ }
+ }
+
+ ~string_data() {
+ if (data) {
+ free(data);
+ data = NULL;
+ }
+ }
+
+ int serialize(memory_writer &blob)
+ {
+ if (!data)
+ return -1;
+
+ blob.write(&len);
+ blob.write(data, 1, len);
+ return 0;
+ }
+
+ void set(const char *cstr)
+ {
+ if (data)
+ free(data);
+ data = _mesa_strdup(cstr);
+ len = strlen(cstr);
+ }
+
+ uint32_t len;
+ char *data;
+};
+
+
+/* data required to serialize glsl_type */
+struct glsl_type_data
+{
+public:
+ glsl_type_data() :
+ name(NULL),
+ element_type(NULL),
+ field_names(NULL),
+ field_types(NULL),
+ field_major(NULL) {}
+
+ glsl_type_data(const glsl_type *t) :
+ base_type(t->base_type),
+ length(t->length),
+ vector_elms(t->vector_elements),
+ matrix_cols(t->matrix_columns),
+ sampler_dimensionality(t->sampler_dimensionality),
+ sampler_shadow(t->sampler_shadow),
+ sampler_array(t->sampler_array),
+ sampler_type(t->sampler_type),
+ interface_packing(t->interface_packing),
+ element_type(NULL),
+ field_names(NULL),
+ field_types(NULL),
+ field_major(NULL)
+ {
+ name = new string_data(t->name);
+
+ /* for array, save element type information */
+ if (t->base_type == GLSL_TYPE_ARRAY)
+ element_type =
+ new glsl_type_data(t->element_type());
+
+ /* with structs, copy each struct field name + type */
+ else if (t->base_type == GLSL_TYPE_STRUCT) {
+
+ field_names = new string_data[t->length];
+ field_types = new glsl_type_data*[t->length];
+ field_major = new uint32_t[t->length];
+
+ glsl_struct_field *field = t->fields.structure;
+ glsl_type_data **field_t = field_types;
+ for (unsigned k = 0; k < t->length; k++, field++, field_t++) {
+ field_names[k].set(field->name);
+ *field_t = new glsl_type_data(field->type);
+ field_major[k] = field->row_major;
+ }
+ }
+ }
+
+ ~glsl_type_data() {
+ delete name;
+ delete element_type;
+ delete [] field_names;
+ delete [] field_major;
+ if (field_types) {
+ struct glsl_type_data **data = field_types;
+ for (int k = 0; k < length; k++, data++)
+ delete *data;
+ delete [] field_types;
+ }
+ }
+
+ int serialize(memory_writer &blob)
+ {
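+      /* placeholder, real length is written back to start_pos below */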
+ uint32_t ir_len = 666;
+ blob.write(&name->len);
+ blob.write(name->data, 1, name->len);
+
+ int32_t start_pos = blob.position();
+ blob.write(&ir_len);
+
+ blob.write(this, sizeof(*this), 1);
+
+ if (base_type == GLSL_TYPE_ARRAY)
+ element_type->serialize(blob);
+ else if (base_type == GLSL_TYPE_STRUCT) {
+ struct string_data *data = field_names;
+ glsl_type_data **field_t = field_types;
+ for (int k = 0; k < length; k++, data++, field_t++) {
+ data->serialize(blob);
+ (*field_t)->serialize(blob);
+ blob.write(&field_major[k]);
+ }
+ }
+
+ ir_len = blob.position() - start_pos - sizeof(ir_len);
+ blob.write(&ir_len, sizeof(ir_len), 1, start_pos);
+ return 0;
+ }
+
+ int32_t base_type;
+ int32_t length;
+ int32_t vector_elms;
+ int32_t matrix_cols;
+
+ uint32_t sampler_dimensionality;
+ uint32_t sampler_shadow;
+ uint32_t sampler_array;
+ uint32_t sampler_type;
+
+ uint32_t interface_packing;
+
+ struct string_data *name;
+
+ /* array element type */
+ struct glsl_type_data *element_type;
+
+ /* structure fields */
+ struct string_data *field_names;
+ struct glsl_type_data **field_types;
+ uint32_t *field_major;
+};
+
+
+/* helper to create a unique id from an ir_variable address */
+static uint32_t _unique_id(ir_variable *var)
+{
+ char buffer[256];
+ _mesa_snprintf(buffer, 256, "%s_%p", var->name, var);
+ return _mesa_str_checksum(buffer);
+}
+
+
+/* data required to serialize ir_variable */
+struct ir_cache_variable_data
+{
+public:
+ ir_cache_variable_data() :
+ type(NULL),
+ name(NULL),
+ unique_name(NULL),
+ state_slots(NULL) {}
+
+ ir_cache_variable_data(ir_variable *ir) :
+ unique_id(_unique_id(ir)),
+ max_array_access(ir->max_array_access),
+ ir_type(ir->ir_type),
+ mode(ir->mode),
+ location(ir->location),
+ read_only(ir->read_only),
+ centroid(ir->centroid),
+ invariant(ir->invariant),
+ interpolation(ir->interpolation),
+ origin_upper_left(ir->origin_upper_left),
+ pixel_center_integer(ir->pixel_center_integer),
+ explicit_location(ir->explicit_location),
+ explicit_index(ir->explicit_index),
+ explicit_binding(ir->explicit_binding),
+ has_initializer(ir->has_initializer),
+ depth_layout(ir->depth_layout),
+ location_frac(ir->location_frac),
+ num_state_slots(ir->num_state_slots),
+ has_constant_value(ir->constant_value ? 1 : 0),
+ has_constant_initializer(ir->constant_initializer ? 1 : 0)
+ {
+ name = new string_data(ir->name);
+
+ /* name can be NULL, see ir_print_visitor for explanation */
+ if (!ir->name)
+ name->set("parameter");
+
+ char uniq[256];
+ _mesa_snprintf(uniq, 256, "%s_%d", name->data, unique_id);
+ unique_name = new string_data(uniq);
+ type = new glsl_type_data(ir->type);
+
+ state_slots = (ir_state_slot *) malloc
+ (ir->num_state_slots * sizeof(ir->state_slots[0]));
+ memcpy(state_slots, ir->state_slots,
+ ir->num_state_slots * sizeof(ir->state_slots[0]));
+ }
+
+ ~ir_cache_variable_data()
+ {
+ delete name;
+ delete unique_name;
+ delete type;
+
+ if (state_slots)
+ free(state_slots);
+ }
+
+ int serialize(memory_writer &blob)
+ {
+ type->serialize(blob);
+ name->serialize(blob);
+ unique_name->serialize(blob);
+ blob.write(this, sizeof(*this), 1);
+
+ for (unsigned i = 0; i < num_state_slots; i++) {
+ blob.write(&state_slots[i].swizzle);
+ for (unsigned j = 0; j < 5; j++) {
+ blob.write(&state_slots[i].tokens[j]);
+ }
+ }
+ return 0;
+ }
+
+ struct glsl_type_data *type;
+ struct string_data *name;
+ struct string_data *unique_name;
+
+ uint32_t unique_id;
+
+ uint32_t max_array_access;
+ int32_t ir_type;
+ int32_t mode;
+ int32_t location;
+ uint32_t read_only;
+ uint32_t centroid;
+ uint32_t invariant;
+ uint32_t interpolation;
+ uint32_t origin_upper_left;
+ uint32_t pixel_center_integer;
+ uint32_t explicit_location;
+ uint32_t explicit_index;
+ uint32_t explicit_binding;
+ uint8_t has_initializer;
+ int32_t depth_layout;
+ uint32_t location_frac;
+
+ uint32_t num_state_slots;
+ struct ir_state_slot *state_slots;
+
+ uint8_t has_constant_value;
+ uint8_t has_constant_initializer;
+};
+
+
+/* helper class for reading serialized data from a file or from memory */
+struct memory_map
+{
+public:
+ memory_map() :
+      fd(-1),
+ cache_size(0),
+ cache_mmap(NULL),
+ cache_mmap_p(NULL) { }
+
+ /* read from disk */
+ int map(const char *path)
+ {
+ struct stat stat_info;
+ if (stat(path, &stat_info) != 0)
+ return -1;
+
+ cache_size = stat_info.st_size;
+
+ fd = open(path, O_RDONLY);
+      if (fd != -1) {
+ cache_mmap_p = cache_mmap = (char *)
+ mmap(NULL, cache_size, PROT_READ, MAP_PRIVATE, fd, 0);
+ return (cache_mmap == MAP_FAILED) ? -1 : 0;
+ }
+ return -1;
+ }
+
+ /* read from memory */
+ int map(const void *memory, size_t size)
+ {
+ cache_mmap_p = cache_mmap = (char *) memory;
+ cache_size = size;
+ return 0;
+ }
+
+   ~memory_map() {
+      /* unmap and close only if the data was mapped from a file */
+      if (cache_mmap && fd != -1) {
+         munmap(cache_mmap, cache_size);
+         close(fd);
+      }
+   }
+
+ /* move read pointer forward */
+ inline void ffwd(int len)
+ {
+ cache_mmap_p += len;
+ }
+
+ /* position of read pointer */
+ inline uint32_t position()
+ {
+ return cache_mmap_p - cache_mmap;
+ }
+
+ inline void read_string(char *str)
+ {
+ uint32_t len;
+ read(&len);
+ memcpy(str, cache_mmap_p, len);
+ str[len] = '\0';
+ ffwd(len);
+ }
+
+ /* read functions for different types */
+#define _READ_TYPE(type) inline void read(type *val) {\
+ *val = *(type *) cache_mmap_p;\
+ ffwd(sizeof(type));\
+}
+ _READ_TYPE(int32_t)
+ _READ_TYPE(uint32_t)
+ _READ_TYPE(bool)
+ _READ_TYPE(GLboolean)
+ _READ_TYPE(gl_texture_index)
+ _READ_TYPE(ir_expression_operation)
+
+ inline void read(void *dst, size_t size)
+ {
+ memcpy(dst, cache_mmap_p, size);
+ ffwd(size);
+ }
+
+   /* read and unserialize a gl_shader */
+ inline struct gl_shader *read_shader(void *mem_ctx,
+ const char *mesa_sha, size_t size)
+ {
+ struct gl_shader *sha = _mesa_shader_unserialize(mem_ctx,
+ cache_mmap_p, mesa_sha, size);
+ ffwd(size);
+ return sha;
+ }
+
+private:
+
+ int32_t fd;
+ int32_t cache_size;
+ char *cache_mmap;
+ char *cache_mmap_p;
+};
+
+
+/* class to serialize and unserialize a gl_shader */
+struct ir_cache
+{
+public:
+ ir_cache(bool prototypes = false) :
+ prototypes_only(prototypes)
+ {
+ var_ht = hash_table_ctor(0, hash_table_string_hash,
+ hash_table_string_compare);
+ }
+
+ ~ir_cache()
+ {
+ hash_table_call_foreach(this->var_ht, delete_key, NULL);
+ hash_table_dtor(this->var_ht);
+ }
+
+ /* serialize gl_shader to memory */
+ char *serialize(struct gl_shader *shader,
+ struct _mesa_glsl_parse_state *state,
+ const char *mesa_sha, size_t *size);
+
+ /* unserialize gl_shader from mapped memory */
+ struct gl_shader *unserialize(void *mem_ctx, memory_map &map,
+ uint32_t shader_size,
+ struct _mesa_glsl_parse_state *state,
+ const char *mesa_sha,
+ int *error_code);
+
+ enum cache_error {
+ GENERAL_READ_ERROR = -1,
+ DIFFERENT_MESA_SHA = -2,
+ DIFFERENT_LANG_VER = -3,
+ };
+
+ /**
+ * this method is public so that gl_shader_program
+ * unserialization can use it when reading the
+ * uniform storage
+ */
+ const glsl_type *read_glsl_type(memory_map &map,
+ struct _mesa_glsl_parse_state *state);
+
+private:
+
+ /* variables and methods required for serialization */
+
+ memory_writer blob;
+
+ bool prototypes_only;
+
+ /**
+ * writes ir_type and instruction dump size as a 'header'
+ * for each instruction before calling save_ir
+ */
+ int save(ir_instruction *ir);
+
+ int save_ir(ir_variable *ir);
+ int save_ir(ir_assignment *ir);
+ int save_ir(ir_call *ir);
+ int save_ir(ir_constant *ir);
+ int save_ir(ir_dereference_array *ir);
+ int save_ir(ir_dereference_record *ir);
+ int save_ir(ir_dereference_variable *ir);
+ int save_ir(ir_discard *ir);
+ int save_ir(ir_expression *ir);
+ int save_ir(ir_function *ir);
+ int save_ir(ir_function_signature *ir);
+ int save_ir(ir_if *ir);
+ int save_ir(ir_loop *ir);
+ int save_ir(ir_loop_jump *ir);
+ int save_ir(ir_return *ir);
+ int save_ir(ir_swizzle *ir);
+ int save_ir(ir_texture *ir);
+ int save_ir(ir_emit_vertex *ir);
+ int save_ir(ir_end_primitive *ir);
+
+
+ /* variables and methods required for unserialization */
+
+ struct _mesa_glsl_parse_state *state;
+ void *mem_ctx;
+
+ struct exec_list *top_level;
+ struct exec_list *prototypes;
+ struct exec_list *current_function;
+
+ int read_header(struct gl_shader *shader, memory_map &map,
+ const char *mesa_sha);
+ int read_prototypes(memory_map &map);
+
+ int read_instruction(struct exec_list *list, memory_map &map,
+ bool ignore = false);
+
+ int read_ir_variable(struct exec_list *list, memory_map &map);
+ int read_ir_assignment(struct exec_list *list, memory_map &map);
+ int read_ir_function(struct exec_list *list, memory_map &map);
+ int read_ir_if(struct exec_list *list, memory_map &map);
+ int read_ir_return(struct exec_list *list, memory_map &map);
+ int read_ir_call(struct exec_list *list, memory_map &map);
+ int read_ir_discard(struct exec_list *list, memory_map &map);
+ int read_ir_loop(struct exec_list *list, memory_map &map);
+ int read_ir_loop_jump(struct exec_list *list, memory_map &map);
+ int read_emit_vertex(struct exec_list *list, memory_map &map);
+ int read_end_primitive(struct exec_list *list, memory_map &map);
+
+ /* rvalue readers */
+ ir_rvalue *read_ir_rvalue(memory_map &map);
+ ir_constant *read_ir_constant(memory_map &map,
+ struct exec_list *list = NULL);
+ ir_swizzle *read_ir_swizzle(memory_map &map);
+ ir_texture *read_ir_texture(memory_map &map);
+ ir_expression *read_ir_expression(memory_map &map);
+ ir_dereference_array *read_ir_dereference_array(memory_map &map);
+ ir_dereference_record *read_ir_dereference_record(memory_map &map);
+ ir_dereference_variable *read_ir_dereference_variable(memory_map &map);
+
+   /**
+    * var_ht stores created ir_variables, each with a unique key, so
+    * that ir_dereference_variable creation can find the variable
+    */
+ struct hash_table *var_ht;
+
+   /**
+    * these 2 functions are roughly copied from string_to_uint_map;
+    * we need a hash here with a string key and a pointer value
+    */
+ void hash_store(void * value, const char *key)
+ {
+ char *dup_key = _mesa_strdup(key);
+ bool result = hash_table_replace(this->var_ht, value, dup_key);
+ if (result)
+ free(dup_key);
+ }
+
+ static void delete_key(const void *key, void *data, void *closure)
+ {
+ (void) data;
+ (void) closure;
+ free((char *)key);
+ }
+
+};
+#endif /* ifdef __cplusplus */
+
+#endif /* IR_CACHE_H */
diff --git a/src/glsl/ir_cache_serialize.cpp b/src/glsl/ir_cache_serialize.cpp
new file mode 100644
index 0000000..4da4386
--- /dev/null
+++ b/src/glsl/ir_cache_serialize.cpp
@@ -0,0 +1,967 @@
+/* -*- c++ -*- */
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "ir_cache.h"
+#include "ir_uniform.h"
+#include "main/macros.h"
+
+int
+ir_cache::save_ir(ir_variable *ir)
+{
+ ir_cache_variable_data *data = new ir_cache_variable_data(ir);
+
+ data->serialize(blob);
+
+ CACHE_DEBUG("save ir_variable [%s] id %d\n",
+ data->name->data, data->unique_id);
+
+ delete data;
+
+ if (ir->constant_value)
+ if (save(ir->constant_value))
+ return -1;
+
+ if (ir->constant_initializer)
+ if (save(ir->constant_initializer))
+ return -1;
+
+ uint8_t has_interface_type =
+ ir->is_interface_instance() ? 1 : 0;
+
+ blob.write(&has_interface_type);
+
+ if (has_interface_type) {
+ glsl_type_data *data = new glsl_type_data(ir->get_interface_type());
+ data->serialize(blob);
+ delete data;
+ }
+
+ return 0;
+}
+
+
+int
+ir_cache::save_ir(ir_dereference_array *ir)
+{
+ blob.write(&ir->array->ir_type);
+
+ save(ir->array);
+
+ blob.write(&ir->array_index->ir_type);
+
+ return save(ir->array_index);
+}
+
+
+int
+ir_cache::save_ir(ir_dereference_record *ir)
+{
+ blob.write_string(ir->field);
+ blob.write(&ir->record->ir_type);
+
+ return save(ir->record);
+}
+
+
+int
+ir_cache::save_ir(ir_dereference_variable *ir)
+{
+ blob.write_string(ir->var->name);
+ uint32_t unique_id = _unique_id(ir->var);
+ blob.write(&unique_id);
+
+ CACHE_DEBUG("save ir_dereference_variable [%s] id %d\n",
+ ir->var->name, unique_id);
+
+ return 0;
+}
+
+
+int
+ir_cache::save_ir(ir_constant *ir)
+{
+ glsl_type_data *data = new glsl_type_data(ir->type);
+ data->serialize(blob);
+
+ blob.write(&ir->value, sizeof(ir_constant_data), 1);
+
+ delete data;
+
+ if (ir->array_elements) {
+ for (unsigned i = 0; i < ir->type->length; i++)
+ if (save(ir->array_elements[i]))
+ return -1;
+ }
+
+ uint32_t components = 0;
+
+ /* struct constant, dump components exec_list */
+ if (!ir->components.is_empty()) {
+ foreach_iter(exec_list_iterator, iter, ir->components)
+ components++;
+ blob.write(&components);
+
+ foreach_iter(exec_list_iterator, iter, ir->components) {
+ ir_instruction *const inst = (ir_instruction *) iter.get();
+ if (save(inst))
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+
+int
+ir_cache::save_ir(ir_expression *ir)
+{
+ glsl_type_data *data = new glsl_type_data(ir->type);
+ int32_t num_operands = ir->get_num_operands();
+
+ data->serialize(blob);
+ delete data;
+
+ blob.write(&ir->operation);
+ blob.write(&num_operands);
+
+ /* operand ir_type below is written to make parsing easier */
+ for (unsigned i = 0; i < ir->get_num_operands(); i++) {
+ blob.write(&ir->operands[i]->ir_type);
+ if (save(ir->operands[i]))
+ return -1;
+ }
+ return 0;
+}
+
+
+int
+ir_cache::save_ir(ir_function *ir)
+{
+ uint32_t sig_amount = 0;
+
+ foreach_iter(exec_list_iterator, iter, *ir)
+ sig_amount++;
+
+ blob.write_string(ir->name);
+ blob.write(&sig_amount);
+
+ CACHE_DEBUG("save ir_function [%s], %d sigs\n", ir->name, sig_amount);
+
+ foreach_iter(exec_list_iterator, iter, *ir) {
+ ir_function_signature *const sig = (ir_function_signature *) iter.get();
+ if (save(sig))
+ return -1;
+ }
+
+ return 0;
+}
+
+
+int
+ir_cache::save_ir(ir_function_signature *ir)
+{
+ int32_t par_count = 0;
+ int32_t body_size = 0;
+ uint32_t is_builtin = ir->is_builtin();
+
+ foreach_iter(exec_list_iterator, iter, ir->parameters)
+ par_count++;
+
+ foreach_iter(exec_list_iterator, iter, ir->body)
+ body_size++;
+
+ CACHE_DEBUG("signature (%s), returns %d, params %d size %d (builtin %d)\n",
+ ir->function_name(), ir->return_type->base_type, par_count,
+ body_size, is_builtin);
+
+ blob.write(&par_count);
+ blob.write(&body_size);
+ blob.write(&is_builtin);
+
+   /* dump the return type of the function */
+ glsl_type_data *data = new glsl_type_data(ir->return_type);
+ data->serialize(blob);
+ delete data;
+
+ /* function parameters */
+ foreach_iter(exec_list_iterator, iter, ir->parameters) {
+ ir_variable *const inst = (ir_variable *) iter.get();
+ CACHE_DEBUG(" parameter %s\n", inst->name);
+ if (save(inst))
+ return -1;
+ }
+
+ if (prototypes_only)
+ return 0;
+
+ /* function body */
+ foreach_iter(exec_list_iterator, iter, ir->body) {
+ ir_instruction *const inst = (ir_instruction *) iter.get();
+ CACHE_DEBUG(" body instruction node type %d\n", inst->ir_type);
+ if (save(inst))
+ return -1;
+ }
+
+ return 0;
+}
+
+
+int
+ir_cache::save_ir(ir_assignment *ir)
+{
+ uint32_t write_mask = ir->write_mask;
+
+ blob.write(&write_mask);
+
+   /* lhs (ir_dereference_*) */
+ blob.write(&ir->lhs->ir_type);
+
+ blob.write_string(ir->lhs->variable_referenced()->name);
+
+ if (save(ir->lhs))
+ return -1;
+
+ if (ir->condition) {
+ CACHE_DEBUG("%s: assignment has condition, not supported", __func__);
+ }
+
+ /* rhs (constant, expression ...) */
+ blob.write(&ir->rhs->ir_type);
+
+ if (save(ir->rhs))
+ return -1;
+
+ return 0;
+}
+
+
+int
+ir_cache::save_ir(ir_return *ir)
+{
+ ir_rvalue *const value = ir->get_value();
+ uint8_t has_rvalue = value ? 1 : 0;
+
+ blob.write(&has_rvalue);
+
+ if (has_rvalue) {
+ blob.write(&value->ir_type);
+ return save(value);
+ }
+ return 0;
+}
+
+
+int
+ir_cache::save_ir(ir_swizzle *ir)
+{
+ uint32_t components = ir->mask.num_components;
+ const uint32_t mask[4] = {
+ ir->mask.x,
+ ir->mask.y,
+ ir->mask.z,
+ ir->mask.w
+ };
+
+ blob.write(&components);
+ blob.write(&mask, sizeof(mask[0]), 4);
+ blob.write(&ir->val->ir_type);
+
+ return save(ir->val);
+}
+
+
+int
+ir_cache::save_ir(ir_texture *ir)
+{
+ int32_t op = ir->op;
+ uint8_t has_coordinate = ir->coordinate ? 1 : 0;
+ uint8_t has_projector = ir->projector ? 1 : 0;
+ uint8_t has_shadow_comp = ir->shadow_comparitor ? 1 : 0;
+ uint8_t has_offset = ir->offset ? 1 : 0;
+
+ CACHE_DEBUG("save_ir_texture: op %d, coord %d proj %d shadow %d\n",
+ op, has_coordinate, has_projector, has_shadow_comp);
+
+ blob.write(&op);
+ blob.write(&has_coordinate);
+ blob.write(&has_projector);
+ blob.write(&has_shadow_comp);
+ blob.write(&has_offset);
+
+ glsl_type_data *data = new glsl_type_data(ir->type);
+ data->serialize(blob);
+ delete data;
+
+ /* sampler */
+ blob.write(&ir->sampler->ir_type);
+ if (save(ir->sampler))
+ return -1;
+
+ if (has_coordinate) {
+ blob.write(&ir->coordinate->ir_type);
+ if (save(ir->coordinate))
+ return -1;
+ }
+
+ if (has_projector) {
+ blob.write(&ir->projector->ir_type);
+ if (save(ir->projector))
+ return -1;
+ }
+
+ if (has_shadow_comp) {
+ blob.write(&ir->shadow_comparitor->ir_type);
+ if (save(ir->shadow_comparitor))
+ return -1;
+ }
+
+ if (has_offset) {
+ blob.write(&ir->offset->ir_type);
+ if (save(ir->offset))
+ return -1;
+ }
+
+ /* lod_info structure */
+ uint8_t has_lod = ir->lod_info.lod ? 1 : 0;
+ uint8_t has_bias = ir->lod_info.bias ? 1 : 0;
+ uint8_t has_sample_index = ir->lod_info.sample_index ? 1 : 0;
+ uint8_t has_component = ir->lod_info.component ? 1 : 0;
+ uint8_t has_dpdx = ir->lod_info.grad.dPdx ? 1 : 0;
+ uint8_t has_dpdy = ir->lod_info.grad.dPdy ? 1 : 0;
+
+ blob.write(&has_lod);
+ blob.write(&has_bias);
+ blob.write(&has_sample_index);
+ blob.write(&has_component);
+ blob.write(&has_dpdx);
+ blob.write(&has_dpdy);
+
+ if (has_lod)
+ if (save(ir->lod_info.lod))
+ return -1;
+ if (has_bias)
+ if (save(ir->lod_info.bias))
+ return -1;
+ if (has_sample_index)
+ if (save(ir->lod_info.sample_index))
+ return -1;
+ if (has_component)
+ if (save(ir->lod_info.component))
+ return -1;
+ if (has_dpdx)
+ if (save(ir->lod_info.grad.dPdx))
+ return -1;
+ if (has_dpdy)
+ if (save(ir->lod_info.grad.dPdy))
+ return -1;
+
+ return 0;
+}
+
+
+int
+ir_cache::save_ir(ir_discard *ir)
+{
+ uint8_t has_condition = ir->condition ? 1 : 0;
+ blob.write(&has_condition);
+
+ if (ir->condition != NULL) {
+ CACHE_DEBUG("%s: error, there is no cond support here yet...\n",
+ __func__);
+ if (save(ir->condition))
+ return -1;
+ }
+
+ return 0;
+}
+
+
+int
+ir_cache::save_ir(ir_call *ir)
+{
+ blob.write_string(ir->callee_name());
+
+ uint8_t has_return_deref = ir->return_deref ? 1 : 0;
+ uint8_t list_len = 0;
+ uint8_t use_builtin = ir->use_builtin;
+
+ blob.write(&has_return_deref);
+
+ if (ir->return_deref)
+ if (save(ir->return_deref))
+ return -1;
+
+ /* call parameter list */
+ foreach_iter(exec_list_iterator, iter, *ir)
+ list_len++;
+
+ blob.write(&list_len);
+
+ foreach_iter(exec_list_iterator, iter, *ir) {
+ ir_instruction *const inst = (ir_instruction *) iter.get();
+
+ int32_t ir_type = inst->ir_type;
+ blob.write(&ir_type);
+
+ if (save(inst))
+ return -1;
+ }
+
+ blob.write(&use_builtin);
+
+ return 0;
+}
+
+
+int
+ir_cache::save_ir(ir_if *ir)
+{
+ uint32_t then_len = 0, else_len = 0;
+
+ /* then and else branch lengths */
+ foreach_iter(exec_list_iterator, iter, ir->then_instructions)
+ then_len++;
+ foreach_iter(exec_list_iterator, iter, ir->else_instructions)
+ else_len++;
+
+ blob.write(&then_len);
+ blob.write(&else_len);
+ blob.write(&ir->condition->ir_type);
+
+ CACHE_DEBUG("dump ir_if (then %d else %d), condition ir_type %d\n",
+ then_len, else_len, ir->condition->ir_type);
+
+ save(ir->condition);
+
+ /* dump branch instruction lists */
+ foreach_iter(exec_list_iterator, iter, ir->then_instructions) {
+ ir_instruction *const inst = (ir_instruction *) iter.get();
+ CACHE_DEBUG(" ir_if then instruction node type %d\n", inst->ir_type);
+ if (save(inst))
+ return -1;
+ }
+
+ foreach_iter(exec_list_iterator, iter, ir->else_instructions) {
+ ir_instruction *const inst = (ir_instruction *) iter.get();
+ CACHE_DEBUG(" ir_if else instruction node type %d\n", inst->ir_type);
+ if (save(inst))
+ return -1;
+ }
+
+ return 0;
+}
+
+
+int
+ir_cache::save_ir(ir_loop *ir)
+{
+ uint8_t has_counter = ir->counter ? 1 : 0;
+ uint8_t has_from = ir->from ? 1 : 0;
+ uint8_t has_to = ir->to ? 1 : 0;
+ uint8_t has_incr = ir->increment ? 1 : 0;
+ uint32_t body_size = 0;
+
+ foreach_iter(exec_list_iterator, iter, ir->body_instructions)
+ body_size++;
+
+ blob.write(&has_from);
+ blob.write(&has_to);
+ blob.write(&has_incr);
+ blob.write(&has_counter);
+ blob.write(&ir->cmp);
+ blob.write(&body_size);
+
+ if (has_from) {
+ blob.write(&ir->from->ir_type);
+ if (save(ir->from))
+ return -1;
+ }
+
+ if (has_to) {
+ blob.write(&ir->to->ir_type);
+ if (save(ir->to))
+ return -1;
+ }
+
+ if (has_incr) {
+ blob.write(&ir->increment->ir_type);
+ if (save(ir->increment))
+ return -1;
+ }
+
+ if (has_counter) {
+ blob.write_string(ir->counter->name);
+ if (save(ir->counter))
+ return -1;
+ }
+
+ foreach_iter(exec_list_iterator, iter, ir->body_instructions) {
+ ir_instruction *const inst = (ir_instruction *) iter.get();
+ CACHE_DEBUG("save loop instruction type %d\n", inst->ir_type);
+ if (save(inst))
+ return -1;
+ }
+
+ return 0;
+}
+
+
+int ir_cache::save_ir(ir_loop_jump *ir)
+{
+ return blob.write(&ir->mode);
+}
+
+
+int
+ir_cache::save_ir(ir_emit_vertex *ir)
+{
+ return 0;
+}
+
+
+int
+ir_cache::save_ir(ir_end_primitive *ir)
+{
+ return 0;
+}
+
+
+/**
+ * writes the instruction type and packet size, then calls the
+ * save function for the instruction to save its data
+ */
+int
+ir_cache::save(ir_instruction *ir)
+{
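+   /* placeholder, real length is written back to start_pos below */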
+ uint32_t ir_len = 666;
+
+ blob.write(&ir->ir_type);
+
+ int32_t start_pos = blob.position();
+
+ blob.write(&ir_len);
+
+#define SAVE_IR(type)\
+ if (save_ir(static_cast<type *>(ir))) goto write_errors;
+
+ switch(ir->ir_type) {
+
+ case ir_type_variable:
+ SAVE_IR(ir_variable);
+ break;
+ case ir_type_call:
+ SAVE_IR(ir_call);
+ break;
+ case ir_type_constant:
+ SAVE_IR(ir_constant);
+ break;
+ case ir_type_discard:
+ SAVE_IR(ir_discard);
+ break;
+ case ir_type_expression:
+ SAVE_IR(ir_expression);
+ break;
+ case ir_type_dereference_array:
+ SAVE_IR(ir_dereference_array);
+ break;
+ case ir_type_dereference_record:
+ SAVE_IR(ir_dereference_record);
+ break;
+ case ir_type_dereference_variable:
+ SAVE_IR(ir_dereference_variable);
+ break;
+ case ir_type_function:
+ SAVE_IR(ir_function);
+ break;
+ case ir_type_function_signature:
+ SAVE_IR(ir_function_signature);
+ break;
+ case ir_type_swizzle:
+ SAVE_IR(ir_swizzle);
+ break;
+ case ir_type_texture:
+ SAVE_IR(ir_texture);
+ break;
+ case ir_type_assignment:
+ SAVE_IR(ir_assignment);
+ break;
+ case ir_type_if:
+ SAVE_IR(ir_if);
+ break;
+ case ir_type_loop:
+ SAVE_IR(ir_loop);
+ break;
+ case ir_type_loop_jump:
+ SAVE_IR(ir_loop_jump);
+ break;
+ case ir_type_return:
+ SAVE_IR(ir_return);
+ break;
+ case ir_type_emit_vertex:
+ SAVE_IR(ir_emit_vertex);
+ break;
+ case ir_type_end_primitive:
+ SAVE_IR(ir_end_primitive);
+ break;
+
+ default:
+ CACHE_DEBUG("%s: error, type %d not implemented\n",
+ __func__, ir->ir_type);
+ return -1;
+ }
+
+ ir_len = blob.position() - start_pos - sizeof(ir_len);
+
+ blob.write(&ir_len, sizeof(ir_len), 1, start_pos);
+
+ return 0;
+
+write_errors:
+ CACHE_DEBUG("%s: write errors (ir type %d)\n", __func__, ir->ir_type);
+ return -1;
+}
+
+
+static void
+_write_header(gl_shader *shader, const char *mesa_sha, memory_writer &blob)
+{
+ GET_CURRENT_CONTEXT(ctx);
+
+ blob.write_string(mesa_sha);
+ blob.write_string((const char *)ctx->Driver.GetString(ctx, GL_VENDOR));
+ blob.write_string((const char *)ctx->Driver.GetString(ctx, GL_RENDERER));
+ blob.write(&shader->Version);
+ blob.write(&shader->Type);
+ blob.write(&shader->IsES);
+
+ /* post-link data */
+ blob.write(&shader->num_samplers);
+ blob.write(&shader->active_samplers);
+ blob.write(&shader->shadow_samplers);
+ blob.write(&shader->num_uniform_components);
+ blob.write(&shader->num_combined_uniform_components);
+
+ for (unsigned i = 0; i < MAX_SAMPLERS; i++)
+ blob.write(&shader->SamplerUnits[i]);
+
+ for (unsigned i = 0; i < MAX_SAMPLERS; i++)
+ blob.write(&shader->SamplerTargets[i]);
+}
+
+
+static void
+_dump_bool(bool value, memory_writer &blob)
+{
+ uint8_t val = value;
+ blob.write(&val);
+}
+
+
+/**
+ * some of the state, such as the extension enable bits, is required
+ * from the preprocessing stage; this is used when caching unlinked shaders
+ */
+static void
+_write_state(struct _mesa_glsl_parse_state *state, memory_writer &blob)
+{
+ blob.write(&state->language_version);
+
+ _dump_bool(state->ARB_draw_buffers_enable, blob);
+ _dump_bool(state->ARB_draw_buffers_warn, blob);
+ _dump_bool(state->ARB_draw_instanced_enable, blob);
+ _dump_bool(state->ARB_draw_instanced_warn, blob);
+ _dump_bool(state->ARB_explicit_attrib_location_enable, blob);
+ _dump_bool(state->ARB_explicit_attrib_location_warn, blob);
+ _dump_bool(state->ARB_fragment_coord_conventions_enable, blob);
+ _dump_bool(state->ARB_fragment_coord_conventions_warn, blob);
+ _dump_bool(state->ARB_texture_rectangle_enable, blob);
+ _dump_bool(state->ARB_texture_rectangle_warn, blob);
+ _dump_bool(state->EXT_texture_array_enable, blob);
+ _dump_bool(state->EXT_texture_array_warn, blob);
+ _dump_bool(state->ARB_shader_texture_lod_enable, blob);
+ _dump_bool(state->ARB_shader_texture_lod_warn, blob);
+ _dump_bool(state->ARB_shader_stencil_export_enable, blob);
+ _dump_bool(state->ARB_shader_stencil_export_warn, blob);
+ _dump_bool(state->AMD_conservative_depth_enable, blob);
+ _dump_bool(state->AMD_conservative_depth_warn, blob);
+ _dump_bool(state->ARB_conservative_depth_enable, blob);
+ _dump_bool(state->ARB_conservative_depth_warn, blob);
+ _dump_bool(state->AMD_shader_stencil_export_enable, blob);
+ _dump_bool(state->AMD_shader_stencil_export_warn, blob);
+ _dump_bool(state->OES_texture_3D_enable, blob);
+ _dump_bool(state->OES_texture_3D_warn, blob);
+ _dump_bool(state->OES_EGL_image_external_enable, blob);
+ _dump_bool(state->OES_EGL_image_external_warn, blob);
+ _dump_bool(state->ARB_shader_bit_encoding_enable, blob);
+ _dump_bool(state->ARB_shader_bit_encoding_warn, blob);
+ _dump_bool(state->ARB_uniform_buffer_object_enable, blob);
+ _dump_bool(state->ARB_uniform_buffer_object_warn, blob);
+ _dump_bool(state->OES_standard_derivatives_enable, blob);
+ _dump_bool(state->OES_standard_derivatives_warn, blob);
+ _dump_bool(state->ARB_texture_cube_map_array_enable, blob);
+ _dump_bool(state->ARB_texture_cube_map_array_warn, blob);
+ _dump_bool(state->ARB_shading_language_packing_enable, blob);
+ _dump_bool(state->ARB_shading_language_packing_warn, blob);
+ _dump_bool(state->ARB_texture_multisample_enable, blob);
+ _dump_bool(state->ARB_texture_multisample_warn, blob);
+ _dump_bool(state->ARB_texture_query_lod_enable, blob);
+ _dump_bool(state->ARB_texture_query_lod_warn, blob);
+ _dump_bool(state->ARB_gpu_shader5_enable, blob);
+ _dump_bool(state->ARB_gpu_shader5_warn, blob);
+ _dump_bool(state->AMD_vertex_shader_layer_enable, blob);
+ _dump_bool(state->AMD_vertex_shader_layer_warn, blob);
+ _dump_bool(state->ARB_shading_language_420pack_enable, blob);
+ _dump_bool(state->ARB_shading_language_420pack_warn, blob);
+ _dump_bool(state->EXT_shader_integer_mix_enable, blob);
+ _dump_bool(state->EXT_shader_integer_mix_warn, blob);
+}
+
+
+/**
+ * serializes a single gl_shader: writes the shader header
+ * information and the exec_list of instructions
+ */
+char *
+ir_cache::serialize(struct gl_shader *shader,
+ struct _mesa_glsl_parse_state *state,
+ const char *mesa_sha, size_t *size)
+{
+ uint32_t total = 0;
+
+ prototypes_only = true;
+
+ *size = 0;
+
+ int32_t start_pos = blob.position();
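+   /* placeholder, real data length is written back to start_pos at the end */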
+ uint32_t shader_data_len = 666;
+ uint32_t shader_type = shader->Type;
+
+ blob.write(&shader_data_len);
+ blob.write(&shader_type);
+
+ _write_header(shader, mesa_sha, blob);
+
+ if (state)
+ _write_state(state, blob);
+
+ /* count variables + functions and dump prototypes */
+ foreach_list_const(node, shader->ir) {
+ if (((ir_instruction *) node)->as_variable())
+ total++;
+ if (((ir_instruction *) node)->as_function())
+ total++;
+ }
+
+ blob.write(&total);
+
+ CACHE_DEBUG("write %d prototypes\n", total);
+
+ foreach_list_const(node, shader->ir) {
+ ir_instruction *const inst = (ir_instruction *) node;
+ if (inst->as_variable())
+ if (save(inst))
+ goto write_errors;
+ }
+
+ foreach_list_const(node, shader->ir) {
+ ir_instruction *const inst = (ir_instruction *) node;
+ if (inst->as_function())
+ if (save(inst))
+ goto write_errors;
+ }
+
+ /* all shader instructions */
+ prototypes_only = false;
+ foreach_list_const(node, shader->ir) {
+ ir_instruction *instruction = (ir_instruction *) node;
+ if (save(instruction))
+ goto write_errors;
+ }
+
+ CACHE_DEBUG("cached a shader\n");
+
+ /* how much has been written */
+ *size = blob.position();
+
+ shader_data_len = blob.position() -
+ start_pos - sizeof(shader_data_len);
+ blob.write(&shader_data_len, sizeof(shader_data_len), 1, start_pos);
+
+ return blob.mem();
+
+write_errors:
+
+ blob.free_memory();
+ return NULL;
+}
+
+
+extern "C" char *
+_mesa_shader_serialize(struct gl_shader *shader,
+ struct _mesa_glsl_parse_state *state,
+ const char *mesa_sha, size_t *size)
+{
+ ir_cache cache;
+ return cache.serialize(shader, state, mesa_sha, size);
+}
+
+
+static void
+calc_item(const void *key, void *data, void *closure)
+{
+ unsigned *sz = (unsigned *) closure;
+ *sz = *sz + 1;
+}
+
+
+static unsigned
+_hash_table_size(struct string_to_uint_map *map)
+{
+ unsigned size = 0;
+ map->iterate(calc_item, &size);
+ return size;
+}
+
+
+static void
+serialize_item(const void *key, void *data, void *closure)
+{
+ memory_writer *blob = (memory_writer *) closure;
+ unsigned value = ((intptr_t)data);
+
+ blob->write_string((char *)key);
+ blob->write(&value);
+}
+
+
+static void
+_serialize_hash_table(struct string_to_uint_map *map, memory_writer *blob)
+{
+ unsigned size = _hash_table_size(map);
+ blob->write(&size);
+ map->iterate(serialize_item, blob);
+}
+
+
+static void
+_serialize_uniform_storage(gl_uniform_storage *uni, memory_writer &blob)
+{
+ blob.write_string(uni->name);
+
+ glsl_type_data *data = new glsl_type_data(uni->type);
+ data->serialize(blob);
+ delete data;
+
+ blob.write(&uni->array_elements);
+ blob.write(&uni->initialized);
+ blob.write(&uni->block_index);
+ blob.write(&uni->offset);
+ blob.write(&uni->matrix_stride);
+ blob.write(&uni->row_major);
+
+ for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
+ blob.write(&uni->sampler[i].index);
+ blob.write(&uni->sampler[i].active);
+ }
+
+ const unsigned elements = MAX2(1, uni->array_elements);
+ const unsigned data_components = elements * uni->type->components();
+ uint32_t size = elements * MAX2(1, data_components);
+
+ CACHE_DEBUG("%s: size %ld\n", __func__,
+ size * sizeof(union gl_constant_value));
+
+ blob.write(&size);
+ blob.write(uni->storage, sizeof(union gl_constant_value), size);
+}
+
+
+extern "C" char *
+_mesa_program_serialize(struct gl_shader_program *prog, size_t *size,
+ const char *mesa_sha)
+{
+ memory_writer blob;
+
+ blob.write(&prog->Type);
+ blob.write(&prog->NumShaders);
+ blob.write(&prog->LinkStatus);
+ blob.write(&prog->Version);
+ blob.write(&prog->IsES);
+ blob.write(&prog->NumUserUniformStorage);
+ blob.write(&prog->UniformLocationBaseScale);
+
+ /* hash tables */
+ _serialize_hash_table(prog->AttributeBindings, &blob);
+ _serialize_hash_table(prog->FragDataBindings, &blob);
+ _serialize_hash_table(prog->FragDataIndexBindings, &blob);
+ _serialize_hash_table(prog->UniformHash, &blob);
+
+ /* uniform storage */
+ if (prog->UniformStorage) {
+ for (unsigned i = 0; i < prog->NumUserUniformStorage; ++i)
+ _serialize_uniform_storage(&prog->UniformStorage[i], blob);
+ }
+
+   /* Shaders' IR; to be decided whether we want these to be available */
+#if 0
+ for (unsigned i = 0; i < prog->NumShaders; i++) {
+ size_t sha_size = 0;
+ char *data = _mesa_shader_serialize(prog->Shaders[i],
+ NULL, mesa_sha, &sha_size);
+
+ if (data) {
+ blob.write(data, sha_size, 1);
+ free(data);
+ }
+ }
+#endif
+
+ /* _LinkedShaders IR */
+ for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
+ size_t sha_size = 0;
+
+ if (!prog->_LinkedShaders[i])
+ continue;
+
+ char *data = _mesa_shader_serialize(prog->_LinkedShaders[i],
+ NULL, mesa_sha, &sha_size);
+
+ if (!data) {
+ CACHE_DEBUG("error serializing data for index %d\n", i);
+ free(blob.mem());
+ return NULL;
+ }
+
+ /* index in _LinkedShaders list + shader blob */
+ if (data) {
+ blob.write(&i);
+ blob.write(data, sha_size, 1);
+ free(data);
+ }
+ }
+
+ *size = blob.position();
+ return blob.mem();
+}
+
diff --git a/src/glsl/ir_cache_unserialize.cpp b/src/glsl/ir_cache_unserialize.cpp
new file mode 100644
index 0000000..8591e43
--- /dev/null
+++ b/src/glsl/ir_cache_unserialize.cpp
@@ -0,0 +1,1508 @@
+/* -*- c++ -*- */
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "ir_cache.h"
+#include "linker.h"
+#include "ir_uniform.h"
+#include "main/macros.h"
+#include "main/uniforms.h"
+#include "main/shaderobj.h"
+#include "main/shaderapi.h"
+#include "program/program.h"
+
+static ir_variable *
+search_var(struct exec_list *list, const char *name)
+{
+ foreach_list_safe(node, list) {
+ ir_variable *var = ((ir_instruction *) node)->as_variable();
+ if (var && strstr(name, var->name))
+ return var;
+ }
+ return NULL;
+}
+
+
+static ir_function *
+search_func(struct _mesa_glsl_parse_state *state, struct exec_list *list,
+ const char *name, struct exec_list *parameters)
+{
+ foreach_list_safe(node, list) {
+ ir_function *func = ((ir_instruction *) node)->as_function();
+ if (func && strstr(name, func->name) &&
+ func->matching_signature(state, parameters))
+ return func;
+ }
+ return NULL;
+}
+
+
+/**
+ * the main purpose of the header is to validate that the cached
+ * shader was produced with the same Mesa driver
+ */
+int
+ir_cache::read_header(struct gl_shader *shader, memory_map &map,
+ const char *mesa_sha)
+{
+ char cache_mesa_sha[256];
+ char driver_vendor[256];
+ char driver_renderer[256];
+
+ map.read_string(cache_mesa_sha);
+ map.read_string(driver_vendor);
+ map.read_string(driver_renderer);
+
+ map.read(&shader->Version);
+ map.read(&shader->Type);
+ map.read(&shader->IsES);
+
+ CACHE_DEBUG("%s: version %d, type 0x%x, %s (mesa %s)\n",
+ __func__, shader->Version, shader->Type,
+ (shader->IsES) ? "glsl es" : "desktop glsl",
+ cache_mesa_sha);
+
+ int error = memcmp(cache_mesa_sha, mesa_sha, strlen(mesa_sha));
+ if (error)
+ return error;
+
+ /* post-link data */
+ map.read(&shader->num_samplers);
+ map.read(&shader->active_samplers);
+ map.read(&shader->shadow_samplers);
+ map.read(&shader->num_uniform_components);
+ map.read(&shader->num_combined_uniform_components);
+
+ for (unsigned i = 0; i < MAX_SAMPLERS; i++)
+ map.read(&shader->SamplerUnits[i]);
+
+ for (unsigned i = 0; i < MAX_SAMPLERS; i++)
+ map.read(&shader->SamplerTargets[i]);
+
+ return 0;
+}
+
+
+const glsl_type *
+ir_cache::read_glsl_type(memory_map &map,
+ struct _mesa_glsl_parse_state *_state)
+{
+ char name[256];
+ uint32_t type_size;
+
+ map.read_string(name);
+ map.read(&type_size);
+
+ const glsl_type *type_exists = NULL;
+
+ if (_state && _state->symbols)
+ type_exists = _state->symbols->get_type(name);
+
+ /* if type exists, move read pointer forward and return type */
+ if (type_exists) {
+ map.ffwd(type_size);
+ return type_exists;
+ }
+
+ glsl_type_data data;
+
+ map.read(&data, sizeof(data));
+
+ data.name = NULL;
+ data.element_type = NULL;
+ data.field_names = NULL;
+ data.field_types = NULL;
+ data.field_major = NULL;
+
+ if (data.base_type == GLSL_TYPE_SAMPLER) {
+
+ switch(data.sampler_dimensionality) {
+ case 0:
+ return glsl_type::sampler1D_type;
+ case 1:
+ return glsl_type::sampler2D_type;
+ case 2:
+ return glsl_type::sampler3D_type;
+ case 3:
+ return glsl_type::samplerCube_type;
+ default:
+ CACHE_DEBUG("%s: unknown sampler type (dim %d)\n",
+ __func__, data.sampler_dimensionality);
+ }
+ }
+
+ /* array type has additional element_type information */
+ if (data.base_type == GLSL_TYPE_ARRAY) {
+ const glsl_type *element_type = read_glsl_type(map, state);
+ if (!element_type) {
+ CACHE_DEBUG("error reading array element type\n");
+ return NULL;
+ }
+ return glsl_type::get_array_instance(element_type, data.length);
+ }
+
+   /* structures have fields consisting of names and types */
+ else if (data.base_type == GLSL_TYPE_STRUCT) {
+ glsl_struct_field *fields = ralloc_array(mem_ctx,
+ glsl_struct_field, data.length);
+ for (int k = 0; k < data.length; k++) {
+ char field_name[256];
+ uint32_t row_major;
+ map.read_string(field_name);
+ fields[k].name = _mesa_strdup(field_name);
+ fields[k].type = read_glsl_type(map, state);
+ map.read(&row_major);
+ fields[k].row_major = row_major;
+ }
+ const glsl_type *struct_t =
+ glsl_type::get_record_instance(fields, data.length, name);
+
+ /* free allocated memory */
+ for (int k = 0; k < data.length; k++)
+ free((void *)fields[k].name);
+ ralloc_free(fields);
+
+ return struct_t;
+ }
+
+ return glsl_type::get_instance(data.base_type,
+ data.vector_elms, data.matrix_cols);
+}
+
+
+int
+ir_cache::read_ir_variable(struct exec_list *list, memory_map &map)
+{
+ char name[256];
+ char unique_name[256];
+ glsl_type_data type_data;
+ ir_cache_variable_data data;
+
+ const glsl_type *type = read_glsl_type(map, state);
+
+ map.read_string(name);
+ map.read_string(unique_name);
+ map.read(&data, sizeof(data));
+ data.name = NULL;
+ data.unique_name = NULL;
+ data.type = NULL;
+ data.state_slots = NULL;
+
+ ir_variable *var = new(mem_ctx)ir_variable(type,
+ name, (ir_variable_mode) data.mode);
+
+ if (!var)
+ return -1;
+
+ var->explicit_location = data.explicit_location;
+ var->explicit_index = data.explicit_index;
+ var->explicit_binding = data.explicit_binding;
+
+ var->max_array_access = data.max_array_access;
+ var->location = data.location;
+ var->read_only = data.read_only;
+ var->centroid = data.centroid;
+ var->invariant = data.invariant;
+ var->interpolation = data.interpolation;
+ var->origin_upper_left = data.origin_upper_left;
+ var->pixel_center_integer = data.pixel_center_integer;
+ var->depth_layout = (ir_depth_layout) data.depth_layout;
+ var->has_initializer = data.has_initializer;
+ var->location_frac = data.location_frac;
+ var->num_state_slots = data.num_state_slots;
+
+ var->state_slots = NULL;
+
+ if (var->num_state_slots > 0) {
+ var->state_slots = ralloc_array(var, ir_state_slot,
+ var->num_state_slots);
+
+ for (unsigned i = 0; i < var->num_state_slots; i++) {
+ map.read(&var->state_slots[i].swizzle);
+ for (int j = 0; j < 5; j++) {
+ map.read(&var->state_slots[i].tokens[j]);
+ }
+ }
+ }
+
+ if (data.has_constant_value)
+ var->constant_value = read_ir_constant(map);
+
+ if (data.has_constant_initializer)
+ var->constant_initializer = read_ir_constant(map);
+
+ uint8_t has_interface_type;
+ map.read(&has_interface_type);
+
+ if (has_interface_type)
+ var->init_interface_type(read_glsl_type(map, state));
+
+   /* store the address of this variable */
+ hash_store(var, unique_name);
+
+ list->push_tail(var);
+
+ return 0;
+}
+
+
+int
+ir_cache::read_ir_function(struct exec_list *list, memory_map &map)
+{
+ char name[256];
+ int32_t par_count = 0;
+ int32_t body_count = 0;
+ uint32_t is_builtin = 0;
+ int ir_type;
+ uint32_t len;
+ uint32_t sig_amount;
+
+ map.read_string(name);
+ map.read(&sig_amount);
+
+ ir_function *f = new(mem_ctx) ir_function(name);
+ ir_function_signature *sig = NULL;
+
+ /* add all signatures to the function */
+ for (unsigned j = 0; j < sig_amount; j++) {
+
+ /* ir_function_signature */
+ map.read(&ir_type);
+ map.read(&len);
+
+ if (ir_type != ir_type_function_signature) {
+ CACHE_DEBUG("cache format error with function %s\n", name);
+ return -1;
+ }
+
+ map.read(&par_count);
+ map.read(&body_count);
+ map.read(&is_builtin);
+
+ CACHE_DEBUG("%s: [%s] %d parameters, body size %d (is_builtin %d)\n",
+ __func__, name, par_count, body_count, is_builtin);
+
+ const glsl_type *return_type = read_glsl_type(map, state);
+
+ if (!return_type) {
+ CACHE_DEBUG("no return type found for [%s]\n", name);
+ return -1;
+ }
+
+ sig = new(mem_ctx) ir_function_signature(return_type);
+
+ /* fill parameters for function signature */
+ for (int k = 0; k < par_count; k++)
+ if (read_instruction(&sig->parameters, map))
+ goto read_errors;
+
+      /* insert function parameter variables into the prototypes list ... */
+ foreach_list_const(node, &sig->parameters) {
+ ir_variable *var = ((ir_instruction *) node)->as_variable();
+ if (var)
+ prototypes->push_tail(var->clone(mem_ctx, NULL));
+ }
+
+ current_function = &sig->body;
+
+ /* fill instructions for the function body */
+ if (!prototypes_only)
+ for (int k = 0; k < body_count; k++)
+ if (read_instruction(&sig->body, map, is_builtin ? true : false))
+ goto read_errors;
+
+ sig->is_defined = body_count ? 1 : 0;
+
+ if (!is_builtin) {
+ f->add_signature(sig);
+ } else {
+ ir_function_signature *builtin_sig =
+ _mesa_glsl_find_builtin_function(state, name, &sig->parameters);
+
+ if (builtin_sig) {
+ CACHE_DEBUG("found builtin signature for [%s]\n", name);
+ f->add_signature(sig);
+ } else {
+ CACHE_DEBUG("cannot find builtin, function [%s]\n", name);
+ return -1;
+ }
+ }
+
+ } /* for each function signature */
+
+ CACHE_DEBUG("added %s function [%s]\n",
+ is_builtin ? "builtin" : "user", name);
+
+   /* push the finished function onto the IR exec_list */
+ list->push_tail(f);
+
+ return 0;
+
+read_errors:
+ CACHE_DEBUG("%s: read errors with [%s]\n", __func__, name);
+ if (sig)
+ ralloc_free(sig);
+ return -1;
+
+}
+
+
+ir_dereference_array *
+ir_cache::read_ir_dereference_array(memory_map &map)
+{
+ int ir_type;
+ uint32_t len;
+
+ CACHE_DEBUG("%s\n", __func__);
+
+ map.read(&ir_type);
+ map.read(&len);
+
+ ir_rvalue *array_rval = read_ir_rvalue(map);
+ ir_rvalue *index_rval = read_ir_rvalue(map);
+
+ if (array_rval && index_rval)
+ return new(mem_ctx) ir_dereference_array(array_rval, index_rval);
+
+ CACHE_DEBUG("%s: could not get rvalues", __func__);
+ return NULL;
+}
+
+
+ir_dereference_record *
+ir_cache::read_ir_dereference_record(memory_map &map)
+{
+ int ir_type;
+ uint32_t len;
+ char name[256];
+
+ CACHE_DEBUG("%s\n", __func__);
+
+ map.read(&ir_type);
+ map.read(&len);
+ map.read_string(name);
+
+ ir_rvalue *rval = read_ir_rvalue(map);
+
+ if (rval)
+ return new(mem_ctx) ir_dereference_record(rval, name);
+
+ CACHE_DEBUG("%s: could not get rvalue", __func__);
+ return NULL;
+}
+
+
+/**
+ * Reads in a variable dereference and looks up the variable's
+ * address from the hash map using its unique_name
+ */
+ir_dereference_variable *
+ir_cache::read_ir_dereference_variable(memory_map &map)
+{
+ int ir_type;
+ uint32_t len;
+ char name[256];
+ char unique_name[256];
+ uint32_t unique_id;
+
+ map.read(&ir_type);
+ map.read(&len);
+ map.read_string(name);
+ map.read(&unique_id);
+
+ _mesa_snprintf(unique_name, 256, "%s_%d", name, unique_id);
+ const void *addr = hash_table_find(var_ht, (const void *) unique_name);
+
+ CACHE_DEBUG("found addr %p with name %s\n", addr, unique_name);
+
+ if (addr != 0) {
+ ir_variable *var = (ir_variable*) addr;
+ return new(mem_ctx) ir_dereference_variable(var);
+ }
+
+ CACHE_DEBUG("%s: could not find [%s]\n", __func__, name);
+ return NULL;
+}
+
+
+ir_constant *
+ir_cache::read_ir_constant(memory_map &map, struct exec_list *list)
+{
+ ir_constant *con = NULL;
+ int ir_type;
+ uint32_t len;
+
+ map.read(&ir_type);
+ map.read(&len);
+
+ const glsl_type *constant_type = read_glsl_type(map, state);
+
+ /* data structure */
+ ir_constant_data data;
+ map.read(&data, sizeof(data));
+
+ con = new(mem_ctx) ir_constant(constant_type, &data);
+
+ /* constant with array of constants */
+ if (constant_type->base_type == GLSL_TYPE_ARRAY) {
+ con->array_elements = ralloc_array(mem_ctx, ir_constant *,
+ constant_type->length);
+
+ for (unsigned i = 0; i < constant_type->length; i++)
+ con->array_elements[i] = read_ir_constant(map);
+
+ goto read_return;
+ }
+
+ else if (constant_type->base_type == GLSL_TYPE_STRUCT) {
+ uint32_t components;
+ map.read(&components);
+ for (unsigned i = 0; i < components; i++)
+ if (read_instruction(&con->components, map))
+ goto read_errors;
+ }
+
+read_return:
+ if (list)
+ list->push_tail(con);
+
+ return con;
+
+read_errors:
+ ralloc_free(con);
+ return NULL;
+}
+
+
+ir_swizzle *
+ir_cache::read_ir_swizzle(memory_map &map)
+{
+ unsigned swiz[4] = { 0 };
+ unsigned count;
+ int ir_type;
+ uint32_t len;
+
+ CACHE_DEBUG("%s\n", __func__);
+
+ map.read(&ir_type);
+ map.read(&len);
+
+ /* num of components + swizzle mask, rvalue */
+ map.read(&count);
+ map.read(swiz, 4 * sizeof(unsigned));
+
+ ir_rvalue *rval = read_ir_rvalue(map);
+
+ if (rval)
+ return new(mem_ctx) ir_swizzle(rval, swiz, count);
+
+ CACHE_DEBUG("error, could not handle rvalue for swizzle\n");
+ return NULL;
+}
+
+
+ir_texture *
+ir_cache::read_ir_texture(memory_map &map)
+{
+ int ir_type;
+ uint32_t len;
+ int32_t op;
+ uint8_t has_coordinate;
+ uint8_t has_projector;
+ uint8_t has_shadow_comp;
+ uint8_t has_offset;
+ const glsl_type *type = NULL;
+ ir_dereference *sampler = NULL;
+
+ map.read(&ir_type);
+ map.read(&len);
+
+ map.read(&op);
+ map.read(&has_coordinate);
+ map.read(&has_projector);
+ map.read(&has_shadow_comp);
+ map.read(&has_offset);
+
+ CACHE_DEBUG("%s: op %d coord %d proj %d shadow %d offset %d\n", __func__,
+ op, has_coordinate, has_projector,
+ has_shadow_comp, has_offset);
+
+ ir_texture *new_tex = new(mem_ctx) ir_texture((ir_texture_opcode)op);
+
+ if (!new_tex)
+ goto errors;
+
+ type = read_glsl_type(map, state);
+
+ /* sampler type */
+ map.read(&ir_type);
+
+ switch (ir_type) {
+ case ir_type_dereference_variable:
+ sampler = read_ir_dereference_variable(map);
+ break;
+ case ir_type_dereference_record:
+ sampler = read_ir_dereference_record(map);
+ break;
+ case ir_type_dereference_array:
+ sampler = read_ir_dereference_array(map);
+ break;
+ default:
+ CACHE_DEBUG("%s: error, unhandled sampler type %d\n",
+ __func__, ir_type);
+ }
+
+ if (!sampler)
+ goto errors;
+
+ new_tex->set_sampler(sampler, type);
+
+ if (has_coordinate)
+ new_tex->coordinate = read_ir_rvalue(map);
+
+ if (has_projector)
+ new_tex->projector = read_ir_rvalue(map);
+
+ if (has_shadow_comp)
+ new_tex->shadow_comparitor = read_ir_rvalue(map);
+
+ if (has_offset)
+ new_tex->offset = read_ir_rvalue(map);
+
+ /* lod_info structure */
+ uint8_t has_lod;
+ uint8_t has_bias;
+ uint8_t has_sample_index;
+ uint8_t has_component;
+ uint8_t has_dpdx;
+ uint8_t has_dpdy;
+
+ map.read(&has_lod);
+ map.read(&has_bias);
+ map.read(&has_sample_index);
+ map.read(&has_component);
+ map.read(&has_dpdx);
+ map.read(&has_dpdy);
+
+ memset(&new_tex->lod_info, 0, sizeof(ir_texture::lod_info));
+
+ if (has_lod)
+ new_tex->lod_info.lod = read_ir_rvalue(map);
+ if (has_bias)
+ new_tex->lod_info.bias = read_ir_rvalue(map);
+ if (has_sample_index)
+ new_tex->lod_info.sample_index = read_ir_rvalue(map);
+ if (has_component)
+ new_tex->lod_info.component = read_ir_rvalue(map);
+ if (has_dpdx)
+ new_tex->lod_info.grad.dPdx = read_ir_rvalue(map);
+ if (has_dpdy)
+ new_tex->lod_info.grad.dPdy = read_ir_rvalue(map);
+
+ return new_tex;
+
+errors:
+ CACHE_DEBUG("error, could not read ir_texture\n");
+ return NULL;
+}
+
+
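+/**
+ * read an ir_expression: result type, operation and up to 4 operand rvalues
+ */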
+ir_expression *
+ir_cache::read_ir_expression(memory_map &map)
+{
+ ir_expression_operation operation;
+ ir_rvalue *ir_rvalue_table[4] = { NULL };
+ int operands;
+ int ir_type;
+ uint32_t len;
+
+ CACHE_DEBUG("%s\n", __func__);
+
+ map.read(&ir_type);
+ map.read(&len);
+
+ /* glsl_type resulting from the operation */
+ const glsl_type *rval_type = read_glsl_type(map, state);
+
+ /* read operation type + all operands for creating ir_expression */
+ map.read(&operation);
+ map.read(&operands);
+
+ CACHE_DEBUG("%s : operation %d, operands %d\n",
+ __func__, operation, operands);
+
+ /* guard against corrupt data overflowing the operand table */
+ if (operands < 0 || operands > 4)
+ return NULL;
+
+ for (int k = 0; k < operands; k++) {
+ ir_rvalue *val = read_ir_rvalue(map);
+
+ if (!val)
+ return NULL;
+
+ ir_rvalue_table[k] = val;
+ }
+
+ return new(mem_ctx) ir_expression(operation,
+ rval_type,
+ ir_rvalue_table[0],
+ ir_rvalue_table[1],
+ ir_rvalue_table[2],
+ ir_rvalue_table[3]);
+}
+
+
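+/**
+ * read an ir_rvalue, dispatching on the serialized ir_type to the
+ * type-specific readers above
+ */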
+ir_rvalue *
+ir_cache::read_ir_rvalue(memory_map &map)
+{
+ int32_t ir_type = ir_type_unset;
+
+ map.read(&ir_type);
+
+ CACHE_DEBUG("%s: ir_value %d\n", __func__, ir_type);
+
+ switch(ir_type) {
+ case ir_type_constant:
+ return read_ir_constant(map);
+ case ir_type_dereference_variable:
+ return read_ir_dereference_variable(map);
+ case ir_type_dereference_record:
+ return read_ir_dereference_record(map);
+ case ir_type_dereference_array:
+ return read_ir_dereference_array(map);
+ case ir_type_expression:
+ return read_ir_expression(map);
+ case ir_type_swizzle:
+ return read_ir_swizzle(map);
+ case ir_type_texture:
+ return read_ir_texture(map);
+ default:
+ CACHE_DEBUG("%s: error, unhandled type %d\n",
+ __func__, ir_type);
+ break;
+ }
+ return NULL;
+}
+
+
+/**
+ * read an assignment instruction: write mask, lhs dereference and rvalue
+ */
+int
+ir_cache::read_ir_assignment(struct exec_list *list, memory_map &map)
+{
+ unsigned write_mask = 0;
+ int lhs_type = 0;
+ char lhs_name[256];
+
+ ir_assignment *assign = NULL;
+ ir_dereference *lhs_deref = NULL;
+
+ map.read(&write_mask);
+ map.read(&lhs_type);
+
+ CACHE_DEBUG("%s: mask %d lhs_type %d\n", __func__, write_mask, lhs_type);
+
+ map.read_string(lhs_name);
+
+ CACHE_DEBUG("%s : lhs name [%s]\n", __func__, lhs_name);
+
+ switch (lhs_type) {
+ case ir_type_dereference_variable:
+ lhs_deref = read_ir_dereference_variable(map);
+ break;
+ case ir_type_dereference_record:
+ lhs_deref = read_ir_dereference_record(map);
+ break;
+ case ir_type_dereference_array:
+ lhs_deref = read_ir_dereference_array(map);
+ break;
+ default:
+ CACHE_DEBUG("%s: error, unhandled lhs_type %d\n",
+ __func__, lhs_type);
+ }
+
+ if (!lhs_deref) {
+ CACHE_DEBUG("could not find lhs variable, bailing out\n");
+ return -1;
+ }
+
+ /* rvalue for assignment */
+ ir_rvalue *rval = read_ir_rvalue(map);
+
+ /* if we managed to read the rvalue, we can construct the assignment */
+ if (rval) {
+
+ CACHE_DEBUG("%s: lhs type %d\n", __func__, lhs_type);
+
+ assign = new(mem_ctx) ir_assignment(lhs_deref, rval, NULL, write_mask);
+ list->push_tail(assign);
+ return 0;
+ }
+
+ CACHE_DEBUG("error reading assignment rhs\n");
+ return -1;
+}
+
+
+/**
+ * read an ir_if: condition rvalue plus the then and else branch instructions
+ */
+int
+ir_cache::read_ir_if(struct exec_list *list, memory_map &map)
+{
+ unsigned then_len, else_len;
+
+ CACHE_DEBUG("%s\n", __func__);
+
+ map.read(&then_len);
+ map.read(&else_len);
+
+ ir_rvalue *cond = read_ir_rvalue(map);
+
+ if (!cond) {
+ CACHE_DEBUG("%s: error reading condition\n", __func__);
+ return -1;
+ }
+
+ ir_if *irif = new(mem_ctx) ir_if(cond);
+
+ for (unsigned k = 0; k < then_len; k++)
+ if (read_instruction(&irif->then_instructions, map))
+ goto read_errors;
+
+ for (unsigned k = 0; k < else_len; k++)
+ if (read_instruction(&irif->else_instructions, map))
+ goto read_errors;
+
+ list->push_tail(irif);
+ return 0;
+
+read_errors:
+ CACHE_DEBUG("%s: read errors(then %d else %d)\n",
+ __func__, then_len, else_len);
+ ralloc_free(irif);
+ return -1;
+}
+
+
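+/**
+ * read a return statement with its optional return value rvalue
+ */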
+int
+ir_cache::read_ir_return(struct exec_list *list, memory_map &map)
+{
+ uint8_t has_rvalue = 0;
+ map.read(&has_rvalue);
+
+ CACHE_DEBUG("%s\n", __func__);
+
+ ir_rvalue *rval = NULL;
+ if (has_rvalue)
+ rval = read_ir_rvalue(map);
+
+ ir_return *ret = new(mem_ctx) ir_return(rval);
+ list->push_tail(ret);
+
+ return 0;
+}
+
+
+/**
+ * read a call to an ir_function, find the matching function
+ * signature from the prototypes list and create the call
+ */
+int
+ir_cache::read_ir_call(struct exec_list *list, memory_map &map)
+{
+ uint8_t has_return_deref = 0;
+ unsigned list_len = 0;
+ unsigned use_builtin = 0;
+ struct exec_list parameters;
+ char name[256];
+ ir_dereference_variable *return_deref = NULL;
+
+ map.read_string(name);
+
+ map.read(&has_return_deref);
+
+ if (has_return_deref)
+ return_deref = read_ir_dereference_variable(map);
+
+ map.read(&list_len);
+
+ CACHE_DEBUG("call to function %s, %d parameters (ret deref %p)\n",
+ name, list_len, return_deref);
+
+ /* read call parameters */
+ for(unsigned k = 0; k < list_len; k++) {
+
+ ir_rvalue *rval = read_ir_rvalue(map);
+ if (rval) {
+ parameters.push_tail(rval);
+ } else {
+ CACHE_DEBUG("%s: error reading rvalue\n", __func__);
+ return -1;
+ }
+ }
+
+ map.read(&use_builtin);
+
+ if (use_builtin) {
+ ir_function_signature *callee =
+ _mesa_glsl_find_builtin_function(state, name, &parameters);
+
+ if (callee) {
+ CACHE_DEBUG("%s: found builtin signature for %s : %p\n",
+ __func__, name, callee);
+
+ ir_call *call = new(mem_ctx) ir_call(callee, return_deref,
+ &parameters);
+
+ call->use_builtin = true;
+
+ list->push_tail(call);
+ return 0;
+ }
+ }
+
+ /* find the function from the prototypes */
+ ir_function *func = search_func(state, prototypes, name, &parameters);
+
+ if (func) {
+ CACHE_DEBUG("found function with name %s (has user sig %d)\n",
+ name, func->has_user_signature());
+
+ ir_function_signature *callee = func->matching_signature(state,
+ &parameters);
+
+ if (!callee)
+ return -1;
+
+ /**
+ * Workaround for a call to an empty user-defined function, which
+ * happens with glb2.7 when dumping unlinked shaders. Creating the
+ * call would make linking fail, because empty functions are removed
+ * only after linking, so the call is simply skipped here.
+ */
+ if (!callee->is_defined)
+ return 0;
+
+ ir_call *call = new(mem_ctx) ir_call(callee, return_deref, &parameters);
+ list->push_tail(call);
+ return 0;
+ }
+
+ CACHE_DEBUG("%s:function %s not found for ir_call ...\n",
+ __func__, name);
+ return -1;
+}
+
+
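+/**
+ * read a discard statement with its optional condition rvalue
+ */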
+int
+ir_cache::read_ir_discard(struct exec_list *list, memory_map &map)
+{
+ uint8_t has_condition;
+ map.read(&has_condition);
+ ir_rvalue *condition = NULL;
+
+ CACHE_DEBUG("%s\n", __func__);
+
+ /* condition is optional, an unconditional discard has none */
+ if (has_condition) {
+ condition = read_ir_rvalue(map);
+ if (!condition)
+ return -1;
+ }
+
+ list->push_tail(new(mem_ctx) ir_discard(condition));
+ return 0;
+}
+
+
+/**
+ * read in an ir_loop: bounds, counter and the body instructions
+ */
+int
+ir_cache::read_ir_loop(struct exec_list *list, memory_map &map)
+{
+ uint8_t has_counter, has_from, has_to, has_increment;
+ uint32_t body_size;
+ int cmp;
+ char counter_name[256];
+ ir_loop *loop = NULL;
+
+ loop = new(mem_ctx) ir_loop;
+
+ map.read(&has_from);
+ map.read(&has_to);
+ map.read(&has_increment);
+ map.read(&has_counter);
+ map.read(&cmp);
+ map.read(&body_size);
+
+ /* comparison operation for loop termination */
+ loop->cmp = cmp;
+
+ /* ir_rvalues: from, to, increment + one ir_variable counter */
+ if (has_from) {
+ loop->from = read_ir_rvalue(map);
+ if (!loop->from)
+ return -1;
+ }
+
+ if (has_to) {
+ loop->to = read_ir_rvalue(map);
+ if (!loop->to)
+ return -1;
+ }
+
+ if (has_increment) {
+ loop->increment = read_ir_rvalue(map);
+ if (!loop->increment)
+ return -1;
+ }
+
+ /* read the counter ir_variable into the prototypes list and search for it there */
+ if (has_counter) {
+ map.read_string(counter_name);
+ if (read_instruction(prototypes, map))
+ return -1;
+ loop->counter = search_var(prototypes, counter_name);
+ if (!loop->counter)
+ return -1;
+ }
+
+ CACHE_DEBUG("%s: from %p to %p increment %p counter %p size %d\n", __func__,
+ loop->from, loop->to, loop->increment, loop->counter, body_size);
+
+ for (unsigned k = 0; k < body_size; k++) {
+ if (read_instruction(&loop->body_instructions, map))
+ goto read_errors;
+ }
+
+ list->push_tail(loop);
+ return 0;
+
+read_errors:
+ CACHE_DEBUG("%s: read errors\n", __func__);
+ if (loop)
+ ralloc_free(loop);
+ return -1;
+}
+
+
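+/**
+ * read a loop jump (break or continue)
+ */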
+int
+ir_cache::read_ir_loop_jump(struct exec_list *list, memory_map &map)
+{
+ int32_t mode;
+ map.read(&mode);
+ list->push_tail(new(mem_ctx) ir_loop_jump((ir_loop_jump::jump_mode)mode));
+ return 0;
+}
+
+
+int
+ir_cache::read_emit_vertex(struct exec_list *list, memory_map &map)
+{
+ list->push_tail(new(mem_ctx) ir_emit_vertex);
+ return 0;
+}
+
+
+int
+ir_cache::read_end_primitive(struct exec_list *list, memory_map &map)
+{
+ list->push_tail(new(mem_ctx) ir_end_primitive);
+ return 0;
+}
+
+
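+/**
+ * read a single instruction, dispatching on its ir_type; when ignore
+ * is set the instruction is skipped using its serialized size
+ */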
+int
+ir_cache::read_instruction(struct exec_list *list, memory_map &map, bool ignore)
+{
+ int ir_type = ir_type_unset;
+ uint32_t inst_dumpsize = 0;
+
+ map.read(&ir_type);
+ map.read(&inst_dumpsize);
+
+ /* reader wants to jump over this instruction */
+ if (ignore) {
+ map.ffwd(inst_dumpsize);
+ return 0;
+ }
+
+ switch(ir_type) {
+ case ir_type_variable:
+ return read_ir_variable(list, map);
+ case ir_type_assignment:
+ return read_ir_assignment(list, map);
+ case ir_type_constant:
+ return (read_ir_constant(map, list)) ? 0 : -1;
+ case ir_type_function:
+ return read_ir_function(list, map);
+ case ir_type_if:
+ return read_ir_if(list, map);
+ case ir_type_return:
+ return read_ir_return(list, map);
+ case ir_type_call:
+ return read_ir_call(list, map);
+ case ir_type_discard:
+ return read_ir_discard(list, map);
+ case ir_type_loop:
+ return read_ir_loop(list, map);
+ case ir_type_loop_jump:
+ return read_ir_loop_jump(list, map);
+ case ir_type_emit_vertex:
+ return read_emit_vertex(list, map);
+ case ir_type_end_primitive:
+ return read_end_primitive(list, map);
+ default:
+ CACHE_DEBUG("%s cannot read type %d, todo...\n",
+ __func__, ir_type);
+ }
+
+ return -1;
+}
+
+
+/**
+ * reads the prototypes section of the dump, which consists
+ * of variables and functions
+ */
+int
+ir_cache::read_prototypes(memory_map &map)
+{
+ uint32_t total;
+ int ir_type;
+ uint32_t inst_dumpsize;
+
+ map.read(&total);
+
+ prototypes_only = true;
+
+ for (unsigned k = 0; k < total; k++) {
+
+ map.read(&ir_type);
+ map.read(&inst_dumpsize);
+
+ switch (ir_type) {
+ case ir_type_variable:
+ if (read_ir_variable(prototypes, map))
+ return -1;
+ break;
+ case ir_type_function:
+ if (read_ir_function(prototypes, map))
+ return -1;
+ break;
+ default:
+ CACHE_DEBUG("%s: error in cache data (ir %d)\n",
+ __func__, ir_type);
+ return -1;
+ }
+ }
+
+ prototypes_only = false;
+
+ CACHE_DEBUG("%s: done\n", __func__);
+ return 0;
+}
+
+
+static uint8_t
+read_bool(memory_map &map)
+{
+ uint8_t value = 0;
+ map.read(&value);
+ return value;
+}
+
+
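+/**
+ * read the glsl language version and extension enable/warn flags into
+ * the parse state, failing if the version does not match
+ */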
+static int
+_read_state(struct _mesa_glsl_parse_state *state, memory_map &map)
+{
+ unsigned language_version;
+ map.read(&language_version);
+
+ /* if cache was produced with different glsl version */
+ if (language_version != state->language_version)
+ return -1;
+
+ /* would be nicer to have these flags in a structure of their own */
+ state->ARB_draw_buffers_enable = read_bool(map);
+ state->ARB_draw_buffers_warn = read_bool(map);
+ state->ARB_draw_instanced_enable = read_bool(map);
+ state->ARB_draw_instanced_warn = read_bool(map);
+ state->ARB_explicit_attrib_location_enable = read_bool(map);
+ state->ARB_explicit_attrib_location_warn = read_bool(map);
+ state->ARB_fragment_coord_conventions_enable = read_bool(map);
+ state->ARB_fragment_coord_conventions_warn = read_bool(map);
+ state->ARB_texture_rectangle_enable = read_bool(map);
+ state->ARB_texture_rectangle_warn = read_bool(map);
+ state->EXT_texture_array_enable = read_bool(map);
+ state->EXT_texture_array_warn = read_bool(map);
+ state->ARB_shader_texture_lod_enable = read_bool(map);
+ state->ARB_shader_texture_lod_warn = read_bool(map);
+ state->ARB_shader_stencil_export_enable = read_bool(map);
+ state->ARB_shader_stencil_export_warn = read_bool(map);
+ state->AMD_conservative_depth_enable = read_bool(map);
+ state->AMD_conservative_depth_warn = read_bool(map);
+ state->ARB_conservative_depth_enable = read_bool(map);
+ state->ARB_conservative_depth_warn = read_bool(map);
+ state->AMD_shader_stencil_export_enable = read_bool(map);
+ state->AMD_shader_stencil_export_warn = read_bool(map);
+ state->OES_texture_3D_enable = read_bool(map);
+ state->OES_texture_3D_warn = read_bool(map);
+ state->OES_EGL_image_external_enable = read_bool(map);
+ state->OES_EGL_image_external_warn = read_bool(map);
+ state->ARB_shader_bit_encoding_enable = read_bool(map);
+ state->ARB_shader_bit_encoding_warn = read_bool(map);
+ state->ARB_uniform_buffer_object_enable = read_bool(map);
+ state->ARB_uniform_buffer_object_warn = read_bool(map);
+ state->OES_standard_derivatives_enable = read_bool(map);
+ state->OES_standard_derivatives_warn = read_bool(map);
+ state->ARB_texture_cube_map_array_enable = read_bool(map);
+ state->ARB_texture_cube_map_array_warn = read_bool(map);
+ state->ARB_shading_language_packing_enable = read_bool(map);
+ state->ARB_shading_language_packing_warn = read_bool(map);
+ state->ARB_texture_multisample_enable = read_bool(map);
+ state->ARB_texture_multisample_warn = read_bool(map);
+ state->ARB_texture_query_lod_enable = read_bool(map);
+ state->ARB_texture_query_lod_warn = read_bool(map);
+ state->ARB_gpu_shader5_enable = read_bool(map);
+ state->ARB_gpu_shader5_warn = read_bool(map);
+ state->AMD_vertex_shader_layer_enable = read_bool(map);
+ state->AMD_vertex_shader_layer_warn = read_bool(map);
+ state->ARB_shading_language_420pack_enable = read_bool(map);
+ state->ARB_shading_language_420pack_warn = read_bool(map);
+ state->EXT_shader_integer_mix_enable = read_bool(map);
+ state->EXT_shader_integer_mix_warn = read_bool(map);
+
+ return 0;
+}
+
+
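+/**
+ * unserialize a gl_shader from the memory map: read the header and parse
+ * state, then the prototypes section and the top-level instruction list
+ */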
+struct gl_shader *
+ir_cache::unserialize(void *mem_ctx, memory_map &map, uint32_t shader_size,
+ struct _mesa_glsl_parse_state *state, const char *mesa_sha,
+ int *error_code)
+{
+ int error = 0;
+ uint32_t is_es = 0;
+
+ *error_code = ir_cache::GENERAL_READ_ERROR;
+
+ struct _mesa_glsl_parse_state *_state = state;
+
+ uint32_t type = 0;
+ map.read(&type);
+
+ GET_CURRENT_CONTEXT(ctx);
+ struct gl_shader *shader = ctx->Driver.NewShader(NULL, 0, type);
+
+ if (!shader)
+ return NULL;
+
+ shader->Source = NULL;
+ shader->Label = NULL;
+ shader->InfoLog = ralloc_strdup(mem_ctx, "");
+ shader->ir = NULL;
+
+ if (read_header(shader, map, mesa_sha)) {
+ *error_code = ir_cache::DIFFERENT_MESA_SHA;
+ goto error_unserialize;
+ }
+
+ is_es = shader->IsES ? 1 : 0;
+
+ /* check if cache produced using different language version */
+ if (state) {
+ if (state->language_version != shader->Version ||
+ state->es_shader != is_es) {
+ *error_code = ir_cache::DIFFERENT_LANG_VER;
+ goto error_unserialize;
+ }
+ }
+
+ if (state) {
+ if (_read_state(state, map))
+ goto error_unserialize;
+ _state = state;
+ } else {
+ /* no existing parse state, we need to create one */
+ GET_CURRENT_CONTEXT(ctx);
+ _state = new(mem_ctx) _mesa_glsl_parse_state(ctx,
+ shader->Type, shader);
+ }
+ this->state = _state;
+
+ /* fill parse state from shader header information */
+ switch (shader->Type) {
+ case GL_VERTEX_SHADER:
+ _state->target = vertex_shader;
+ break;
+ case GL_FRAGMENT_SHADER:
+ _state->target = fragment_shader;
+ break;
+ case GL_GEOMETRY_SHADER_ARB:
+ _state->target = geometry_shader;
+ break;
+ }
+
+ _state->num_builtins_to_link = 0;
+
+ _mesa_glsl_initialize_builtin_functions();
+ _mesa_glsl_initialize_types(_state);
+
+ /**
+ * parser state is used to find builtin functions and
+ * existing types during reading
+ */
+ this->state = _state;
+
+ /* allocations during reading */
+ this->mem_ctx = mem_ctx;
+
+ prototypes = new(mem_ctx) exec_list;
+
+ error = read_prototypes(map);
+
+ shader->ir = new(shader) exec_list;
+ top_level = shader->ir;
+
+ /* top level exec_list read loop, constructs a new list */
+ while(map.position() < shader_size && error == 0)
+ error = read_instruction(shader->ir, map);
+
+ ralloc_free(prototypes);
+
+ if (error)
+ goto error_unserialize;
+
+ *error_code = 0;
+
+ shader->CompileStatus = GL_TRUE;
+
+ /* allocates glsl_symbol_table internally */
+ populate_symbol_table(shader);
+
+ memcpy(shader->builtins_to_link, _state->builtins_to_link,
+ sizeof(shader->builtins_to_link[0]) * _state->num_builtins_to_link);
+ shader->num_builtins_to_link = _state->num_builtins_to_link;
+
+ validate_ir_tree(shader->ir);
+
+ CACHE_DEBUG("shader from cache\n");
+
+ return shader;
+
+error_unserialize:
+ if (shader->ir)
+ ralloc_free(shader->ir);
+ ralloc_free(shader);
+ return NULL;
+}
+
+
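+/**
+ * C API entry point, unserializes a single gl_shader from a memory blob
+ */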
+extern "C" struct gl_shader *
+_mesa_shader_unserialize(void *mem_ctx, void *blob,
+ const char *mesa_sha, size_t size)
+{
+ int error = 0;
+ ir_cache cache;
+ memory_map map;
+
+ map.map(blob, size);
+
+ return cache.unserialize(mem_ctx, map, size,
+ NULL,
+ mesa_sha,
+ &error);
+}
+
+
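+/**
+ * read a serialized string_to_uint_map; the value-1 assumes the writer
+ * dumped the map's internal value+1 representation
+ */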
+static void
+_read_hash_table(struct string_to_uint_map *hash, memory_map *map)
+{
+ unsigned size;
+ map->read(&size);
+
+ for (unsigned i = 0; i < size; i++) {
+ char key[256];
+ unsigned value;
+
+ map->read_string(key);
+ map->read(&value);
+
+ hash->put(value-1, key);
+ }
+}
+
+
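+/**
+ * read a gl_uniform_storage entry: name, type, layout fields, sampler
+ * data and the constant value storage
+ */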
+static void
+_read_uniform_storage(void *mem_ctx, gl_uniform_storage *uni,
+ memory_map &map, struct _mesa_glsl_parse_state *state)
+{
+ ir_cache cache;
+
+ char name[256];
+ map.read_string(name);
+ uni->name = strdup(name);
+
+ uni->type = cache.read_glsl_type(map, NULL);
+
+ map.read(&uni->array_elements);
+ map.read(&uni->initialized);
+ map.read(&uni->block_index);
+ map.read(&uni->offset);
+ map.read(&uni->matrix_stride);
+ map.read(&uni->row_major);
+
+ for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
+ map.read(&uni->sampler[i].index);
+ map.read(&uni->sampler[i].active);
+ }
+
+ uint32_t size;
+ map.read(&size);
+
+ CACHE_DEBUG("read uniform storage size %ld\n",
+ size * sizeof(union gl_constant_value));
+
+ uni->storage =
+ rzalloc_array(mem_ctx, union gl_constant_value, size);
+
+ map.read(uni->storage, size * sizeof(union gl_constant_value));
+
+ /* driver uniform storage gets generated and propagated later */
+ uni->driver_storage = NULL;
+ uni->num_driver_storage = 0;
+}
+
+
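+/**
+ * C API entry point, unserializes a gl_shader_program: program metadata,
+ * binding hash tables, uniform storage and the linked shaders
+ */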
+extern "C" int
+_mesa_program_unserialize(struct gl_shader_program *prog,
+ const GLvoid *blob, size_t size, const char *mesa_sha)
+{
+ memory_map map;
+ map.map((const void*)blob, size);
+
+ map.read(&prog->Type);
+ map.read(&prog->NumShaders);
+ map.read(&prog->LinkStatus);
+ map.read(&prog->Version);
+ map.read(&prog->IsES);
+
+ prog->NumUserUniformStorage = 0;
+ prog->UniformStorage = NULL;
+ prog->Label = NULL;
+
+ map.read(&prog->NumUserUniformStorage);
+ map.read(&prog->UniformLocationBaseScale);
+
+ /* these are already allocated by _mesa_init_shader_program */
+ _read_hash_table(prog->AttributeBindings, &map);
+ _read_hash_table(prog->FragDataBindings, &map);
+ _read_hash_table(prog->FragDataIndexBindings, &map);
+
+ prog->UniformHash = new string_to_uint_map;
+ _read_hash_table(prog->UniformHash, &map);
+
+ /* just zero for now */
+ prog->LinkedTransformFeedback.Outputs = NULL;
+ prog->LinkedTransformFeedback.Varyings = NULL;
+ prog->LinkedTransformFeedback.NumVarying = 0;
+ prog->LinkedTransformFeedback.NumOutputs = 0;
+
+ /* uniform storage */
+ prog->UniformStorage = rzalloc_array(prog, struct gl_uniform_storage,
+ prog->NumUserUniformStorage);
+
+ for (unsigned i = 0; i < prog->NumUserUniformStorage; i++)
+ _read_uniform_storage(prog, &prog->UniformStorage[i], map, NULL);
+
+ GET_CURRENT_CONTEXT(ctx);
+
+ /**
+ * prog->Shaders is not strictly required, but keeping it would allow
+ * recompiling and relinking these programs later. The downside is that
+ * it makes the binary blobs a lot bigger, so it is left out for now.
+ */
+#if 0
+ /* Shaders array (unlinked) */
+ prog->Shaders = (struct gl_shader **)
+ _mesa_realloc(prog->Shaders, 0,
+ (prog->NumShaders) * sizeof(struct gl_shader *));
+
+ for (unsigned i = 0; i < prog->NumShaders; i++) {
+ uint32_t shader_size;
+ map.read(&shader_size);
+
+ struct gl_shader *sha = map.read_shader(prog, mesa_sha, shader_size);
+
+ if (sha) {
+ prog->Shaders[i] = NULL; /* alloc did not initialize */
+ _mesa_reference_shader(ctx, &prog->Shaders[i], sha);
+ CACHE_DEBUG("%s: read unlinked shader, index %d (%p) size %d\n",
+ __func__, i, sha, shader_size);
+ }
+ }
+#else
+ prog->Shaders = NULL;
+ prog->NumShaders = 0;
+#endif
+
+ /* init list, cache can contain only some shader types */
+ for (unsigned i = 0; i < MESA_SHADER_TYPES; i++)
+ prog->_LinkedShaders[i] = NULL;
+
+ /* read _LinkedShaders */
+ while(map.position() < size) {
+ unsigned index;
+ map.read(&index);
+
+ uint32_t shader_size;
+ map.read(&shader_size);
+
+ struct gl_shader *sha = map.read_shader(prog, mesa_sha, shader_size);
+
+ if (!sha) {
+ CACHE_DEBUG("failed to read shader (index %d)\n", index);
+ return -1;
+ }
+
+#if 0
+ {
+ GET_CURRENT_CONTEXT(ctx);
+ _mesa_glsl_parse_state *state =
+ new(sha) _mesa_glsl_parse_state(ctx, sha->Type, sha);
+ printf("\n");
+ _mesa_print_ir(sha->ir, state);
+ printf("\n");
+ }
+#endif
+
+ _mesa_reference_shader(ctx, &prog->_LinkedShaders[index], sha);
+ CACHE_DEBUG("%s: read a linked shader, index %d (%p) size %d\n",
+ __func__, index, sha, shader_size);
+ }
+
+ return 0;
+}
--
1.8.1.4