nigel.stanger / Discussion_Papers
Added DP 2011/07.
master, 1 parent (12e2d8f), commit 76cb8156a515aaf5e25b733160f5be737cacc807
nstanger authored on 29 Jul 2011
Showing 1 changed file:
INFO_DP.bib
%% This BibTeX bibliography file was created using BibDesk.
%% http://bibdesk.sourceforge.net/

%% Created for Nigel Stanger at 2011-07-29 15:45:38 +1200

%% Saved with string encoding Western (Mac OS Roman)

@techreport{dp2011-06,
    Abstract = {Over the last few years, the voluminous increase in academic research publications has gained significant research attention. Research has been carried out exploring novel ways of providing information services using the research content. However, the task of extracting meaningful information from research documents remains a challenge. This paper presents our research work carried out for developing intelligent information systems, exploiting the research content. In this paper we present a linked data application which uses a new semantic publishing model for providing value-added information services for the research community. The paper presents a conceptual framework for modelling contexts associated with sentences in research articles and discusses the Sentence Context Ontology, which is used to convert the information extracted from research documents into machine-understandable data. The paper also reports on supervised learning experiments carried out using conditional probabilistic models for achieving automatic context identification.},
    Address = {Dunedin, New Zealand},
    Author = {M.A. Angrosh and Stephen Cranefield and Nigel Stanger},
    Date-Added = {2011-07-29 15:44:39 +1200},
    Date-Modified = {2011-07-29 15:44:39 +1200},
    Institution = {Department of Information Science, University of Otago},
    Keywords = {semantic publishing models, sentence context ontology, linked data application, conditional random fields, maximum entropy Markov models, citation classification, sentence context identification},
    Month = jul,
    Number = {2011/06},
    Title = {Contextual information retrieval in research articles: Semantic publishing tools for the research community},
    Type = {Discussion paper},
    Year = {2011}}

@techreport{dp2011-05,
    Abstract = {In Normative Multi-Agent Systems (NorMAS), researchers have investigated several mechanisms for agents to learn norms. In the context of agents learning norms, the objectives of the paper are three-fold. First, this paper aims at providing an overview of different mechanisms employed by researchers for norm learning. Second, it discusses the contributions of different mechanisms to the three aspects of active learning, namely learning by doing, observing and communicating. Third, it compares two normative architectures which have an emphasis on the learning of norms. It also discusses the features that should be considered in future norm learning architectures.},
    Address = {Dunedin, New Zealand},
    Author = {Bastin Tony Roy Savarimuthu},
    Date-Added = {2011-07-25 16:39:52 +1200},
    Date-Modified = {2011-07-25 16:39:52 +1200},
    Institution = {Department of Information Science, University of Otago},
    Keywords = {norms, learning, agents, mechanisms},
    Month = may,
    Number = {2011/05},
    Title = {Norm learning in multi-agent societies},
    Type = {Discussion paper},
    Year = {2011}}

@techreport{dp2011-04,
    Abstract = {Previous research on modelling and monitoring norms, contracts and commitments has studied the semantics of concepts such as obligation, permission, prohibition and commitment; languages for expressing behavioural constraints (such as norms or contracts) to be followed by agents in specific contexts; and mechanisms for run-time monitoring of fulfilment and violation of these constraints.
    However, there has been little work that provides all of these features while also allowing the current expectations of agents, and the fulfilment and violation of these expectations, to be expressed as first-class constructs in the language. This paper demonstrates the benefits of providing this capability by considering a variety of use cases and demonstrating how these can be addressed as applications of a previously defined temporal logic of expectations and an associated monitoring technique.},
    Address = {Dunedin, New Zealand},
    Author = {Stephen Cranefield and Michael Winikoff and Wamberto Vasconcelos},
    Date-Added = {2011-05-06 10:36:50 +1200},
    Date-Modified = {2011-05-06 10:36:50 +1200},
    Institution = {Department of Information Science, University of Otago},
    Month = feb,
    Number = {2011/04},
    Title = {Modelling and monitoring interdependent expectations},
    Type = {Discussion paper},
    Year = {2011}}

@techreport{dp2011-03,
    Abstract = {Second Life is a multi-purpose online virtual world that provides a rich platform for remote human interaction. It is increasingly being used as a simulation platform to model complex human interactions in diverse areas, as well as to simulate multi-agent systems. It would therefore be beneficial to provide techniques allowing high-level agent development tools, especially cognitive agent platforms such as belief-desire-intention (BDI) programming frameworks, to be interfaced to Second Life. This is not a trivial task as it involves mapping potentially unreliable sensor readings from complex Second Life simulations to a domain-specific abstract logical model of observed properties and/or events. This paper investigates this problem in the context of agent interactions in a multi-agent system simulated in Second Life. We present a framework which facilitates the connection of any multi-agent platform with Second Life, and demonstrate it in conjunction with an extension of the Jason BDI interpreter.},
    Address = {Dunedin, New Zealand},
    Author = {Surangika Ranathunga and Stephen Cranefield and Martin Purvis},
    Date-Added = {2011-02-04 15:54:36 +1300},
    Date-Modified = {2011-02-09 16:49:21 +1300},
    Institution = {Department of Information Science, University of Otago},
    Month = feb,
    Number = {2011/03},
    Title = {Interfacing a cognitive agent platform with Second Life},
    Type = {Discussion paper},
    Year = {2011}}

@techreport{dp2011-02,
    Abstract = {Although expectations play an important role in designing cognitive agents, agent expectations are not explicitly handled in most common agent programming environments. There are techniques for monitoring the fulfilment and violation of agent expectations; however, they are not linked with common agent programming environments in a way that allows agents to be easily programmed to respond to these circumstances.
    This paper investigates how expectation monitoring tools can be tightly integrated with the Jason BDI agent interpreter by extending it with built-in actions to initiate and terminate the monitoring of expectations, and demonstrates how an external expectation monitor is linked with Jason using these internal actions.},
    Address = {Dunedin, New Zealand},
    Author = {Surangika Ranathunga and Stephen Cranefield and Martin Purvis},
    Date-Added = {2011-02-04 13:15:44 +1300},
    Date-Modified = {2011-02-09 16:41:53 +1300},
    Institution = {Department of Information Science, University of Otago},
    Month = feb,
    Number = {2011/02},
    Title = {Integrating expectation handling into Jason},
    Type = {Discussion paper},
    Year = {2011}}

@techreport{dp2011-01,
    Abstract = {Container terminals play a critical role in international shipping and are under pressure to cope with increasing container traffic. The problem of managing container terminals effectively has a number of characteristics which make agents a suitable technology to consider applying. Container terminals involve the operation of distributed entities (e.g. quay cranes, straddle carriers) which coordinate to achieve competing goals in a dynamic environment. This paper describes a joint industry-university project which has explored the applicability of agent technology to the domain of container terminal management. We describe an emulation platform of a container terminal based on the JADE agent framework, along with two optimisations that have been developed and integrated with the emulator: allocating container moves to machines through negotiation, and allocating containers to yard locations through an evolutionary algorithm.},
    Address = {Dunedin, New Zealand},
    Author = {Michael Winikoff and Hanno-Felix Wagner and Thomas Young and Stephen Cranefield and Roger Jarquin and Guannan Li and Brent Martin and Rainer Unland},
    Date-Added = {2011-02-04 13:15:06 +1300},
    Date-Modified = {2011-02-04 13:15:06 +1300},
    Institution = {Department of Information Science, University of Otago},
    Keywords = {container terminal management, container terminal optimisation, logistics},
    Month = jan,
    Number = {2011/01},
    Title = {Agent-based container terminal optimisation},
    Type = {Discussion paper},
    Year = {2011}}

@techreport{dp2010-07,
    Abstract = {In this article we review contemporary multi-agent system architectures and implementations. We particularly focus on asynchronous message passing mechanisms. Our motivation is to explore two main areas in the context of multi-agent systems: the concept of micro-agents and asynchronous message passing architectures. In the article we take a close look at the emerging area of micro-agent-based systems and contrast them with selected representatives from the general field of agent architectures. We provide historical references and examples of contemporary implementations supporting the hierarchical micro-agent-based software engineering paradigm. In addition, we also investigate various implementation mechanisms for efficient asynchronous message passing between large numbers of small interacting software components with regard to their use in the context of multi-agent systems. The results show a trade-off between performance, fairness and usability as the key problem when selecting an appropriate solution.
    Future investigations into alternative concurrency handling mechanisms for better support of micro-agent architectures are suggested.},
    Address = {Dunedin, New Zealand},
    Author = {Christopher Frantz and Mariusz Nowostawski and Martin Purvis},
    Date-Added = {2011-01-21 13:20:28 +1300},
    Date-Modified = {2011-01-21 13:20:28 +1300},
    Institution = {Department of Information Science, University of Otago},
    Keywords = {multi-agent systems, micro-agents, asynchronous, communication, message passing},
    Month = nov,
    Number = {2010/07},
    Title = {Multi-agent platforms and asynchronous message passing: {F}rameworks overview},
    Type = {Discussion paper},
    Year = {2010}}

@techreport{dp2011-07,
    Abstract = {Second Life is a multi-purpose online virtual world that is increasingly being used for applications and simulations in diverse areas such as education, training, entertainment, and even for applications related to Artificial Intelligence. For the successful implementation and analysis of most of these applications, it is important to have a robust mechanism to extract low-level data from Second Life with high frequency and high accuracy. However, Second Life currently does not have a reliable or scalable inbuilt data extraction mechanism, nor does the related research provide a better alternative. This paper presents a robust and reliable mechanism for extracting data from Second Life. We also investigate the existing data extraction mechanisms in detail, identifying their limitations in extracting data with high accuracy and high frequency.},
    Address = {Dunedin, New Zealand},
    Author = {Surangika Ranathunga and Stephen Cranefield and Martin Purvis},
    Date-Added = {2010-11-17 14:50:10 +1300},
    Date-Modified = {2011-07-29 15:45:23 +1200},
    Institution = {Department of Information Science, University of Otago},
    Month = jul,
    Number = {2011/07},
    Title = {Extracting data from Second Life},
    Type = {Discussion paper},
    Year = {2011}}

@techreport{dp2010-06,
    Abstract = {Changes in population demographics and lifestyle choices have led to an increased risk of higher mortality from house fires. The current average of 27 house-fire-related deaths per year is likely to be exceeded in the following years. The aging population, with its natural increase in age-related hearing loss, and the younger demographic having only mobile phones and no landlines, mean there is a need for alternative methods of warning of smoke alarm activation. This project develops a proof-of-concept application that runs on a smart phone and detects an activated smoke alarm. If there is no response from the occupants, the application automatically triggers an alarm to a predefined contact group.
    This application can reduce the possibility of death or injury to persons unable to respond to an activated alarm.},
    Address = {Dunedin, New Zealand},
    Author = {Alan Woods and Mariusz Nowostawski},
    Date-Added = {2010-11-12 12:38:29 +1300},
    Date-Modified = {2010-12-03 09:43:34 +1300},
    Institution = {Department of Information Science, University of Otago},
    Month = nov,
    Number = {2010/06},
    Title = {Smoke alarm detection, broadcast notifications and social implications},
    Type = {Discussion paper},
    Year = {2010}}

@techreport{dp2010-05,
    Address = {Dunedin, New Zealand},
    Author = {Alan Woods and Mariusz Nowostawski},
    Date-Added = {2010-11-12 12:38:12 +1300},
    Date-Modified = {2010-11-12 12:38:12 +1300},
    Institution = {Department of Information Science, University of Otago},
    Month = nov,
    Number = {2010/05},
    Title = {Feasibility study of fall detection with the use of mobile smartphones},
    Type = {Discussion paper},
    Year = {2010}}

@techreport{dp2010-04,
    Abstract = {Modeling of financial market data for detecting important market characteristics as well as their abnormalities plays a key role in identifying their behavior. Researchers have proposed different types of techniques to model market data. One such model, proposed by Sergei Maslov, models the behavior of a limit order book. Although it is a very simple and interesting model, it has several drawbacks and limitations. This paper analyses the behavior of the Maslov model and proposes several variants of it to make the original Maslov model more realistic. The price signals generated from these models are analyzed by comparing them with real-life stock data, and it is shown that the proposed variants of the Maslov model are more realistic than the original Maslov model.},
    Address = {Dunedin, New Zealand},
    Author = {Rasika M. Withanawasam and Peter A. Whigham and Timothy Crack and I. M. Premachandra},
    Date-Added = {2010-11-12 12:37:26 +1300},
    Date-Modified = {2010-11-12 12:37:26 +1300},
    Institution = {Department of Information Science, University of Otago},
    Month = feb,
    Number = {2010/04},
    Size = {832 KB},
    Title = {An empirical investigation of the {M}aslov limit order market model},
    Type = {Discussion paper},
    Year = {2010}}

@techreport{dp2010-03,
    Abstract = {In normative multi-agent systems, the question of ``how an agent identifies a norm in an agent society'' has not received much attention. This paper aims at addressing this question. To this end, this paper proposes an architecture for norm identification for an agent. The architecture is based on observation of interactions between agents. This architecture enables an autonomous agent to identify the norms in a society using the Candidate Norm Inference (CNI) algorithm. The CNI algorithm uses an association rule mining approach to identify sequences of events as candidate norms. When a norm changes, the agent using our architecture will be able to modify the norm, and also remove a norm if it does not hold in its society. Using simulations we demonstrate how an agent makes use of the norm identification framework.},
    Address = {Dunedin, New Zealand},
    Author = {Bastin Tony Roy Savarimuthu and Stephen Cranefield and Maryam A. Purvis and Martin K. Purvis},
    Date-Added = {2010-04-12 11:59:13 +1200},
    Date-Modified = {2010-04-12 11:59:13 +1200},
    Institution = {Department of Information Science, University of Otago},
    Keywords = {norms, agents, architecture, identification, simulation, societies},
    Month = feb,
    Number = {2010/03},
    Size = {562 KB},
    Title = {Norm identification in multi-agent societies},
    Type = {Discussion paper},
    Year = {2010}}

@techreport{dp2010-02,
    Abstract = {Novelty detection is an important functionality that has found many applications in information retrieval and processing. In this paper we propose a novel framework that deals with novelty detection for multiple-scene image sets. Working with wildlife image data, the framework starts with image segmentation, followed by feature extraction and classification of the image blocks extracted from image segments. The labelled image blocks are then scanned through to generate a co-occurrence matrix of object labels, representing the semantic context within the scene. The semantic co-occurrence matrices then undergo binarization and principal component analysis for dimension reduction, forming the basis for constructing one-class models for each scene category. An algorithm for outlier detection that employs multiple one-class models is proposed. An advantage of our approach is that it can be used for scene classification and novelty detection at the same time. Our experiments show that the proposed approach gives favourable performance for the task of detecting novel wildlife scenes, and binarization of the label co-occurrence matrices helps to significantly increase the robustness in dealing with the variation of scene statistics.},
    Address = {Dunedin, New Zealand},
    Author = {Suet-Peng Yong and Jeremiah D. Deng and Martin K. Purvis},
    Date-Added = {2010-02-01 13:15:53 +1300},
    Date-Modified = {2010-05-04 11:15:34 +1200},
    Institution = {Department of Information Science, University of Otago},
    Keywords = {context, co-occurrence matrix, semantics, novel image, multi-class},
    Month = jan,
    Number = {2010/02},
    Size = {3.4 MB},
    Title = {Modelling semantic context for novelty detection in wildlife scenes},
    Type = {Discussion paper},
    Year = {2010}}

@techreport{dp2010-01,
    Abstract = {In this paper we consider the broader issue of gaining assurance that an agent system will behave appropriately when it is deployed. We ask to what extent this problem is addressed by existing research into formal verification. We identify a range of issues with existing work which leads us to conclude that, broadly speaking, verification approaches on their own are too narrowly focussed. We argue that a shift in direction is needed, and outline some possibilities for such a shift in direction.},
    Address = {Dunedin, New Zealand},
    Author = {Michael Winikoff},
    Date-Added = {2010-02-01 13:03:08 +1300},
    Date-Modified = {2010-02-01 13:03:08 +1300},
    Institution = {Department of Information Science, University of Otago},
    Month = jan,
    Number = {2010/01},
    Size = {296 KB},
    Title = {Assurance of agent systems: What role should formal verification play?},
    Type = {Discussion paper},
    Year = {2010}}

@techreport{dp2009-01,
    Abstract = {In this paper we discuss a tag-based model that facilitates knowledge sharing in the context of agents playing the knowledge sharing game. Sharing the knowledge incurs a cost for the sharing agent, and thus non-sharing is the preferred option for selfish agents.
    Through agent-based simulations we show that knowledge sharing is possible even in the presence of non-sharing agents in the population. We also show that the performance of an agent society can be better when some agents bear the cost of sharing instead of the whole group sharing the cost.},
    Address = {Dunedin, New Zealand},
    Author = {Sharmila Savarimuthu and Maryam Purvis and Martin Purvis},
    Date-Added = {2010-01-11 14:04:00 +1300},
    Date-Modified = {2010-01-11 14:04:00 +1300},
    Institution = {Department of Information Science, University of Otago},
    Keywords = {cooperation, altruism, tags, knowledge sharing, multi-agent based simulation, artificial society},
    Month = feb,
    Number = {2009/01},
    Size = {424 KB},
    Title = {Tag based model for knowledge sharing in agent society},
    Type = {Discussion paper},
    Year = {2009}}

@techreport{dp2009-07,
    Abstract = {While wireless sensor networks (WSNs) are increasingly equipped to handle more complex functions, in-network processing still requires the battery-powered sensors to use their constrained energy judiciously so as to prolong the effective network lifetime. There are a few protocols that use sensor clusters to coordinate the energy consumption in a WSN. To cope with energy heterogeneity among sensor nodes, a modified clustering algorithm is proposed with a three-tier sensor node setting. Simulations have been conducted to evaluate the new clustering algorithm, and favorable results are obtained, especially in heterogeneous energy settings.},
    Address = {Dunedin, New Zealand},
    Author = {Femi A. Aderohunmu and Jeremiah D. Deng and Martin K. Purvis},
    Date-Added = {2009-10-05 16:49:19 +1300},
    Date-Modified = {2010-02-01 13:51:51 +1300},
    Institution = {Department of Information Science, University of Otago},
    Keywords = {wireless sensor network, heterogeneous settings, clustering},
    Month = oct,
    Number = {2009/07},
    Size = {881 KB},
    Title = {Enhancing clustering in wireless sensor networks with energy heterogeneity},
    Type = {Discussion paper},
    Year = {2009}}

@techreport{dp2009-06,
    Abstract = {This research focuses on the design and development of an IBIS-based tool called IBISMod, which facilitates a distributed and collaborative decision-making process. IBIS-based systems help analysts and designers in the process of formulating the requirements and design issues associated with complex problems that are difficult to specify. In particular, they capture the rationale behind the group decision-making process. The group members are usually distributed over a network and may be working together concurrently. IBISMod is based on Rittel's Issue-Based Information System. This particular implementation is a web-based tool that makes it possible for the participants to work together on a specific problem while physically present in different locations. In order to improve the interactivity, speed and usability of the framework, the AJAX approach has been adopted.},
    Address = {Dunedin, New Zealand},
    Author = {Toktam Ebadi and Maryam A. Purvis and Martin K. Purvis},
    Date-Added = {2009-10-01 18:01:45 +1300},
    Date-Modified = {2009-10-01 18:01:45 +1300},
    Institution = {Department of Information Science, University of Otago},
    Month = jun,
    Number = {2009/06},
    Size = {500 KB},
    Title = {A collaborative Web-based issue based information system (IBIS) framework},
    Type = {Discussion paper},
    Year = {2009}}

@techreport{dp2009-05,
    Abstract = {Software development effort estimation is important for quality management in the software development industry, yet its automation still remains a challenging issue. Applying machine learning algorithms alone often cannot achieve satisfactory results. In this paper, we present an integrated data mining framework that incorporates domain knowledge into a series of data analysis and modeling processes, including visualization, feature selection, and model validation. An empirical study on the software effort estimation problem using a benchmark dataset shows the effectiveness of the proposed approach.},
    Address = {Dunedin, New Zealand},
    Author = {Jeremiah D. Deng and Martin K. Purvis and Maryam A. Purvis},
    Date-Added = {2009-09-17 15:46:48 +1200},
    Date-Modified = {2009-09-17 15:46:48 +1200},
    Institution = {Department of Information Science, University of Otago},
    Keywords = {software effort estimation, machine learning},
    Month = jun,
    Number = {2009/05},
    Size = {260 KB},
    Title = {Software effort estimation: {H}armonizing algorithms and domain knowledge in an integrated data mining approach},
    Type = {Discussion paper},
    Year = {2009}}

@techreport{dp2009-04,
    Abstract = {Sapstain is considered a defect that must be removed from processed wood. So far, research in automatic wood inspection systems has been mostly limited to dealing with knots. In this paper, we extract a number of colour and texture features from wood pictures. These features are then assessed using machine learning techniques via feature selection, visualization, and finally classification. Apart from average colour and colour opponents, texture features are also found to be useful in classifying sapstain. This implies a significant modification to the domain understanding that sapstain is mainly a discolourization effect. Preliminary results are presented, with satisfactory classification performance using only a few selected features. It is promising that a real-world wood inspection system with the functionality of sapstain detection can be developed.},
    Address = {Dunedin, New Zealand},
    Author = {Jeremiah D. Deng},
    Date-Added = {2009-06-08 14:57:36 +1200},
    Date-Modified = {2009-06-09 16:58:39 +1200},
    Institution = {Department of Information Science, University of Otago},
    Month = apr,
    Number = {2009/04},
    Size = {884 KB},
    Title = {Automatic sapstain detection in processed timber through image feature analysis},
    Type = {Discussion paper},
    Year = {2009}}

@techreport{dp2008-04,
    Abstract = {In a multi-agent system, a single agent may not be capable of completing complex tasks. Therefore agents are required to form a team to fulfill the task requirements. In this paper an agent model is introduced that facilitates cooperation among agents. A multi-threaded multi-agent simulation framework is designed to test the model. The experimental results demonstrate that the model is effective in achieving cooperation under various environmental constraints.
    It also allows agents to adjust their teammate selection strategies according to environmental constraints.},
    Address = {Dunedin, New Zealand},
    Author = {Toktam Ebadi and Maryam Purvis and Martin Purvis},
    Date-Added = {2009-06-08 13:59:38 +1200},
    Date-Modified = {2009-06-09 16:58:39 +1200},
    Institution = {Department of Information Science, University of Otago},
    Month = oct,
    Number = {2008/04},
    Size = {176 KB},
    Title = {Partner selection mechanisms for agent cooperation},
    Type = {Discussion paper},
    Year = {2008}}

@techreport{dp2008-03,
    Abstract = {Before deploying a software system we need to assure ourselves (and stake-holders) that the system will behave correctly. This assurance is usually done by testing the system. However, it is intuitively obvious that adaptive systems, including agent-based systems, can exhibit complex behaviour, and are thus harder to test. In this paper we examine this ``obvious intuition'' in the case of Belief-Desire-Intention (BDI) agents. We analyse the size of the behaviour space of BDI agents and show that although the intuition is correct, the factors that influence the size are not what we expected them to be; specifically, we found that the introduction of failure handling had a much larger effect on the size of the behaviour space than we expected. We also discuss the implications of these findings on the testability of BDI agents.},
    Address = {Dunedin, New Zealand},
    Author = {Michael Winikoff and Stephen Cranefield},
    Date-Added = {2009-06-08 13:58:46 +1200},
    Date-Modified = {2009-06-09 16:58:39 +1200},
    Institution = {Department of Information Science, University of Otago},
    Keywords = {testing, complexity, validation, belief-desire-intention (BDI)},
    Month = nov,
    Number = {2008/03},
    Size = {472 KB},
    Title = {On the testability of {BDI} agent systems},
    Type = {Discussion paper},
    Year = {2008}}

@techreport{dp2008-01,
    Abstract = {The aim of this project was to explore the JAIN SLEE 1.1 standard programming model and the SIMPLE (SIP for Instant Messaging and Presence Leveraging Extensions) protocols by developing a Voice over Internet Protocol (VoIP) application with functions that include making a phone call, instant messaging with peers, and at the same time providing users with buddy list information about their peers. The JAIN SLEE platform RhinoSDK 2.0 (developed by OpenCloud) was to be used, and an example application included with RhinoSDK 2.0 was to be extended. During the project the phone call functionality was scoped out and the focus was set on implementing the instant messaging and presence functionality. This report describes the functions that have been implemented on the server side and client side of this VoIP application.},
    Address = {Dunedin, New Zealand},
    Author = {Dee Milic and Dong Zhou and Hailing Situ},
    Date-Added = {2009-06-08 13:57:39 +1200},
    Date-Modified = {2009-06-10 15:56:39 +1200},
    Institution = {Department of Information Science, University of Otago},
    Month = mar,
    Number = {2008/01},
    Size = {221 KB},
    Title = {{VoIP} application development using {SIP} protocol},
    Type = {Discussion paper},
    Year = {2008}}

@techreport{dp2008-02,
    Abstract = {Gartner has for some time been reporting the potential for virtual world technology to become the next wave of the Internet, delivering what is known as the Web3.D environment. This is characterised by a high level of user participation through immersion in the virtual world. Gartner has predicted that by 2011, 80% of internet users will be regular users of Web3.D technology.
    Project LifeLink was initiated to discover what opportunities for Telecom might exist in the growth of business and consumer interest in virtual worlds. This has focused on a number of technologies, in particular Second Life, OpenSimulator (OpenSIM) and JAIN SLEE. The project has been run by Telecom with coordination and support from MediaLab, and with researchers at Canterbury and Otago Universities. This report describes the work undertaken at Otago University to implement a gateway to enable demonstration of communications between an object in Second Life and the JAIN SLEE environment, in order to interoperate with external network services.},
    Address = {Dunedin, New Zealand},
    Author = {Nathan Lewis and Hailing Situ and Melanie Middlemiss},
    Date-Added = {2009-06-08 13:56:48 +1200},
    Date-Modified = {2009-06-10 15:56:55 +1200},
    Institution = {Department of Information Science, University of Otago},
    Month = jul,
    Number = {2008/02},
    Size = {1.3 MB},
    Title = {Report of {O}tago contributions to {T}elecom {L}ife{L}ink {P}roject},
    Type = {Discussion paper},
    Year = {2008}}

@techreport{dp2008-05,
    Abstract = {Most people hold a variety of opinions on different topics, ranging from sports and entertainment to spiritual beliefs and moral principles. These can be based on a personal reflection and evaluation or on their interactions with others. How do we influence others in our social network, how do they influence us, and how do we reach consensus? In this paper, we present our investigations based on the use of multiple opinions (a vector of opinions) that should be considered to determine consensus in a society. We have extended the Deffuant model and tested it on two well-known network topologies: the Barabasi-Albert network and the Erdos-Renyi network. We have implemented a two-phase filtering process for determining the consensus.},
    Address = {Dunedin, New Zealand},
    Author = {Alya Alaali and Maryam Purvis and Bastin Tony Roy Savarimuthu},
    Date-Added = {2009-06-08 13:54:29 +1200},
    Date-Modified = {2009-06-09 16:58:40 +1200},
    Institution = {Department of Information Science, University of Otago},
    Month = oct,
    Number = {2008/05},
    Size = {696 KB},
    Title = {Vector opinion dynamics: {A}n extended model for consensus in social networks},
    Type = {Discussion paper},
    Year = {2008}}

@techreport{dp2009-08,
    Abstract = {Telecommunications technologies and Internet services are experiencing unprecedented growth. Technological advances together with the growing scale of deployments are driving rapid change in the telecommunications arena. All these factors contribute to the push towards convergence on the network core. Next generation networks, programmable networks, and the converged core open up and provide new network architectures and new converged service opportunities. The Global Network Interconnectivity (GNI) Project was established at the University of Otago in 2006 to develop expertise, provide knowledge sharing and conduct activities supporting new ICT technologies that contribute to telecommunications, multimedia, and information systems convergence. The aim of the GNI Symposium was to bring together academic and industry leaders for one day to discuss current and future issues relating to convergence in the ICT and Telecommunications arena. This report provides a summary of the day's presentations and discussion sessions.},
    Address = {Dunedin, New Zealand},
    Annote = {Problem with the original file: PDFLaTeX doesn't like PDF 1.5!
    Constructed manually using Preview instead.},
    Author = {Melanie Middlemiss},
    Date-Added = {2009-06-07 21:50:57 +1200},
    Date-Modified = {2009-10-05 16:56:53 +1300},
    Institution = {Department of Information Science, University of Otago},
    Month = oct,
    Number = {2009/08},
    Size = {632 KB},
    Title = {2009 Global Network Interconnectivity (GNI) Symposium},
    Type = {Discussion paper},
    Year = {2009}}

@techreport{dp2009-03,
    Abstract = {The problem with the uptake of new technologies such as ZigBee is the lack of development environments that help in faster application software development. This paper describes a software framework for application development using the ZigBee wireless protocol. The architecture is based on defining XML-based design interfaces that represent the profiles of the ZigBee nodes that are used in the application.},
    Address = {Dunedin, New Zealand},
    Author = {Bastin Tony Roy Savarimuthu and Morgan Bruce and Maryam Purvis},
    Date-Added = {2009-06-07 21:49:30 +1200},
    Date-Modified = {2009-06-10 15:57:21 +1200},
    Institution = {Department of Information Science, University of Otago},
    Month = apr,
    Number = {2009/03},
    Size = {168 KB},
    Title = {A software framework for application development using {Z}ig{B}ee protocol},
    Type = {Discussion paper},
    Year = {2009}}

@techreport{dp2009-02,
    Abstract = {Online virtual worlds such as Second Life provide a rich medium for unstructured human interaction in a shared simulated 3D environment. However, many human interactions take place in a structured social context where participants play particular roles and are subject to expectations governing their behaviour, and current virtual worlds do not provide any support for this type of interaction. There is therefore an opportunity to take the tools developed in the MAS community for structured social interactions between software agents (inspired by human society) and adapt these for use with the computer-mediated human communication provided by virtual worlds. This paper describes the application of one such tool for use with Second Life. A model checker for online monitoring of social expectations defined in temporal logic has been integrated with Second Life, allowing users to be notified when their expectations of others have been fulfilled or violated. Avatar actions in the virtual world are detected by a script, encoded as propositions and sent to the model checker, along with the social expectation rules to be monitored. Notifications of expectation fulfilment and violation are returned to the script to be displayed to the user. The utility of this tool relies on the ability of the Linden scripting language (LSL) to detect events of significance in the application domain, and a discussion is presented on how a range of monitored structured social scenarios could be realised despite the limitations of LSL.},
    Address = {Dunedin, New Zealand},
    Author = {Stephen Cranefield and Guannan Li},
    Date-Added = {2009-06-07 21:45:12 +1200},
    Date-Modified = {2009-06-09 17:16:19 +1200},
    Institution = {Department of Information Science, University of Otago},
    Month = may,
    Number = {2009/02},
    Size = {188 KB},
    Title = {Monitoring social expectations in {S}econd {L}ife},
    Type = {Discussion paper},
    Year = {2009}}

@techreport{dp2007-02,
    Abstract = {In this paper we propose a mechanism for norm emergence based on role models. The mechanism uses the concept of normative advice, whereby the role models provide advice to the follower agents.
    Our mechanism is built using two layers of networks: the social link layer and the leadership layer. The social link network represents how agents are connected to each other. The leadership network represents the network that is formed based on the role played by each agent on the social link network. The two kinds of roles are leaders and followers. We present our findings on how norms emerge on the leadership network when the topology of the social link network changes. The three kinds of social link networks that we have experimented with are fully connected networks, random networks and scale-free networks.},
    Address = {Dunedin, New Zealand},
    Author = {Bastin Tony Roy Savarimuthu and Stephen Cranefield and Maryam Purvis and Martin Purvis},
    Date-Added = {2009-06-08 14:20:00 +1200},
    Date-Modified = {2009-06-09 16:58:41 +1200},
    Institution = {Department of Information Science, University of Otago},
    Month = jul,
    Number = {2007/02},
    Size = {488 KB},
    Title = {Role model based mechanism for norm emergence in artificial agent societies},
    Type = {Discussion paper},
    Year = {2007}}

@techreport{dp2000-14,
    Abstract = {Electronic medical consultation is available worldwide through access to the World Wide Web (WWW). This article outlines a research study on the adoption of electronic medical consultation as a means of health delivery. It focuses on the delivery of healthcare specifically for New Zealanders, by New Zealanders. It is acknowledged that the WWW is a global marketplace and it is therefore difficult to identify New Zealanders' use of such a global market, but we have attempted to provide a New Zealand perspective on electronic medical consultation.},
    Address = {Dunedin, New Zealand},
    Author = {Brebner, C. and Jones, R. and Krisjanous, J. and Marshall, W. and Parry, G. and A. Holt},
    Date-Added = {2009-06-08 14:20:00 +1200},
    Date-Modified = {2009-06-09 16:58:41 +1200},
    Institution = {Department of Information Science, University of Otago},
    Keywords = {electronic medical consultation, on-line health, New Zealand},
    Month = oct,
    Number = {2000/14},
    Size = {80 KB},
    Title = {Electronic medical consultation: {A} {N}ew {Z}ealand perspective},
    Type = {Discussion paper},
    Year = {2000}}

@techreport{dp2003-01,
    Abstract = {The process for requirements elicitation has traditionally been based on textual descriptions or graphical models using UML. While these may have worked for the design of desktop-based systems, we argue that these notations are not adequate for a dialog with mobile end users, in particular for end users in ``blue collar'' application domains. We propose an alternative modelling technique, ``Software Cinema'', based on the use of digital videos.
    We discuss one particular example of using Software Cinema in the design of a user interface for a navigation system of a mobile end user.},
    Address = {Dunedin, New Zealand},
    Author = {Bernd Bruegge and Martin Purvis and Oliver Creighton and Christian Sandor},
    Date-Added = {2009-06-08 14:20:00 +1200},
    Date-Modified = {2009-06-09 16:58:41 +1200},
    Institution = {Department of Information Science, University of Otago},
    Month = apr,
    Number = {2003/01},
    Size = {301 KB},
    Title = {Software cinema},
    Type = {Discussion paper},
    Year = {2003}}

@techreport{dp2006-02,
    Abstract = {The purpose of this study was to create a ubiquitous, proximity-activated interactive digital display system, providing adjusted artworks as content, for evaluating viewer reactions and opinions to determine whether similar interactive ubiquitous systems are a beneficial, enjoyable and appropriate way to display art. Multimedia used in galleries predominantly provides content following set patterns and disregards the viewer. Interactive displays using viewer location usually require the viewer's conscious participation through carrying some form of hardware or using expensive sensing equipment. We created an inexpensive, simple system that reacts to the user in a ubiquitous manner, allowing the evaluation of the usability and suitability of such systems in the context of viewing art. Results from testing show that interactive displays are generally enjoyed and wanted for displaying art; however, even simple ubiquitous displays can cause user difficulty due to the transparency of their interaction.},
    Address = {Dunedin, New Zealand},
    Author = {Gary Burrows},
    Date-Added = {2009-06-08 14:20:00 +1200},
    Date-Modified = {2009-06-09 16:58:41 +1200},
    Institution = {Department of Information Science, University of Otago},
    Keywords = {interactive, digital displays, art, proximity, ubiquitous, gallery, intuitive interfaces},
    Month = jan,
    Number = {2006/02},
    Size = {496 KB},
    Title = {Ubiquitous interactive art displays: {A}re they wanted, are they intuitive?},
    Type = {Discussion paper},
    Year = {2006}}

@techreport{dp2001-02,
    Abstract = {Agent-oriented software engineering is a promising new approach to software engineering that uses the notion of an agent as the primary entity of abstraction. The development of methodologies for agent-oriented software engineering is an area that is currently receiving much attention; several agent-oriented methodologies have been proposed recently and survey papers are starting to appear. However, the authors feel that there is still much work necessary in this area; current methodologies can be improved upon. This paper presents a new methodology, the Styx Agent Methodology, which guides the development of collaborative agent systems from the analysis phase through to system implementation and maintenance. A distinguishing feature of Styx is that it covers a wider range of software development life-cycle activities than do other recently proposed agent-oriented methodologies. The key areas covered by this methodology are the specification of communication concepts, inter-agent communication and each agent's behaviour activation---but it does not address the development of application-specific parts of a system. It will be supported by a software tool which is currently in development.},
    Address = {Dunedin, New Zealand},
    Author = {Geoff Bush and Stephen Cranefield and Martin K. Purvis},
    Date-Added = {2009-06-08 14:20:00 +1200},
    Date-Modified = {2009-06-10 15:51:16 +1200},
    Institution = {Department of Information Science, University of Otago},
    Keywords = {agent-based software engineering methodologies for agent-oriented software development},
    Month = jan,
    Number = {2001/02},
    Size = {153 KB},
    Title = {The {S}tyx agent methodology},
    Type = {Discussion paper},
    Year = {2001}}

@techreport{dp2006-01,
    Abstract = {Health care has entered the electronic domain. This domain has improved data collection and storage abilities while allowing almost instantaneous access to data and query results. Furthermore it allows direct communication between healthcare providers and health consumers. The development of privacy, confidentiality and security principles is necessary to protect consumers' interests against inappropriate access. The electronic health systems vendors have dominated the transition of media, claiming it will improve the quality and coherence of the care process. However, numerous studies show that the health consumer is the important stakeholder in this process, and their views suggest that the electronic medium is the way forward, but not just yet. With the international push towards Electronic Health Records (EHRs) by the Health and Human Services (United States of America), National Health Service (United Kingdom), Health Canada (Canada) and more recently the Ministry of Health (New Zealand), this paper presents the consumers' role with a focus on their perceptions of the security of EHRs. A description of a study, looking at the New Zealand health consumer, is given.},
    Address = {Dunedin, New Zealand},
    Author = {Prajesh Chhanabhai and Alec Holt and Inga Hunter},
    Date-Added = {2009-06-08 14:20:00 +1200},
    Date-Modified = {2009-06-09 16:58:41 +1200},
    Institution = {Department of Information Science, University of Otago},
    Keywords = {electronic health records, New Zealand health system, consumer, security},
    Month = jan,
    Number = {2006/01},
    Size = {291 KB},
    Title = {Consumers, security and electronic health records},
    Type = {Discussion paper},
    Year = {2006}}

@techreport{dp2000-19,
    Abstract = {In any data set, some of the data will be bad or noisy. This study identifies two types of noise and investigates the effect of each in the training data of backpropagation neural networks. It also compares the mean square error function with a more robust alternative advocated by Huber.},
    Address = {Dunedin, New Zealand},
    Author = {David Clark},
    Date-Added = {2009-06-08 14:20:00 +1200},
    Date-Modified = {2009-06-10 15:51:03 +1200},
    Institution = {Department of Information Science, University of Otago},
    Month = dec,
    Number = {2000/19},
    Size = {360 KB},
    Title = {Comparing {H}uber's {M}-{E}stimator function with the mean square error in backpropagation networks when the training data is noisy},
    Type = {Discussion paper},
    Year = {2000}}

@techreport{dp2000-17,
    Abstract = {In a consensus ensemble all members must agree before they classify a data point. But even when they all agree some data is still misclassified.
    In this paper we look closely at consistently misclassified data to investigate whether some of it may be outliers or may have been mislabeled.},
    Address = {Dunedin, New Zealand},
    Author = {David Clark},
    Date-Added = {2009-06-08 14:20:00 +1200},
    Date-Modified = {2009-06-09 16:58:42 +1200},
    Institution = {Department of Information Science, University of Otago},
    Month = nov,
    Number = {2000/17},
    Size = {331 KB},
    Title = {Using consensus ensembles to identify suspect data},
    Type = {Discussion paper},
    Year = {2000}}

@techreport{dp2001-04,
    Abstract = {This paper discusses technology to support the use of UML for representing ontologies and domain knowledge in the Semantic Web. Two mappings have been defined and implemented using XSLT to produce Java classes and an RDF schema from an ontology represented as a UML class diagram and encoded using XMI. A Java application can encode domain knowledge as an object diagram realised as a network of instances of the generated classes. Support is provided for marshalling and unmarshalling this object-oriented knowledge to and from an RDF/XML serialisation.},
    Address = {Dunedin, New Zealand},
    Author = {Stephen Cranefield},
    Date-Added = {2009-06-08 14:20:00 +1200},
    Date-Modified = {2009-06-10 15:51:43 +1200},
    Institution = {Department of Information Science, University of Otago},
    Month = feb,
    Number = {2001/04},
    Size = {482 KB},
    Title = {{UML} and the {S}emantic {W}eb},
    Type = {Discussion paper},
    Year = {2001}}

@techreport{dp2004-03,
    Abstract = {The use of asynchronous communication is traditionally seen to be an important element of an agent's autonomy. This paper argues that groups of agents within a society need the ability to choose forms of communication with stronger guarantees for particular interactions, and in particular, focuses on the use of reliable group communication. An example electronic trading scenario---the game of Pit---is presented, and it is shown how a formal institution for a particular critical phase of Pit can be built on top of the semantics for totally ordered and virtually synchronous multicasting.},
    Address = {Dunedin, New Zealand},
    Author = {Stephen Cranefield},
    Date-Added = {2009-06-08 14:20:00 +1200},
    Date-Modified = {2009-06-09 16:58:42 +1200},
    Institution = {Department of Information Science, University of Otago},
    Month = dec,
    Number = {2004/03},
    Size = {183 KB},
    Title = {Reliable group communication and institutional action in a multi-agent trading scenario},
    Type = {Discussion paper},
    Year = {2004}}

@techreport{dp2005-01,
    Abstract = {This paper proposes a rule language for defining social expectations based on a metric interval temporal logic with past and future modalities and a current time binding operator. An algorithm for run-time monitoring of compliance with rules in this language, based on formula progression, is also presented.},
    Address = {Dunedin, New Zealand},
    Author = {Stephen Cranefield},
    Date-Added = {2009-06-08 14:20:00 +1200},
    Date-Modified = {2009-06-09 16:58:42 +1200},
    Institution = {Department of Information Science, University of Otago},
    Month = feb,
    Number = {2005/01},
    Size = {188 KB},
    Title = {A rule language for modelling and monitoring social expectations in multi-agent systems},
    Type = {Discussion paper},
    Year = {2005}}

@techreport{dp2001-07,
    Abstract = {Ontologies play an important role in defining the terminology that agents use in the exchange of knowledge-level messages.
    As object-oriented modelling, and the Unified Modeling Language (UML) in particular, have built up a huge following in the field of software engineering and are widely supported by robust commercial tools, the use of UML for ontology representation in agent systems would help to hasten the uptake of agent-based systems concepts into industry. This paper examines the potential for UML to be used for ontology modelling, compares it to traditional description logic formalisms and discusses some further possibilities for applying UML-based technologies to agent communication systems.},
    Address = {Dunedin, New Zealand},
    Author = {Stephen Cranefield and Stefan Haustein and Martin K. Purvis},
    Date-Added = {2009-06-08 14:20:00 +1200},
    Date-Modified = {2009-06-10 15:51:53 +1200},
    Institution = {Department of Information Science, University of Otago},
    Month = may,
    Number = {2001/07},
    Size = {143 KB},
    Title = {{UML}-based ontology modelling for software agents},
    Type = {Discussion paper},
    Year = {2001}}

@techreport{dp2005-12,
    Abstract = {This paper discusses the potential benefits to ontology engineering of making the toolset of the Object Management Group's model-driven architecture (MDA) applicable to ontology modelling, and describes the design of an MDA-based tool to convert ontologies expressed in any language having a metamodel defined using the OMG's MOF model to an equivalent representation in RDF but with the same metamodel. It is shown how this representation, compared to the XMI format, provides a higher-level generic serialisation format for MDA models (especially ontologies) that is amenable to analysis and transformation using existing RDF tools. This helps to bridge the gap between the MDA and ontology engineering by providing a route for ontologies in various ontology modelling languages to be imported into industrial-strength MDA model repositories and other tools, and by allowing these ontologies to be transformed to and from other forms of model.},
    Address = {Dunedin, New Zealand},
    Author = {Stephen Cranefield and Jin Pan},
    Date-Added = {2009-06-08 14:20:00 +1200},
    Date-Modified = {2009-06-10 15:55:06 +1200},
    Institution = {Department of Information Science, University of Otago},
    Keywords = {Model-driven Architecture (MDA), ontologies, MOF, JMI, RDF, Jena, NetBeans MDR, ODM},
    Month = dec,
    Number = {2005/12},
    Size = {416 KB},
    Title = {Bridging the gap between the {M}odel-{D}riven {A}rchitecture and ontology engineering},
    Type = {Discussion paper},
    Year = {2005}}

@techreport{dp2000-07,
    Abstract = {It is canonical practice in agent-based systems to use a declarative format for the exchange of information. The increasing usage and facility of object-oriented tools and techniques, however, suggests there may be benefits in combining the use of object-oriented modelling approaches with agent-based messaging. In this paper we outline our efforts in connection with the New Zealand Distributed Information Systems project to use object-oriented knowledge representation in an agent-based architecture. Issues and tradeoffs are discussed, as well as the possible extensions to current agent-based message protocols that may be necessary in order to support object-oriented information exchange.},
    Address = {Dunedin, New Zealand},
    Author = {Stephen Cranefield and Martin K. Purvis},
    Date-Added = {2009-06-08 14:20:00 +1200},
    Date-Modified = {2009-06-10 15:49:52 +1200},
    Institution = {Department of Information Science, University of Otago},
    Month = apr,
    Number = {2000/07},
    Size = {85 KB},
    Title = {Extending agent messaging to enable {OO} information exchange},
    Type = {Discussion paper},
    Year = {2000}}

@techreport{dp2000-02,
    Abstract = {An approach is presented for incorporating metadata constraints into queries to be processed by a distributed environmental information system. The approach, based on a novel metamodel unifying concepts from the Unified Modelling Language (UML), the Object Query Language (OQL), and the Resource Description Framework (RDF), allows metadata information to be represented and processed in combination with regular data queries.},
    Address = {Dunedin, New Zealand},
    Author = {Stephen Cranefield and Martin K. Purvis},
    Date-Added = {2009-06-08 14:20:00 +1200},
    Date-Modified = {2009-06-09 16:58:43 +1200},
    Institution = {Department of Information Science, University of Otago},
    Month = feb,
    Number = {2000/02},
    Size = {134 KB},
    Title = {Integrating environmental information: {I}ncorporating metadata in a distributed information systems architecture},
    Type = {Discussion paper},
    Year = {2000}}

@techreport{dp2001-08,
    Abstract = {This paper examines a recent trend amongst software agent application and platform developers to desire the ability to send domain-specific objects within inter-agent messages. If this feature is to be supported without departing from the notion that agents communicate in terms of knowledge, it is important that the meaning of such objects be well understood. Using an object-oriented metamodelling approach, the relationships between ontologies and agent communication and content languages in FIPA-style agent systems are examined. It is shown how object structures in messages can be considered as expressions in ontology-specific extensions of standard content languages. It is also argued that ontologies must distinguish between objects with and objects without identity.},
    Address = {Dunedin, New Zealand},
    Author = {Stephen Cranefield and Martin K. Purvis},
    Date-Added = {2009-06-08 14:20:00 +1200},
    Date-Modified = {2010-10-20 14:59:05 +1300},
    Institution = {Department of Information Science, University of Otago},
    Month = may,
    Number = {2001/08},
    Size = {171 KB},
    Title = {Generating ontology-specific content languages},
    Type = {Discussion paper},
    Year = {2001}}

@techreport{dp2000-08,
    Abstract = {This paper describes a system of interlinked ontologies to describe the concepts underlying FIPA agent communication. A meta-modelling approach is used to relate object-oriented domain ontologies and abstract models of agent communication and content languages and to describe them in a single framework. The modelling language used is the Unified Modeling Language, which is extended by adding the concepts of resource and reference. The resulting framework provides an elegant basis for the development of agent systems that combine object-oriented information representation with agent messaging protocols.},
    Address = {Dunedin, New Zealand},
    Author = {Stephen Cranefield and Martin K. Purvis and Mariusz Nowostawski},
    Date-Added = {2009-06-08 14:20:00 +1200},
    Date-Modified = {2009-06-10 15:50:01 +1200},
    Institution = {Department of Information Science, University of Otago},
    Month = apr,
    Number = {2000/08},
    Size = {100 KB},
    Title = {Is it an ontology or an abstract syntax? {M}odelling objects, knowledge and agent messages},
    Type = {Discussion paper},
    Year = {2000}}

@techreport{dp2001-03,
    Abstract = {This paper proposes the use of the Unified Modelling Language (UML) as a formalism for defining an abstract syntax for Agent Communication Languages (ACLs) and their associated content languages. It describes an approach supporting an automatic mapping from high-level abstract specifications of language structures to specific computer language bindings that can be directly used by an agent platform. Some advantages of this approach are that it provides a framework for specifying and experimenting with alternative agent communication languages and reduces the error-prone manual process of generating compatible bindings and grammars for different syntaxes. A prototype implementation supporting an automatic conversion from an abstract communication language expressed in UML to a native Java API and a Resource Description Framework (RDF) serialisation format is described.},
    Address = {Dunedin, New Zealand},
    Author = {Stephen Cranefield and Martin K. Purvis and Mariusz Nowostawski},
    Date-Added = {2009-06-08 14:20:00 +1200},
    Date-Modified = {2009-06-10 15:51:28 +1200},
    Institution = {Department of Information Science, University of Otago},
    Keywords = {agent communication languages, abstract syntax, UML, XMI, Java binding, marshalling, RDF},
    Month = feb,
    Number = {2001/03},
    Size = {488 KB},
    Title = {Implementing agent communication languages directly from {UML} specifications},
    Type = {Discussion paper},
    Year = {2001}}

@techreport{dp2007-08,
    Abstract = {One approach to moderating the behaviour of agents in open societies is the use of explicit languages for defining norms, conditional commitments and/or social expectations, together with infrastructure supporting conformance checking and the identification and possible punishment of anti-social agents. This paper presents a logical account of the creation, fulfilment and violation of social expectations modelled as conditional rules over a hybrid propositional temporal logic. The semantics are designed to allow model checking over finite histories to be used to check for fulfilment and violation of expectations in both online and offline modes. For online checking, expectations are always considered at the last state in the history, but in the offline mode expectations in previous states are also checked. At each past state, the then-active expectations must be checked for fulfilment without recourse to information from later states: the truth of a future-oriented temporal proposition $\varphi$ at state $s$ over the full history does not imply the fulfilment at $s$ of an expectation with content $\varphi$. This issue is addressed by defining fulfilment and violation in terms of an extension of Eisner et al.'s weak/strong semantics for LTL over truncated paths.
The update of expectations from one state to the next is based on formula progression, and the approach has been implemented by extending the MCLITE and MCFULL algorithms of the Hybrid Logic Model Checker.}, Address = {Dunedin, New Zealand}, Author = {Stephen Cranefield and Michael Winikoff}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:43 +1200}, Institution = {Department of Information Science, University of Otago}, Month = dec, Number = {2007/08}, Size = {240 KB}, Title = {Verifying social expectations by model checking truncated paths}, Type = {Discussion paper}, Year = {2007}} @techreport{dp2005-11, Abstract = {Progress made in content-based image retrieval has reactivated research on image analysis, and similarity-based approaches have been investigated to assess the similarity between images. In this paper, the content-based approach is extended towards the problem of image collection summarization and comparison. For these purposes we propose to carry out clustering analysis on visual features using self-organizing maps, and then evaluate their similarity using a few dissimilarity measures implemented on the feature maps. The effectiveness of these dissimilarity measures is then examined with an empirical study.}, Address = {Dunedin, New Zealand}, Author = {Da Deng}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:43 +1200}, Institution = {Department of Information Science, University of Otago}, Month = dec, Number = {2005/11}, Size = {1.3 MB}, Title = {Content-based image collection summarization and comparison using self-organizing maps}, Type = {Discussion paper}, Year = {2005}} @techreport{dp2000-16, Abstract = {Although widely studied for many years, colour image quantisation remains a challenging problem. We propose to use an evolving self-organising map model for on-line image quantisation tasks. Encouraging results are obtained in experiments, and we look forward to implementing the algorithm in real-world applications with further improvement.}, Address = {Dunedin, New Zealand}, Author = {Da Deng and Nikola Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:43 +1200}, Institution = {Department of Information Science, University of Otago}, Month = nov, Number = {2000/16}, Size = {2.2 MB}, Title = {Evolving localised learning for on-line colour image quantisation}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2000-06, Abstract = {In real-world information systems, data analysis and processing usually need to be done in an on-line, self-adaptive way. In this respect, neural algorithms for incremental learning and constructive network models are of increased interest. In this paper we present a new algorithm, the evolving self-organizing map (ESOM), which features fast one-pass learning, dynamic network structure, and good visualisation ability. Simulations have been carried out on some benchmark data sets for classification and prediction tasks, as well as on some macroeconomic data for data analysis. Compared with other methods, ESOM achieved better classification with much shorter learning time. Its performance for time series modelling is also comparable, requiring more hidden units but with only one-pass learning.
Our results demonstrate that ESOM is an effective computational model for on-line learning, data analysis and modelling.}, Address = {Dunedin, New Zealand}, Author = {Da Deng and Nikola Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:44 +1200}, Institution = {Department of Information Science, University of Otago}, Month = mar, Number = {2000/06}, Size = {233 KB}, Title = {Evolving self-organizing maps for on-line learning, data analysis and modelling}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2007-04, Abstract = {In tackling data mining and pattern recognition tasks, finding a compact but effective set of features has often been found to be a crucial step in the overall problem-solving process. In this paper we present an empirical study on feature analysis for classical instrument recognition, using machine learning techniques to select and evaluate features extracted from a number of different feature schemes. It is revealed that there is significant redundancy between and within feature schemes commonly used in practice. Our results suggest that further feature analysis research is necessary in order to optimize feature selection and achieve better results for the instrument recognition problem.}, Address = {Dunedin, New Zealand}, Author = {Da Deng and Christian Simmermacher and Stephen Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:44 +1200}, Institution = {Department of Information Science, University of Otago}, Month = aug, Number = {2007/04}, Size = {204 KB}, Title = {A study on feature analysis for musical instrument classification}, Type = {Discussion paper}, Year = {2007}} @techreport{dp2006-09, Abstract = {Along with progress in content-based image retrieval research and the development of the MPEG-7 XM feature descriptors, there has been increasing research interest in object recognition and semantics extraction from images and videos. In this paper, we revisit the old problem of indoor versus outdoor scene classification. By introducing a precision-boosted combination scheme of multiple classifiers trained on several global and regional feature descriptors, our experiment has led to better results compared with conventional approaches.}, Address = {Dunedin, New Zealand}, Author = {Xianglin Deng and Jianhua Zhang}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:44 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {scene classification, classifier combination}, Month = may, Number = {2006/09}, Size = {843 KB}, Title = {Combining multiple precision-boosted classifiers for indoor-outdoor scene classification}, Type = {Discussion paper}, Year = {2006}} @techreport{dp2007-03, Abstract = {This paper proposes to design a mechanism that will allow M{\=a}ori users to specify their privacy preferences related to their culture when a software system asks for culturally sensitive information. We first identify various concepts associated with sensitive aspects of M{\=a}ori culture, such as tapu. We propose to build an ontology that describes these concepts and the relations between them in a formal way.
This ontology will help service providers integrate M{\=a}ori cultural protocols in order to make M{\=a}ori users more confident about the use of the sensitive information related to their culture.}, Address = {Dunedin, New Zealand}, Author = {Xianglin Deng and Noria Foukia and Bastin Tony Roy Savarimuthu}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:44 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {privacy, M{\=a}ori culturally sensitive information}, Month = jul, Number = {2007/03}, Size = {308 KB}, Title = {Building privacy infrastructure for culturally sensitive information of {N}ew {Z}ealand {M}{\=a}ori}, Type = {Discussion paper}, Year = {2007}} @techreport{dp2002-08, Abstract = {(No abstract.)}, Address = {Dunedin, New Zealand}, Author = {Grant Dick and Peter Whigham}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:44 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {evolutionary computation, selection, spatial patterns}, Month = nov, Number = {2002/08}, Size = {255 KB}, Title = {Population density and spatially constrained selection in evolutionary computation}, Type = {Discussion paper}, Year = {2002}} @techreport{dp2006-06, Abstract = {This paper presents an implementation of the first stage of a Virtual Organization (VO) life cycle, which is the VO's creation. This implementation is based on previous work by one of the authors describing a framework which facilitates the establishment of VO agreements. In accordance with the framework, the implementation makes the VO's creation fully automated, thereby reducing its duration considerably. This is beneficial for the VO, which should only exist for the limited period needed to satisfy its goal. The VO is implemented as a Multi-Agent System (MAS), where autonomous agents negotiate the agreement leading to the VO's establishment. The Opal FIPA-compliant MAS platform was used to implement the VO agents. Different scenarios and evaluations provide a clear demonstration of the implementation, showing how agents dynamically negotiate the establishment of the agreement and how opportunistic agents' behavior affects the trust level during the negotiation process.}, Address = {Dunedin, New Zealand}, Author = {Noria Foukia and Pierre-Etienne Mallet}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:55:31 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {virtual organization, trust, autonomy, agent}, Month = mar, Number = {2006/06}, Size = {336 KB}, Title = {Establishing dynamic trust in virtual organization by means of {MAS}}, Type = {Discussion paper}, Year = {2006}} @techreport{dp2002-05, Abstract = {The technology sector of healthcare is entering a new evolutionary phase. The medical community has an obligation to the public to provide the safest, most effective healthcare possible. This is more achievable with the use of computer technology at the point of care, and small, portable devices could fulfil this role. A Modern Physician/PricewaterhouseCoopers 2001 survey on information technology in physician practices found that 60% of respondents said that physicians in their organisation use PDAs, compared with 26% in the 2000 technology survey. This trend is expected to continue to the point where these devices will have their position on a physician's desk next to their stethoscope.
Once this electronic evolution occurs, the practice of medicine will change. Doctors will be able to practice medicine with greater ease and safety. In our opinion, the new generation of PDA mobile devices will be the tools to enable a transformation of healthcare to a paperless, wireless world. This article focuses on uses for PDAs in health care. Healthcare software is categorised into the following groups: reference/text book, calculators, patient management/logbook and personal clinical/study notebook, with a focus on the healthcare audience (the user), who may be a registrar, consultant, nurse, student, teacher, patient, medical director or surgical staff.}, Address = {Dunedin, New Zealand}, Author = {Wayne Gillingham and Alec Holt and John Gillies}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:45 +1200}, Institution = {Department of Information Science, University of Otago}, Month = jul, Number = {2002/05}, Size = {864 KB}, Title = {Hand-held computers in health care: {W}hat software programs are available?}, Type = {Discussion paper}, Year = {2002}} @techreport{dp2004-02, Abstract = {Touch screens are a popular method of interaction with information systems embedded in public kiosks. Typical information systems are used on desktop PCs and are therefore restricted to having a mouse as the selection device used to interact with the system. The purpose of this paper is to investigate how effective a touch screen overlay is in selecting typical graphical user interface (GUI) items used in information systems. A series of tests was completed involving multi-directional point and select tasks. A mouse, being the standard selection device, was also tested so that the results of the touch screen could be compared. The GUI items tested were a button, check box, combo box and a text box. The results showed that the touch screen overlay was not suitable for selecting small targets with a size of 4 mm or less. The touch screen overlay was slower and had a higher error rate compared to the mouse. There was no significant difference in throughput between the touch screen overlay and the mouse. The mouse was rated easier to use and easier to make accurate selections with. The touch screen had higher arm, wrist and finger fatigue. This indicates that a touch screen overlay used only with a finger is not a practical selection device to use with interfaces containing small targets.}, Address = {Dunedin, New Zealand}, Author = {Matthew Gleeson and Nigel Stanger and Elaine Ferguson}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:54:04 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {touch screen overlay, mouse, pointing devices, Fitts' Law, performance evaluation, GUI items}, Month = dec, Number = {2004/02}, Size = {610 KB}, Title = {Design strategies for {GUI} items with touch screen based information systems: {A}ssessing the ability of a touch screen overlay as a selection device}, Type = {Discussion paper}, Year = {2004}} @techreport{dp2002-07, Abstract = {The St John's Ambulance Service, Southern Region Control Centre (the control centre) is located in Dunedin City and controls 56 ambulances based in 26 regional stations. The Southern Region covers an area of approximately 54,000 square kilometres, which has a usually resident population of 272,541 (Census, 2001). This report focuses on the dispatch and workload profile of the control centre between 1 January 1997 and 31 December 2001.
During this period the control centre dispatched ambulances on approximately 135,822 occasions to a total of 118,759 incidents (this includes both emergency incidents and patient transfers). Based on an analysis of these incidents, several key findings are discussed in this report. These include: a 21.8% increase in the total number of incidents handled in the control centre between 1997 and 2001; a 44-second increase in average activation times between 1997 and 2001; a strong correlation between increased workload and increased activation times; and a large increase in activation times during low and medium workload periods.}, Address = {Dunedin, New Zealand}, Author = {Jared Hayes}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:52:34 +1200}, Institution = {Department of Information Science, University of Otago}, Month = nov, Number = {2002/07}, Size = {180 KB}, Title = {St {J}ohn's {A}mbulance {S}ervice, {S}outhern {R}egion: {C}ontrol centre dispatch profile (1997--2001)}, Type = {Discussion paper}, Year = {2002}} @techreport{dp2000-01, Abstract = {This article outlines similarity applied to the general environment and geographical information domains. The hypothesis is that if the physical and social sciences manifest similar amenities, then similarity would be a generative technique to analyse the cached information inherent in the data retrieved. Similarity is examined concerning the spatial grouping of natural kinds in a complex environment.}, Address = {Dunedin, New Zealand}, Author = {Alec Holt}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:45 +1200}, Institution = {Department of Information Science, University of Otago}, Month = feb, Number = {2000/01}, Size = {206 KB}, Title = {Investigating complexities through computational techniques}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2001-09, Abstract = {This paper proposes a novel View-based Consistency model for Distributed Shared Memory. A view is a set of ordinary data objects that a processor has the right to access in a data-race-free program. The View-based Consistency model only requires that the data objects of a view are updated before a processor accesses them. Compared with other memory consistency models, the View-based Consistency model can achieve data selection without user annotation and can greatly reduce the false-sharing effect. This model has been implemented based on TreadMarks. Performance results have shown that for all our applications the View-based Consistency model outperforms the Lazy Release Consistency model.}, Address = {Dunedin, New Zealand}, Author = {Zhiyi Huang and Chengzheng Sun and Martin K. Purvis and Stephen Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:45 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {distributed shared memory, sequential consistency, false sharing}, Month = may, Number = {2001/09}, Size = {139 KB}, Title = {View-based consistency and its implementation}, Type = {Discussion paper}, Year = {2001}} @techreport{dp2001-01, Abstract = {Fuzzy neural networks are connectionist systems that facilitate learning from data, reasoning over fuzzy rules, rule insertion, rule extraction, and rule adaptation.
The concept of evolving fuzzy neural networks (EFuNNs), with the respective algorithms for learning, aggregation, rule insertion and rule extraction, is further developed here and applied to on-line knowledge discovery in both prediction and classification tasks. EFuNNs operate in an on-line mode and learn incrementally through locally tuned elements. They grow as data arrive, and regularly shrink through pruning of nodes, or through node aggregation. The aggregation procedure is functionally equivalent to knowledge abstraction. The features of EFuNNs are illustrated on two real-world application problems---one from macroeconomics and another from bioinformatics. EFuNNs are suitable for fast learning of on-line incoming data (e.g., financial and economic time series, biological process control), adaptive learning of speech and video data, incremental learning and knowledge discovery from growing databases (e.g. in bioinformatics), on-line tracing of processes over time, and life-long learning. The paper also includes a short review of the most common types of rules used in knowledge-based neural networks for knowledge discovery and data mining.}, Address = {Dunedin, New Zealand}, Author = {Nikola Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:45 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {fuzzy rules, evolving fuzzy neural networks, on-line learning, macroeconomics, bioinformatics}, Month = jan, Number = {2001/01}, Size = {707 KB}, Title = {Evolving fuzzy neural networks for on-line knowledge discovery}, Type = {Discussion paper}, Year = {2001}} @techreport{dp2000-15, Abstract = {The paper applies novel techniques for on-line, adaptive learning of macroeconomic data and subsequent analysis and prediction. The evolving connectionist system paradigm (ECOS) is used in its two versions---unsupervised (evolving self-organised maps), and supervised (evolving fuzzy neural networks---EFuNN). In addition to these techniques, self-organised maps (SOM) are also employed for finding clusters of countries based on their macroeconomic parameters. EFuNNs allow for modelling, clustering, prediction and rule extraction. The rules that describe future annual values for the consumer price index (CPI), interest rate, unemployment and GDP per capita are extracted from data and reported in the paper both for the global EU-Asia block of countries and for smaller groups---EU, EU-candidate countries, and Asia-Pacific countries. The analysis and prediction models prove to be useful tools for analysing trends in the macroeconomic development of clusters of countries and for predicting their future development.}, Address = {Dunedin, New Zealand}, Author = {Nikola Kasabov and H. Akpinar and L. Rizzi and Da Deng}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:50:41 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {neural networks, fuzzy rules, evolving connectionist systems, macroeconomic clusters}, Month = oct, Number = {2000/15}, Title = {Analysis of the macroeconomic development of {E}uropean and {A}sia-{P}acific countries with the use of connectionist models}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2000-18, Abstract = {Elsewhere Kennedy describes three elementary structures to be found in entity-relationship diagrams.
Here, each of these structures is considered in the context of a transaction processing system, and a specific set of components that can be associated with the structure is described. Next, an example is given illustrating the use of elementary structures as an analytical tool for data modelling and a diagnostic tool for the identification of errors in the resulting data model. It is conjectured that the amount of effort associated with each structure can be measured. A new approach for the estimation of the total effort required to develop a system, based on a count of the elementary structures present in the entity-relationship diagram, is then proposed. The approach is appealing because it can be automated and because it can be applied earlier in the development cycle than other estimation methods currently in use. The question of a suitable counting strategy remains open.}, Address = {Dunedin, New Zealand}, Author = {Geoffrey Kennedy}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:46 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {data modelling, design tools and techniques, entity-relationship model, software metrics}, Month = dec, Number = {2000/18}, Size = {112 KB}, Title = {Elementary structures in entity-relationship diagrams as a diagnostic tool in data modelling and a basis for effort estimation}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2000-05, Abstract = {M{\=a}ori speech data collection and analysis is an ongoing process, as new and existing data sets are continuously accessed for many different experimental speech perception and generation processing tasks. A data management system is an important tool to facilitate the systematic techniques applied to the speech and language data. Identification of the core components (M{\=a}ori speech and language databases, translation systems, speech recognition and speech synthesis) has been undertaken as a set of research themes. The latter component will be the main area of discussion here. To hasten the development of M{\=a}ori speech synthesis, joint collaborative research with established international projects has begun. This will allow the M{\=a}ori language to be presented to the wider scientific community well in advance of other similar languages, many times its own size and distribution. Propagation of the M{\=a}ori language via the information communication technology (ICT) medium is advantageous to its long-term survival.}, Address = {Dunedin, New Zealand}, Author = {Mark Laws}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:46 +1200}, Institution = {Department of Information Science, University of Otago}, Month = mar, Number = {2000/05}, Size = {243 KB}, Title = {Development of a {M}{\=a}ori database for speech perception and generation}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2000-04, Abstract = {The English and M{\=a}ori word translator ng{\=a} aho whakam{\=a}ori-{\=a}-tuhi was designed to provide single head-word translations to on-line web users. There are over 13,000 words, all based on traditional text sources, derived because of their high frequency of use within each of the respective languages. The translator has been operational for well over a year now, and it has had the highest web traffic usage in the Department of Information Science.
Two log files were generated to record domain hits and language translations; both provided the up-to-date data for the analysis contained in this paper.}, Address = {Dunedin, New Zealand}, Author = {Mark Laws and Richard Kilgour and Michael Watts}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:46 +1200}, Institution = {Department of Information Science, University of Otago}, Month = mar, Number = {2000/04}, Size = {202 KB}, Title = {Analysis of the {N}ew {Z}ealand and {M}{\=a}ori on-line translator}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2007-07, Abstract = {This paper presents a new approach to dynamic symmetric key distribution for encrypting the communication between two nodes in a Wireless Sensor Network (WSN). The distribution of a shared key can be performed by any sensor node and does not always require that it be performed by the base station (BS). Each node can be selected by one of its neighbor nodes in order to distribute a pair-wise key for a communication between two nodes. The selection is based on the local computation of a trust value granted by the requesting nodes. This scheme considerably reduces the cost of communication between the BS and the nodes when setting up pair-wise keys between neighboring nodes. This paper also describes dynamic route selection mechanisms, based on trust and cost, that each node performs to route data to neighbor nodes and to the BS.}, Address = {Dunedin, New Zealand}, Author = {Nathan Lewis and Noria Foukia and Donovan G. Govan}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:46 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {key distribution, trust, wireless sensor network, route selection}, Month = sep, Number = {2007/07}, Size = {448 KB}, Title = {Using trust for key distribution and route selection in wireless sensor networks}, Type = {Discussion paper}, Year = {2007}} @techreport{dp2005-07, Abstract = {The immune system is a complex and distributed system. It provides a multilevel form of defence, capable of identifying and reacting to harmful pathogens that it does not recognise as being part of its ``self''. The framework proposed in this paper incorporates a number of immunological principles, including the multilevel defence and the cooperation between cells in the adaptive immune system. It is proposed that this approach could be used to provide a high level of intrusion detection, while minimising the level of false negative detections.}, Address = {Dunedin, New Zealand}, Author = {Melanie Middlemiss}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:46 +1200}, Institution = {Department of Information Science, University of Otago}, Month = jul, Number = {2005/07}, Size = {264 KB}, Title = {Framework for intrusion detection inspired by the immune system}, Type = {Discussion paper}, Year = {2005}} @techreport{dp2006-03, Abstract = {The immune system is a complex and distributed system. It provides a multilayered form of defence, capable of identifying and responding to harmful pathogens that it does not recognise as ``self''. The framework proposed in this paper incorporates a number of immunological concepts and principles, including the multilayered defence and the cooperation between cells in the adaptive immune system. An alternative model of positive selection is also presented.
It is suggested that the framework discussed here could lead to reduced false positive responses in anomaly detection tasks, such as intrusion detection, as well as being extended to a population of computational immune systems that are able to maintain population diversity of recognition and response.}, Address = {Dunedin, New Zealand}, Author = {Melanie Middlemiss}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:47 +1200}, Institution = {Department of Information Science, University of Otago}, Month = jan, Number = {2006/03}, Size = {444 KB}, Title = {Positive and negative selection in a multilayer artificial immune system}, Type = {Discussion paper}, Year = {2006}} @techreport{dp2002-03, Abstract = {Computer software is becoming increasingly utilized as an aid to rugby, and to sports coaching in general. Videoed sport is the most widely used form of raw data for sports analysis, though it is currently not being used to its full potential. Patterns of player movement and position, both for individuals and groupings of players, are important for understanding the complexities of professional team sports, and yet are not being adequately addressed. This paper outlines a project that aims to support coaching and/or commentary by visualizing and measuring the similarity of video-derived spatio-temporal information, and enabling timely access to relevant video clips. Specifically, methods by which a user of spatially-enabled sports software can visualize spatio-temporal and rugby object information will be discussed. Two issues are examined: (1) powerful spatio-temporal representation techniques for rugby constructs (such as the pitch, players and amalgamations of players: team, scrum, lineout, backline) and (2) user interface design and how it enables rugby object representation alongside the spatio-temporal visualization facility.}, Address = {Dunedin, New Zealand}, Author = {Antoni Moore and Peter Whigham and Colin Aldridge and Alec Holt and Ken Hodge}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:47 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {spatial, temporal, video, representation, object, rugby}, Month = jun, Number = {2002/03}, Size = {2.3 MB}, Title = {Spatio-temporal and object visualization in rugby union}, Type = {Discussion paper}, Year = {2002}} @techreport{dp2003-05, Abstract = {It is a standard aim to complete tasks efficiently and effectively. When technology is involved, the tools must be designed to facilitate optimal performance. The ActualDepth{\texttrademark} Multi-Layer Display (MLD{\texttrademark}) is a `new generation' display, consisting of two layered Liquid Crystal Displays (LCDs), with a region of space between them. The top LCD displays transparently, allowing both layers to be viewed simultaneously. This paper describes an experiment that investigated relative reading speeds, error detection, comprehension speeds and comprehension accuracy on the MLD{\texttrademark}, including a comparison with standard single layered displays. A framework pertaining to colour and transparency usage on the MLD{\texttrademark} was then developed, which is intended to enhance the usability and effectiveness of the display.
In general, it was found that overall readability was improved on the MLD{\texttrademark} compared to a standard display, and that different transparency levels and colours should be employed depending on the purpose of reading the text.}, Address = {Dunedin, New Zealand}, Author = {Anna Nees and Rochelle Villanueva and William Wong}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:53:50 +1200}, Institution = {Department of Information Science, University of Otago}, Month = nov, Number = {2003/05}, Size = {249 KB}, Title = {Colour and transparency on the {M}ulti-{L}ayer {D}isplay ({MLD}{\texttrademark})}, Type = {Discussion paper}, Year = {2003}} @techreport{dp2001-10, Abstract = {The purpose of this document is to describe the key technology issues for distributed information access in New Zealand. It is written from an industrial and public sector perspective, representing the views and findings of a wide cross-section of institutions in public and private sectors. It is an output of Objective 2 of the Distributed Information Systems project funded under contract UO0621 by the New Zealand Foundation for Research, Science and Technology (FRST). It complements other project material produced by the academic research team at the University of Otago and its collaborators. It focuses on requirements and applications, and is intended to provide a real-world, New Zealand-oriented context for the research in distributed information technologies (DIST). The report represents the culmination of a series of workshops, industrial consultations, a questionnaire, and the experiences of the authors' institutions during the project, and therefore it supplements any previously produced material.}, Address = {Dunedin, New Zealand}, Author = {Howard Nicholls and Robert Gibb}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:47 +1200}, Institution = {Department of Information Science, University of Otago}, Month = sep, Number = {2001/10}, Size = {1.3 MB}, Title = {Distributed information access in {N}ew {Z}ealand}, Type = {Discussion paper}, Year = {2001}} @techreport{dp2006-08, Abstract = {The notion that all (or, in a weaker sense, some) natural phenomena can be modelled as a computable process (some kind of algorithm) has recently been gaining scientific recognition, and more research is dedicated to rigorous exploration of the mapping between natural phenomena and formalised computational systems. There is some debate and controversy as to how much of the natural can be expressed in the models of the artificial, although due to the formalised nature of mathematics and physics itself, it is generally accepted that computation is a viable way to model physical reality. Contemporary developments in computer science and in physics not only do not refute computationalism -- they provide more data and evidence in support of its basic theses. In this article we discuss some aspects of contemporary computationalist efforts based on the traditional notions of Turing Machine computation. Then we present an extended notion of computation that goes beyond the traditional Turing limit. We propose a new interactive computation model called Evolvable Virtual Machines (EVMs). The EVM model uses the notion of many independently, asynchronously executing processes that communicate with each other and with the outside environment.
We present some of the pitfalls of traditional computationalism, and compare it to our new, extended computationalist model, based on the notion of massively concurrent interactive computation (hypercomputation). We argue that hypercomputationalism, based on a collection of asynchronously, concurrently communicating computational machines, is a more compact and more appropriate way of representing natural phenomena (or the Universe in general). It is theoretically sound, and does not violate any of the current state-of-the-art physical theories. We discuss the details of our computational architecture, and present some of the implications of hypercomputationalism for the contemporary physical sciences, life sciences, and computer science.}, Address = {Dunedin, New Zealand}, Author = {Mariusz Nowostawski}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:55:48 +1200}, Institution = {Department of Information Science, University of Otago}, Month = mar, Number = {2006/08}, Size = {477 KB}, Title = {The {EVM}'s universe and the {U}niverse}, Type = {Discussion paper}, Year = {2006}} @techreport{dp2000-13, Abstract = {The use of modelling abstractions to map from items in the real world to objects in the computational domain is useful both for the effective implementation of abstract problem solutions and for the management of software complexity. This paper discusses the new approach of agent-oriented software engineering (AOSE), which uses the notion of an autonomous agent as its fundamental modelling abstraction. For the AOSE approach to be fully exploited, software engineers must be able to gain leverage from an agent software architecture and framework, and there are several such frameworks now publicly available. At the present time, however, there is little information concerning the options that are available and what needs to be considered when choosing or developing an agent framework. We consider three different agent software architectures that are (or will be) publicly available and evaluate some of the design and architectural differences and trade-offs that are associated with them and their impact on agent-oriented software development. Our discussion examines these frameworks in the context of an example in the area of distributed information systems.}, Address = {Dunedin, New Zealand}, Author = {Mariusz Nowostawski and Geoff Bush and Martin K. Purvis and Stephen Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:47 +1200}, Institution = {Department of Information Science, University of Otago}, Month = aug, Number = {2000/13}, Size = {222 KB}, Title = {Platforms for agent-oriented software}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2001-06, Abstract = {An architecture, and the accompanying infrastructural support, for agent-based software development is described which supports the use of agent-oriented ideas at multiple levels of abstraction. At the lowest level are micro-agents, which are robust and efficient implementations of streamlined agents that can be used for many conventional programming tasks. Agents with more sophisticated functionality can be constructed by combining these micro-agents into more complicated agents. Consequently the system supports the consistent use of agent-based ideas throughout the software engineering process, since higher level agents may be hierarchically refined into more detailed agent implementations.
We outline how micro-agents are implemented in Java and how they have been used to construct the Opal framework for building more complex agents that are based on the FIPA specifications.}, Address = {Dunedin, New Zealand}, Author = {Mariusz Nowostawski and Geoff Bush and Martin K. Purvis and Stephen Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2010-10-20 14:50:25 +1300}, Institution = {Department of Information Science, University of Otago}, Keywords = {agents, multi-agent system, multi-agent platform scalability}, Month = mar, Number = {2001/06}, Size = {293 KB}, Title = {A multi-level approach and infrastructure for agent-oriented software development}, Type = {Discussion paper}, Year = {2001}} @techreport{dp2003-02, Abstract = {In FIPA-style multi-agent systems, agents coordinate their activities by sending messages representing particular communicative acts (or performatives). Agent communication languages must strike a balance between simplicity and expressiveness by defining a limited set of communicative act types that fit the communication needs of a wide set of problems. More complex requirements for particular problems must then be handled by defining domain-specific predicates and actions within ontologies. This paper examines the communication needs of a multi-agent distributed information retrieval system and discusses how well these are met by the FIPA ACL.}, Address = {Dunedin, New Zealand}, Author = {Mariusz Nowostawski and Daniel Carter and Stephen Cranefield and Martin Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:48 +1200}, Institution = {Department of Information Science, University of Otago}, Month = may, Number = {2003/02}, Size = {340 KB}, Title = {Communicative acts and interaction protocols in a distributed information system}, Type = {Discussion paper}, Year = {2003}} @techreport{dp2005-03, Abstract = {The increasing complexity of software applications forces researchers to look for automated ways of programming and adapting these systems. A self-adapting, self-organising software system is one possible way to tackle and manage higher complexity. A set of small independent problem solvers, working together in a dynamic environment, solving multiple tasks, and dynamically adapting to changing requirements is one way of achieving true self-adaptation in software systems. Our work presents a dynamic multi-task environment and experiments with a self-adapting software system. The Evolvable Virtual Machine (EVM) architecture is a model for building complex, hierarchically organised software systems. The intrinsic properties of the EVM allow the independent programs to evolve into higher levels of complexity, in a way analogous to multi-level, or hierarchical, evolutionary processes. The EVM is designed to evolve structures of self-maintaining, self-adapting ensembles that are open-ended and hierarchically organised. This article discusses the EVM architecture together with different statistical exploration methods that can be used with it.
Based on experimental results, certain behaviours that exhibit self-adaptation in the EVM system are discussed.}, Address = {Dunedin, New Zealand}, Author = {Mariusz Nowostawski and Lucien Epiney and Martin Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:48 +1200}, Institution = {Department of Information Science, University of Otago}, Month = mar, Number = {2005/03}, Size = {877 KB}, Title = {Self-adaptation and dynamic environment experiments with evolvable virtual machines}, Type = {Discussion paper}, Year = {2005}} @techreport{dp2007-05, Abstract = {Referrals are used in multi-agent systems, network agents and peer-to-peer systems for the purpose of global or local information spreading to facilitate trust relationships and reciprocal interactions. Based on referrals, local interactions can be altered with the purpose of maximising the utility function of each of the participants, which in many cases requires the mutual co-operation of participants. The referral system is often based on the global detailed or statistical behaviour of the overall society. Traditionally, referrals are collected by referring agents and the information is provided upon request to individuals. In this article, we provide a simple taxonomy of referral systems and on that basis we discuss three distinct ways information can be collected and aggregated. We analyse the effects of global vs. local information spreading, in terms of individual and global performance of a population based on the maximisation of a utility function of each of the agents. Our studies show that under certain conditions, such as a large number of non-uniformly acting autonomous agents, the spread of global information is undesirable. Collecting and providing only local information yields better overall results. In some experimental setups, however, it might be necessary for global information to be available; otherwise globally stable optimal behaviour cannot be achieved. We analyse both of these extreme cases based on a simple game-theoretic setup. We analyse and relate our results in the context of e-mail relaying and spam filtering.}, Address = {Dunedin, New Zealand}, Author = {Mariusz Nowostawski and Noria Foukia}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:48 +1200}, Institution = {Department of Information Science, University of Otago}, Month = aug, Number = {2007/05}, Size = {568 KB}, Title = {Social collaboration, stochastic strategies and information referrals}, Type = {Discussion paper}, Year = {2007}} @techreport{dp2007-06, Abstract = {The concept of autonomy is a central concept in distributed computational systems and in multi-agent systems in particular. With diverse implications in philosophy and despite frequent use in the social sciences and the theory of computation, autonomy remains a somewhat vague notion. Most researchers do not discuss the details of this concept, but rather assume a general, common-sense understanding of autonomy in the context of computational multi-agent systems. We will review the existing definitions and formalisms related to the notion of autonomy. We re-introduce two concepts: relative autonomy and absolute autonomy. We argue that even though the concept of absolute autonomy does not make sense in computational settings, it is useful if treated as an assumed property of computational units. For example, the concept of autonomous agents may facilitate more flexible and robust abstract architectures.
We adopt and discuss a new formalism based on results from the study of massively parallel multi-agent systems in the context of evolvable virtual machines. We also present an architecture for building such systems, based on our multi-agent system KEA, where we use an extended notion of dynamic linking. We augment our work with theoretical results from cham algebra for concurrent and asynchronous information processing systems. We argue that for open distributed systems, entities must be connected by multiple computational dependencies and a system as a whole must be subjected to influence from external sources. However, the exact linkages are not directly known to the computational entities themselves. This provides a useful notion of, and the necessary means to establish, relative autonomy in such systems.}, Address = {Dunedin, New Zealand}, Author = {Mariusz Nowostawski and Martin Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:48 +1200}, Institution = {Department of Information Science, University of Otago}, Month = aug, Number = {2007/06}, Size = {528 KB}, Title = {The concept of autonomy in distributed computation and multi-agent systems}, Type = {Discussion paper}, Year = {2007}} @techreport{dp2004-01, Abstract = {Contemporary software systems are exposed to demanding, dynamic, and unpredictable environments where the traditional adaptability mechanisms may not be sufficient. To imitate and fully benefit from life-like adaptability in software systems that might come closer to the complexity levels of biological organisms, we seek a formal mathematical model of certain fundamental concepts such as life, organism, evolvability and adaptation. In this work we will concentrate on the concept of software evolvability. Our work proposes an evolutionary computation model based on the theory of hypercycles and autopoiesis. The intrinsic properties of hypercycles allow them to evolve into higher levels of complexity, analogous to multi-level, or hierarchical, evolutionary processes. We aim to obtain structures of self-maintaining ensembles that are hierarchically organised, and our primary focus is on such open-ended hierarchically organised evolution.}, Address = {Dunedin, New Zealand}, Author = {Mariusz Nowostawski and Martin Purvis and Stephen Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:48 +1200}, Institution = {Department of Information Science, University of Otago}, Month = nov, Number = {2004/01}, Size = {349 KB}, Title = {An architecture for self-organising evolvable virtual machines}, Type = {Discussion paper}, Year = {2004}} @techreport{dp2001-05, Abstract = {Although the notion of conversations has been discussed for some time as a way in which to provide an abstract representation of extended agent message exchange, there is still no consensus established concerning how to use these abstractions effectively. This paper describes a layered approach based on coloured Petri Nets that can be used for modelling complex, concurrent conversations among agents in a multi-agent system. The approach can be used both to define simple conversation protocols and to define more complex conversation protocols composed of a number of simpler conversations. With this method it is possible (a) to capture the concurrent characteristics of a conversation, (b) to capture the state of a conversation at runtime, and (c) to reuse conversation structures for the processing of multiple concurrent messages.
A prototype implementation of such a system, with some examples, is described.}, Address = {Dunedin, New Zealand}, Author = {Mariusz Nowostawski and Martin K. Purvis and Stephen Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:49 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {agent communication languages, conversations, conversation protocols, Petri Nets, conversation monitoring and visualising}, Month = mar, Number = {2001/05}, Size = {216 KB}, Title = {A layered approach for modelling agent conversations}, Type = {Discussion paper}, Year = {2001}} @techreport{dp2005-06, Abstract = {In this paper we describe a graphical notation for physical database modelling. This notation provides database administrators with a means to model the physical structure of new and existing databases, thus enabling them to make more proactive and informed tuning decisions than existing database monitoring tools allow.}, Address = {Dunedin, New Zealand}, Author = {Antonia Pillay and Nigel Stanger}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:49 +1200}, Institution = {Department of Information Science, University of Otago}, Month = jun, Number = {2005/06}, Size = {337 KB}, Title = {A graphical notation for physical database modelling}, Type = {Discussion paper}, Year = {2005}} @techreport{dp2002-01, Abstract = {The Opal architecture for software development, which supports the use of agent-oriented concepts at multiple levels of abstraction, is described. At the lowest level are micro-agents, streamlined agents that can be used for conventional, system-level programming tasks. More sophisticated agents may be constructed by assembling combinations of micro-agents. The architecture consequently supports the systematic use of agent-based notions throughout the software development process. The paper describes (a) the implementation of micro-agents in Java, (b) how they have been used to fashion the Opal framework for the construction of more complex agents based on the Foundation for Intelligent Physical Agents (FIPA) specifications, and (c) the Opal Conversation Manager that facilitates the capability of agents to conduct complex conversations with other agents.}, Address = {Dunedin, New Zealand}, Author = {Martin Purvis and Stephen Cranefield and Mariusz Nowostawski and Dan Carter}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:49 +1200}, Institution = {Department of Information Science, University of Otago}, Month = mar, Number = {2002/01}, Size = {537 KB}, Title = {Opal: {A} multi-level infrastructure for agent-oriented software development}, Type = {Discussion paper}, Year = {2002}} @techreport{dp2002-04, Abstract = {An area where multi-agent systems can be put to effective use is that of an open collection of autonomous problem solvers in a dynamically changing environment. One example of such a situation is that of environmental management and emergency response, which can require the joint cooperation of a distributed set of components, each one of which may be specialised for a specific task or problem domain. The various stakeholders in the process can all be represented and interfaced by software agents which collaborate with each other toward achieving a particular goal. For such situations new agents that arrive on the scene must be apprised of the group interaction protocols so that they can cooperate effectively with the existing agents.
In this paper we show how this can be done by using coloured Petri net representations for each role in an interaction protocol and passing these nets dynamically to new agents that wish to participate in a group interaction. We argue that multi-agent systems are particularly suited for such dynamically changing environments, but their effectiveness depends on their ability to use adaptive interaction protocols.}, Address = {Dunedin, New Zealand}, Author = {Martin Purvis and Stephen Cranefield and Maryam Purvis and Mariusz Nowostawski}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:49 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {multi-agent systems, agent conversations, adaptive systems}, Month = jul, Number = {2002/04}, Size = {150 KB}, Title = {Multi-agent system interaction protocols in a dynamically changing environment}, Type = {Discussion paper}, Year = {2002}} @techreport{dp2002-02, Abstract = {Environmental management and emergency response often involves the joint cooperation of a network of distributed problem solvers, each of which may be specialised for a specific task or problem domain. Some of these problem solvers could be human, others could be `intelligent' environmental monitoring and control systems. Environmental software systems are needed not only for the provision of basic environmental information but also to support the coordination of these problem solvers. An agent architecture can support the requirements associated with disparate problem solvers. The various stakeholders in the process are represented by software agents which can collaborate with each other toward achieving a particular goal. The communication between agents can be accomplished by using interaction protocols which are represented by coloured Petri nets (CPN). This paper describes an approach for providing this support by employing a software agent framework for the modelling and execution of environmental process tasks in a networked environment.}, Address = {Dunedin, New Zealand}, Author = {Martin Purvis and Peter Hwang and Maryam Purvis and Stephen Cranefield and Martin Schievink}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:49 +1200}, Institution = {Department of Information Science, University of Otago}, Month = apr, Number = {2002/02}, Size = {121 KB}, Title = {Interaction protocols for a network of environmental problem solvers}, Type = {Discussion paper}, Year = {2002}} @techreport{dp2000-12, Abstract = {Workflow management systems are increasingly used to assist the automation of business processes that involve the exchange of documents, information, or task execution results. Recent developments in distributed information system technology now make it possible to extend the workflow management system idea to much wider spheres of activity in the industrial and commercial world. This paper describes a framework under development that employs such technology so that software tools and processes may interoperate in a distributed and dynamic environment. Key technical elements of the framework include the use of coloured Petri nets and distributed object technology (CORBA).}, Address = {Dunedin, New Zealand}, Author = {Martin K. Purvis and Maryam A.
Purvis and Selena Lemalu}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:49 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {distributed systems, workflow, process modelling}, Month = aug, Number = {2000/12}, Size = {195 KB}, Title = {An adaptive distributed workflow system framework}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2000-20, Abstract = {Workflow management systems (WFMS) are being adopted to assist the automation of business processes that involve the exchange of information. As a result of developments in distributed information system technology, it is now possible to extend the WFMS idea to wider spheres of activity in the industrial and commercial world and thereby to encompass the increasingly sprawling nature of modern organisations. This paper describes a framework under development that employs such technology so that software tools and processes may interoperate in a distributed and dynamic environment. The framework employs Petri nets to model the interaction between various sub-processes. CORBA technology is used to enable different participants who are physically disparate to monitor activity in and make resource-level adaptations to their particular subnet.}, Address = {Dunedin, New Zealand}, Author = {Martin K. Purvis and Maryam A. Purvis and Selena Lemalu}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:50 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {distributed systems, workflow, process modelling, Petri nets}, Month = dec, Number = {2000/20}, Size = {199 KB}, Title = {A framework for distributed workflow systems}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2006-05, Abstract = {This paper describes efforts to facilitate collaborative work in a distributed environment by providing infrastructure that supports an understanding of the inter-connected processes involved and how they interact. We describe how our agent-based framework supports these activities. This distributed work environment makes use of both P2P and client-server architectures. Using the example of developing an open-source software system, we explain how a collaborative work environment can be achieved. In particular we address how support for coordination, collaboration and communication is provided using our framework.}, Address = {Dunedin, New Zealand}, Author = {Maryam Purvis and Martin Purvis and Bastin Tony Roy Savarimuthu}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:55:24 +1200}, Institution = {Department of Information Science, University of Otago}, Month = feb, Number = {2006/05}, Size = {361 KB}, Title = {Facilitating collaboration in a distributed software development environment using {P2P} architecture}, Type = {Discussion paper}, Year = {2006}} @techreport{dp2007-01, Abstract = {Norms are shared expectations of behaviours that exist in human societies. Norms help societies by increasing the predictability of individual behaviours and by improving co-operation and collaboration among members. Norms have been of interest to multi-agent system researchers, as software agents are intended to follow certain norms. But, owing to their autonomy, agents sometimes violate norms, which necessitates monitoring. There are two main branches of research in normative agent systems.
One of the branches focuses on normative agent architectures, norm representations, norm adherence and the associated punitive or incentive measures. The other branch focuses on two main issues. The first issue is the study of the spreading and internalization of norms. The second issue, which has not received much attention, is the emergence of norms in agent societies. Our objective in this paper is to propose mechanisms for norm emergence in artificial agent societies and provide initial experimental results.}, Address = {Dunedin, New Zealand}, Author = {Bastin Tony Roy Savarimuthu and Maryam Purvis and Stephen Cranefield and Martin Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:56:07 +1200}, Institution = {Department of Information Science, University of Otago}, Month = feb, Number = {2007/01}, Size = {216 KB}, Title = {How do norms emerge in multi-agent societies? {M}echanisms design}, Type = {Discussion paper}, Year = {2007}} @techreport{dp2006-04, Abstract = {With the advent of Web Services, more and more business organizations make their services available on the Internet through Web Services and also use other services that are available on the corporate Intranet. From the viewpoint of workflow systems, these freely available Web Services and the proprietary intranet-based services should be integrated into individual businesses for their day-to-day workflows. Businesses that use Web Services not only provide the services to their customers but can also use Web Services to customize their internal processing, such as online order placement for raw materials. In this paper we describe the architecture of our agent-based workflow system that can be used for Web Service composition. In the context of an example from the apparel manufacturing industry, we demonstrate how Web Services can be composed and used.}, Address = {Dunedin, New Zealand}, Author = {Bastin Tony Roy Savarimuthu and Maryam Purvis and Martin Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:50 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {web services, multi-agent systems, workflow systems}, Month = feb, Number = {2006/04}, Size = {565 KB}, Title = {Agent based web service composition in the context of a supply-chain based workflow}, Type = {Discussion paper}, Year = {2006}} @techreport{dp2005-05, Abstract = {Rapid changes in the business environment call for more flexible and adaptive workflow systems. Researchers have proposed that Workflow Management Systems (WfMSs) comprising multiple agents can provide these capabilities. We have developed a multi-agent based workflow system, JBees, which supports distributed process models and the adaptability of executing processes. Modern workflow systems should also have the flexibility to integrate available web services as they are updated. In this paper we discuss how our agent-based architecture can be used to bind and access web services in the context of executing a workflow process model.
We use an example from the diamond processing industry to show how our agent architecture can be used to integrate web services with WfMSs.}, Address = {Dunedin, New Zealand}, Author = {Bastin Tony Roy Savarimuthu and Maryam Purvis and Martin Purvis and Stephen Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:50 +1200}, Institution = {Department of Information Science, University of Otago}, Month = mar, Number = {2005/05}, Size = {433 KB}, Title = {Agent-based integration of web services with workflow management systems}, Type = {Discussion paper}, Year = {2005}} @techreport{dp2006-10, Abstract = {We present an empirical study on classical music instrument classification. A methodology with feature extraction and evaluation is proposed and assessed with a number of experiments, whose final stage is to detect instruments in solo passages. Feature selection yields similar but distinct rankings for individual tone classification and for instrument recognition in solo passages. Based on the feature selection results, excerpts from concerto and sonata files are processed, so as to detect and distinguish four major instruments in solo passages: trumpet, flute, violin, and piano. Nineteen features selected from the Mel-frequency cepstral coefficients (MFCC) and the MPEG-7 audio descriptors achieve a recognition rate of around 94% by the best classifier assessed by cross validation.}, Address = {Dunedin, New Zealand}, Author = {Christian Simmermacher and Da Deng and Stephen Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:51 +1200}, Institution = {Department of Information Science, University of Otago}, Month = may, Number = {2006/10}, Size = {206 KB}, Title = {Feature analysis and classification of classical musical instruments: {A}n empirical study}, Type = {Discussion paper}, Year = {2006}} @techreport{dp2000-11, Abstract = {An important part of the systems development process is building models of real-world phenomena. These phenomena are described by many different kinds of information, and this diversity has resulted in a wide variety of modelling representations. Some types of information are better expressed by some representations than others, so it is sensible to use multiple representations to describe a real-world phenomenon. The author has developed an approach to facilitating the use of multiple representations within a single viewpoint by translating descriptions of the viewpoint among different representations. An important issue with such translations is their quality, or how well they map constructs of one representation to constructs of another representation. Two possible methods for improving translation quality, heuristics and enrichment, are proposed in this paper, and a preliminary metric for measuring relative translation quality is described.}, Address = {Dunedin, New Zealand}, Author = {Nigel Stanger}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:51 +1200}, Institution = {Department of Information Science, University of Otago}, Month = may, Number = {2000/11}, Size = {474 KB}, Title = {Translating descriptions of a viewpoint among different representations}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2000-09, Abstract = {When modelling a real-world phenomenon, it can often be useful to have multiple descriptions of the phenomenon, each expressed using a different modelling approach or representation.
Different representations such as entity-relationship modelling, data flow modelling and use case modelling allow analysts to describe different aspects of real-world phenomena, thus providing a more thorough understanding than if a single representation were used. Researchers working with multiple representations have approached the problem from many different fields, resulting in a diverse and potentially confusing set of terminologies. This paper describes a viewpoint-based framework for discussing the use of multiple modelling representations to describe real-world phenomena. This framework provides a consistent and integrated terminology for researchers working with multiple representations. An abstract notation is also defined for expressing concepts within the framework.}, Address = {Dunedin, New Zealand}, Author = {Nigel Stanger}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:51 +1200}, Institution = {Department of Information Science, University of Otago}, Month = apr, Number = {2000/09}, Size = {478 KB}, Title = {A viewpoint-based framework for discussing the use of multiple modelling representations}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2006-07, Abstract = {A fully functional and publicly available digital institutional repository (IR) in the space of just ten days? The technology was available, the time was right, the team was right and technical assistance from colleagues in Australia was on hand a mere cyber call away. This paper reports on how we were able to ``hit the ground running'' in building an open access IR in such a short space of time. What has taken our breath away is not so much the speed of the process, but the scale of responsiveness from the Internet community. Consequently, we also consider the research impact of more than 18,000 downloads from eighty countries, less than three months into the project!}, Address = {Dunedin, New Zealand}, Author = {Nigel Stanger and Graham McGregor}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:51 +1200}, Institution = {Department of Information Science, University of Otago}, Month = mar, Number = {2006/07}, Size = {327 KB}, Title = {Hitting the ground running: {B}uilding {N}ew {Z}ealand's first publicly available institutional repository}, Type = {Discussion paper}, Year = {2006}} @techreport{dp2000-03, Abstract = {We report on the clustering of nodes in internally represented acoustic space. Learners of different languages partition perceptual space distinctly. Here, an Evolving Connectionist-Based System (ECOS) is used to model the perceptual space of New Zealand English. Currently, the system evolves in an unsupervised, self-organising manner. The perceptual space can be visualised, and the important features of the input patterns analysed. Additionally, the path of the internal representations can be seen.
The results here will be used to develop a supervised system that can be used for speech recognition based on the evolved, internal sub-word units.}, Address = {Dunedin, New Zealand}, Author = {John Taylor and Nikola Kasabov and Richard Kilgour}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:51 +1200}, Institution = {Department of Information Science, University of Otago}, Month = mar, Number = {2000/03}, Size = {125 KB}, Title = {Modelling the emergence of speech sound categories in evolving connectionist systems}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2003-04, Abstract = {Accurate effort prediction is often an important factor for successful software development. However, the diversity of software development tools observed today has resulted in a situation where existing effort prediction models' applicability appears to be limited. Data-centred fourth-generation-language (4GL) software development provides one such difficulty. This paper aims to construct an accurate effort prediction model for data-centred 4GL development where a specific tool suite is used. Using historical data collected from 17 systems developed in the target environment, several linear regression models are constructed and evaluated in terms of two commonly used prediction accuracy measures, namely the mean magnitude of relative error (MMRE) and pred measures. In addition, $R^2$, the maximum value of MRE, and statistics of the absolute residuals are used for comparing the models. The results show that models consisting of specification-based software size metrics, which were derived from Entity Relationship Diagrams (ERDs) and Function Hierarchy Diagrams (FHDs), achieve good prediction accuracy in the target environment. The models' good effort prediction ability is particularly beneficial because specification-based metrics usually become available at an early stage of development. This paper also investigates the effect of developers' productivity on effort prediction and finds that the inclusion of productivity improves the models' prediction accuracy further. However, additional studies will be required in order to establish the best productivity-inclusive effort prediction model.}, Address = {Dunedin, New Zealand}, Author = {Chikako van Koten}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:51 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {prediction systems, 4GL, effort, metrics, empirical analysis}, Month = nov, Number = {2003/04}, Size = {398 KB}, Title = {An effort prediction model for data-centred fourth-generation-language software development}, Type = {Discussion paper}, Year = {2003}} @techreport{dp2005-08, Abstract = {Constructing an accurate effort prediction model is a challenge in Software Engineering. This paper presents new Bayesian statistical models, in order to predict development effort of software systems in the International Software Benchmarking Standards Group (ISBSG) dataset. The first model is a Bayesian linear regression (BR) model and the second model is a Bayesian multivariate normal distribution (BMVN) model. Both models are calibrated using subsets randomly sampled from the dataset. The models' predictive accuracy is evaluated using other subsets, which consist of only the cases unknown to the models. The predictive accuracy is measured in terms of the absolute residuals and magnitude of relative error.
They are compared with the corresponding linear regression models. The results show that the Bayesian models have predictive accuracy equivalent to the linear regression models, in general. However, the advantage of the Bayesian statistical models is that they do not require a calibration subset as large as their regression counterparts. In the case of the ISBSG dataset it is confirmed that the predictive accuracy of the Bayesian statistical models, in particular the BMVN model, is significantly better than that of the linear regression model when the calibration subset consists of only five or fewer software systems. This finding justifies the use of Bayesian statistical models in software effort prediction, in particular when the system of interest has only a very small amount of historical data.}, Address = {Dunedin, New Zealand}, Author = {Chikako van Koten}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:52 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {effort prediction, Bayesian statistics, regression, software metrics}, Month = oct, Number = {2005/08}, Size = {287 KB}, Title = {Bayesian statistical models for predicting software development effort}, Type = {Discussion paper}, Year = {2005}} @techreport{dp2005-02, Abstract = {As the number of object-oriented software systems increases, it becomes more important for organizations to maintain those systems effectively. However, currently only a small number of maintainability prediction models are available for object-oriented systems. This paper presents a Bayesian network maintainability prediction model for an object-oriented software system. The model is constructed using object-oriented metric data in Li and Henry's datasets, which were collected from two different object-oriented systems. Prediction accuracy of the model is evaluated and compared with commonly used regression-based models. The results suggest that the Bayesian network model can predict maintainability more accurately than the regression-based models for one system, and almost as accurately as the best regression-based model for the other system.}, Address = {Dunedin, New Zealand}, Author = {Chikako van Koten and Andrew Gray}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:54:16 +1200}, Institution = {Department of Information Science, University of Otago}, Month = mar, Number = {2005/02}, Size = {287 KB}, Title = {An application of {B}ayesian network for predicting object-oriented software maintainability}, Type = {Discussion paper}, Year = {2005}} @techreport{dp2005-09, Abstract = {Constructing an accurate effort prediction model is a challenge in Software Engineering. This paper presents three Bayesian statistical software effort prediction models for database-oriented software systems, which are developed using a specific 4GL tool suite. The models consist of specification-based software size metrics and a development team productivity metric. The models are constructed based on the subjective knowledge of human experts and calibrated using empirical data collected from 17 software systems developed in the target environment. The models' predictive accuracy is evaluated using subsets of the same data, which were not used for the models' calibration. The results show that the models have achieved very good predictive accuracy in terms of MMRE and pred measures.
Hence it is confirmed that the Bayesian statistical models can predict effort successfully in the target environment. In comparison with commonly used multiple linear regression models, the Bayesian statistical models' predictive accuracy is equivalent in general. However, when the number of software systems used for the models' calibration becomes smaller than five, the predictive accuracy of the best Bayesian statistical models is significantly better than that of the multiple linear regression model. This result suggests that the Bayesian statistical models would be a better choice when software organizations/practitioners do not possess sufficient empirical data for the models' calibration. The authors expect these findings to encourage more researchers to investigate the use of Bayesian statistical models for predicting software effort.}, Address = {Dunedin, New Zealand}, Author = {Chikako van Koten and Andrew Gray}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:54:38 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {effort prediction, 4GL, Bayesian statistics, regression, software metrics}, Month = oct, Number = {2005/09}, Size = {331 KB}, Title = {Bayesian statistical effort prediction models for data-centred {4GL} software development}, Type = {Discussion paper}, Year = {2005}} @techreport{dp2003-03, Abstract = {A modularised connectionist model, based on the Mixture of Experts (ME) algorithm for time series prediction, is introduced. A set of connectionist modules learn to be local experts over some commonly appearing states of a time series. The dynamics for mixing the experts is a Markov process, in which the states of a time series are regarded as states of an HMM. Hence, there is a Markov chain along the time series and each state is associated with a local expert. The state transition on the Markov chain is the process of activating a different local expert or activating some of them simultaneously with different probabilities generated from the HMM. The state transition property in the HMM is designed to be time-variant and conditional on the first-order dynamics of the time series. A modified Baum--Welch algorithm is introduced for the training of the time-variant HMM, and it has been proved that through the EM process the likelihood function converges to a local optimum. Experiments with two time series show that this approach achieves a significant improvement in generalisation performance over global models.}, Address = {Dunedin, New Zealand}, Author = {Xin Wang and Peter Whigham and Da Deng}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:53:27 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {series prediction, Mixture of Experts, HMM, connectionist model, expectation and maximization, Gauss probability density distribution}, Month = jun, Number = {2003/03}, Size = {486 KB}, Title = {Time-line {H}idden {M}arkov {E}xperts and its application in time series prediction}, Type = {Discussion paper}, Year = {2003}} @techreport{dp2000-10, Abstract = {Most applications of Genetic Programming to time series modeling use a fitness measure for comparing potential solutions that treats each point in the time series independently. This non-temporal approach can lead to some potential solutions being given a relatively high fitness measure even though they do not correspond to the training data when the overall shape of the series is taken into account.
This paper develops two fitness measures which emphasize the concept of shape when measuring the similarity between a training and an evolved time series. One approach extends the root mean square error to higher dimensional derivatives of the series. The second approach uses a simplified derivative concept that describes shape in terms of positive, negative and zero slope.}, Address = {Dunedin, New Zealand}, Author = {Peter Whigham and Colin Aldridge}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:52 +1200}, Institution = {Department of Information Science, University of Otago}, Month = apr, Number = {2000/10}, Size = {561 KB}, Title = {A shape metric for evolving time series models}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2005-04, Abstract = {Cost is a major obstacle to the adoption of large-scale data integration solutions by small to medium enterprises (SMEs). We therefore propose a lightweight data integration architecture built around the Atom XML syndication format, which may provide a cost-effective alternative technology for SMEs to facilitate data integration, compared to expensive enterprise-grade systems. The paper discusses the underlying principles and motivation for the architecture, the structure of the architecture itself, and our research goals.}, Address = {Dunedin, New Zealand}, Author = {David Williamson and Nigel Stanger}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:54:25 +1200}, Institution = {Department of Information Science, University of Otago}, Month = mar, Number = {2005/04}, Size = {301 KB}, Title = {A lightweight data integration architecture using {A}tom}, Type = {Discussion paper}, Year = {2005}} @techreport{dp2005-10, Abstract = {In visual perception, finding regions of interest in a scene is very important in carrying out visual tasks. Recently there have been a number of works proposing saliency detectors and visual attention models. In this paper, we propose an extensible visual attention framework based on MPEG-7 descriptors. Hotspots in an image are detected from the combined saliency map obtained from multiple feature maps at multiple scales. The saliency concept is then further extended and we propose a saliency index for ranking images by their interestingness. Simulations on hotspot detection and automatic image ranking are conducted and statistically tested in a user study. Results show that our method captures more important regions of interest and the automatic ranking agrees positively with user rankings.}, Address = {Dunedin, New Zealand}, Author = {Heiko Wolf and Da Deng}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:54:51 +1200}, Institution = {Department of Information Science, University of Otago}, Month = dec, Number = {2005/10}, Size = {3.1 MB}, Title = {Image saliency mapping and ranking using an extensible visual attention model based on {MPEG}-7 feature descriptors}, Type = {Discussion paper}, Year = {2005}} @techreport{dp2001-11, Abstract = {This paper reports on a field study into the nature of decision making in the command and control of emergency ambulances at the London Ambulance Service (LAS). This paper will describe how real-time decisions are made by emergency medical dispatchers and the decision strategies they invoke as they assess the situation, plan and co-ordinate the dispatch of emergency ambulances.
A cognitive task analysis approach known as the Critical Decision Method (Hoffman et al., 1998; Klein et al., 1989) was used in the study. The study showed that decision making in emergency ambulance command and control involves four major processes---assessment of the situation, assessment of resources, planning, and co-ordination and control. These four processes function within an awareness of goings-on in and around the sectors that the dispatchers operate in. This awareness is referred to as situation awareness and is being reported elsewhere (Wong {\&} Blandford, submitted). The decision making process resembles the decision making described by naturalistic decision making models (see Zsambok {\&} Klein, 1997, for an extensive discussion of the topic) and is an extension of the Integrated Decision Model (Wong, 1999). The study also suggested that a lot of effort was directed at understanding and assessing the situation and at maintaining a constant awareness of it. These observations have significant implications for the design of information systems for command and control purposes. These implications will be discussed separately in another paper. The paper will first introduce the domain of EMD at the LAS, then explain how the Critical Decision Method was used in the data collection and in the data analysis. It will then describe how decisions are made, particularly during major incidents, and then discuss the implications of those findings for the design of command and control systems.}, Address = {Dunedin, New Zealand}, Author = {William Wong and Ann Blandford}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:53 +1200}, Institution = {Department of Information Science, University of Otago}, Month = oct, Number = {2001/11}, Size = {204 KB}, Title = {Naturalistic decision making in emergency ambulance command and control}, Type = {Discussion paper}, Year = {2001}} @techreport{dp1999-09, Abstract = {Speech recognition has long been one of the most challenging fields facing scientists, and a complete solution is still far from reach. Efforts, backed by substantial funding from companies, are concentrated on a range of related and supporting approaches to reaching the final goal, which can then be applied to the enormous number of applications still waiting for successful speech recognisers that are free from the constraints of speakers, vocabularies or environment. This task is not an easy one, owing to the interdisciplinary nature of the problem and because it requires speech perception to be embodied in the recogniser (Speech Understanding Systems), which in turn points strongly to the use of intelligence within the systems. The bare techniques of recognisers (without intelligence) follow a wide variety of approaches, with different claims of success by each group of authors who put their faith in their favourite way. However, the one technique that has gained acceptance among researchers as the state of the art is the Hidden Markov Model (HMM), which is agreed to be the most promising. It might be used successfully with other techniques to improve the performance, such as hybridising the HMM with Artificial Neural Network (ANN) algorithms. This does not mean that the HMM is free from approximations that are far from reality, such as the assumed independence of successive observations, but the results and potential of this algorithm are reliable.
Modifications to the HMM aim to free it from these poorly representative approximations in the hope of better results. In this report we describe the backbone of the HMM technique, with the main outlines for a successful implementation. Representations and implementations of the HMM vary in one way or another, but the main idea is the same, as are the results and computation costs; choosing among them is a matter of preference. Our preference here is that adopted by Ferguson and Rabiner et al. We first describe the Markov chain, and then investigate a very popular model in the speech recognition field (the left-right HMM topology). The mathematical formulations that need to be implemented are fully explained, as they are crucial in building the HMM. The prominent factors in the design will also be discussed. Finally, we conclude the report with some experimental results showing the practical outcomes of the implemented model.}, Address = {Dunedin, New Zealand}, Author = {Waleed Abdulla and Nikola Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:53 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {May}, Number = {99/09}, Size = {556 KB}, Title = {The concepts of hidden Markov model in speech recognition}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1995-11, Abstract = {Fuzzy concepts might have potential for protecting and preserving land which has special cultural or spiritual significance for indigenous peoples, because they might support tangata whenua (indigenous peoples') desires for secrecy and confidentiality. These issues are examined in terms of New Zealand and from the technical perspective of Information Science. The various meanings of `fuzzy' are discussed. Some pertinent questions are: Is a fuzzy concept a useful tool to apply? Do the tangata whenua wish to make use of this tool?}, Address = {Dunedin, New Zealand}, Author = {Brian A. Ballantyne and George L. Benwell and Neil C. Sutherland}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:53 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {August}, Number = {95/11}, Title = {Fuzzy concepts, land and cultural confidentiality}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1993-02, Abstract = {This paper discusses the method of determining heights of mountains during the original geodetic survey of Victoria. From 1840 to 1875, more particularly the 1860s, geodetic surveyors were charged with the responsibility of mapping the colony. The subject of this paper is their efforts to determine the elevations by barometric heighting. A brief introduction to other methods is given while particular attention is paid to the determination of the height of Mount Sabine in the Otway Ranges, Victoria, by Surveyor Irwin in 1865. Attempts are made to recompute his original observations.}, Address = {Dunedin, New Zealand}, Author = {George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:54 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {November}, Number = {93/2}, Size = {770 KB}, Title = {Recomputing historical barometric heighting}, Type = {Discussion paper}, Year = {1993}} @techreport{dp1994-06, Abstract = {This paper describes the creation of a system development methodology suitable for spatial information systems.
The concept is founded on the fact that spatial systems are similar to information systems in general, the subtle difference being that spatial systems are not yet readily supported by large digital databases. This has diverted attention away from system development towards data collection. A spatial system development methodology is derived, based on a historical review of information systems methodologies and the coupling of these with a data collection and integration methodology for spatially referenced digital data.}, Address = {Dunedin, New Zealand}, Author = {George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:54 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {February}, Number = {94/6}, Size = {1.3 MB}, Title = {A system development methodology for geomatics as derived from informatics}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1996-25a, Abstract = {There is continuing pressure to develop spatial information systems. This paper develops two concepts that could emerge. The first is a new spatial paradigm---an holistic model---which is less of an abstraction from reality than current models. The second is the concept of federated databases for improved and transparent access to data by disparate users. The latter concept is hardly new and is included in this paper to emphasize its growing importance. These two developments are presented after an introductory discussion of the present state of the discipline of geographical information systems and spatial analysis.}, Address = {Dunedin, New Zealand}, Author = {George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 17:08:00 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Note = {(Not in electronic version.)}, Number = {96/25a}, Title = {Spatial databases---{C}reative future concepts and use}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1994-03, Abstract = {Petri nets, as a modelling formalism, are utilised for the analysis of processes, whether for explicit understanding, database design or business process re-engineering. The formalism, however, can be represented on a virtual continuum from highly graphical to largely algorithmic. The use and understanding of the formalism will, in part, therefore depend on the resultant complexity and power of the representation and on the graphical or algorithmic preference of the user. This paper develops a metric which will indicate the graphical or algorithmic tendency of hierarchical coloured Petri nets.}, Address = {Dunedin, New Zealand}, Author = {George L. Benwell and Stephen G. MacDonell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:20:30 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {February}, Number = {94/3}, Size = {427 KB}, Title = {Assessing the graphical and algorithmic structure of hierarchical coloured {P}etri net models}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1996-01, Abstract = {The creation of spatial information systems can be viewed from many directions. One such view is to see the creation in terms of data collection, data modelling, codifying spatial processes, information management, analysis and presentation. The amount of effort to create such systems is frequently under-estimated; this is true for each aspect of the above view.
The accuracy of the assessment of effort will vary for each aspect. This paper concentrates on the effort required to create the code for spatial processes and analysis. Recent experience has indicated that this is an area where considerable under-estimation is occurring. The function point analysis presented in this paper provides a reliable metric for spatial systems developers to assess required effort based on spatial data models.}, Address = {Dunedin, New Zealand}, Author = {George L. Benwell and Stephen G. MacDonell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:54 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {January}, Number = {96/01}, Title = {Using data models to estimate required effort in creating a spatial information system}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1995-14, Abstract = {In New Zealand the management of the environment is now largely embodied in the Resource Management Act. Within this context there is a clear need to support regionally significant decisions. Furthermore it is important that such decisions are scale invariant, that is, they are appropriately implementable at the micro and macro levels. This demands that decision makers at these diametrically opposed levels are cognisant of the influence of their domain on other domains, which is a difficult concept. It also implies that there is consensus on what the significant regional decisions are and on how decisions and consequences interact across all scales and, possibly, even regions. As a region is a scale-dependent term, it is important that the different views can be perceived and conveyed to the different proponents and opponents. This paper develops the case that it is important to make appropriate use of technology when attempting to make decisions at the regional level. This is particularly so in the fragile environments of the high country of southern New Zealand. Furthermore, this paper embodies E. de Bono's concept of the Six Thinking Hats in developing a simulation modelling tool which presents interactive management scenarios of agricultural areas of the high country. The modelling concept is presented along with the reasons for adopting the de Bono concept.}, Address = {Dunedin, New Zealand}, Author = {George L. Benwell and Tim Fletcher and Carolyne B. Smith}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:55 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {September}, Number = {95/14}, Title = {Integrating modelling and simulation into a problem solving paradigm for improved regional and environmental decision making}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1996-22, Abstract = {The Soft Systems Methodology (SSM) was used to identify requirements for the development of one or more information systems for a local company. The outcome of using this methodology was the development of three multimedia information systems. This paper discusses the use of the SSM when developing for multimedia environments. Namely, this paper covers the problems with traditional methods of requirements analysis (which the SSM addresses), how the SSM can be used to elicit multimedia information system requirements, and our personal experience of the method. Our personal experience is discussed in terms of the systems we developed using the SSM.}, Address = {Dunedin, New Zealand}, Author = {Da'oud Z. Butt and Tim Fletcher and Stephen G.
MacDonell and Brian E. Norris and William B.L. Wong}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:55 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {multimedia information systems, Soft Systems methodology, systems development lifecycle}, Month = {October}, Number = {96/22}, Title = {Applying soft systems methodology to multimedia systems requirements analysis}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1995-04, Abstract = {The design of spatial information systems has traditionally been carried out independently of mainstream database developments. It is contended that the adoption of mainstream database design techniques is important to progress in the spatial information systems development field. An accepted approach to the development of information systems is through an integrated development environment with a design repository at its core. This paper proposes a skeleton model for the design of a repository to store spatial metadata. An object-oriented modelling approach is adopted in preference to an entity-relationship approach because of its ability to model functional and dynamic aspects of the repository.}, Address = {Dunedin, New Zealand}, Author = {S.K. Sophie Cockcroft}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:15:36 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {95/4}, Size = {184 KB}, Title = {An object repository model for the storage of spatial metadata}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1996-02, Abstract = {Database schemas currently used to define spatial databases are deficient in that they do not incorporate facilities to specify business rules/integrity constraints. This shortcoming has been noted by G{\"u}nther and Lamberts [G{\"u}nther {\&} Lamberts, 1994], who commented that geographical information systems (GIS) do not generally offer any functionality to preserve semantic integrity. It is desirable that this functionality be incorporated for reasons of consistency and so that an estimate of the accuracy of data entry can be made. Research into constraints upon spatial relationships at the conceptual level is well documented. A number of researchers have shown that the transition from conceptual to logical spatial data models is possible [Firns, 1994; Hadzilacos {\&} Tryfona, 1995]. The algorithmic accomplishment of this transition is a subject of current research. This paper presents one approach to incorporating spatial business rules in spatially referenced database schemas by means of a repository. It is demonstrated that the repository has an important role to play in spatial data management and, in particular, in automatic schema generation for spatially referenced databases.}, Address = {Dunedin, New Zealand}, Author = {S.K. Sophie Cockcroft}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:39:16 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {February}, Number = {96/02}, Size = {188 KB}, Title = {The use of a metadata repository in spatial database development}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1997-05, Abstract = {Spatial data quality has become an issue of increasing concern to researchers and practitioners in the field of Spatial Information Systems (SIS). Clearly the results of any spatial analysis are only as good as the data on which it is based.
There are a number of significant areas for data quality research in SIS. These include topological consistency; consistency between spatial and attribute data; and consistency between spatial objects' representation and their true representation on the ground. The last category may be subdivided into spatial accuracy and attribute accuracy. One approach to improving data quality is the imposition of constraints upon data entered into the database. This paper presents a taxonomy of integrity constraints as they apply to spatial database systems. Taking a cross-disciplinary approach, it aims to clarify some of the terms used in the database and SIS fields for data integrity management. An overview of spatial data quality concerns is given and each type of constraint is assessed regarding its approach to addressing these concerns. Some indication of an implementation method is also given for each.}, Address = {Dunedin, New Zealand}, Author = {S.K. Sophie Cockcroft}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:56 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {database constraints, spatial data quality, system development, rules}, Month = {May}, Number = {97/05}, Size = {128 KB}, Title = {A taxonomy of spatial data integrity constraints}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1996-25b, Abstract = {Integrated software engineering environments (ISEE) for traditional non-spatial information systems are well developed, incorporating Database Management Systems (DBMS) and Computer Aided Software Engineering (CASE) tools. The core component of the ISEE is the repository. It brings all the other components together and provides a common area to which all tools can link. In this fashion it also provides a central point for control. No such facility exists for the management of spatial data. This paper describes the development of such a facility in the form of a spatial metadata repository.}, Address = {Dunedin, New Zealand}, Author = {Sophie Cockcroft}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 17:10:26 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Note = {(Not in electronic version.)}, Number = {96/25b}, Title = {First experiences in implementing a spatial metadata repository}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1998-01, Abstract = {The application of business rules as a means of ensuring data quality is an accepted approach in information systems development. Rules, defined by the user, are stored and manipulated by a repository or data dictionary. The repository stores the system design, including rules which result from constraints in the user's environment, and enforces these rules at runtime.
The work presented here represents the application of this approach to spatial information system design using an integrated spatial software engineering tool (ISSET) with a repository at its core.}, Address = {Dunedin, New Zealand}, Author = {Sophie Cockcroft}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:56 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {spatial information systems development, integrity constraints, business rules, topological relationships}, Month = {March}, Number = {98/01}, Title = {User defined spatial business rules: {S}torage, management and implementation---{A} pipe network case study}, Type = {Discussion paper}, Year = {1998}} @techreport{dp1999-25, Abstract = {Web-based approaches to tracking students on placement are receiving much interest in the field of medical education. The work presented here describes a web-based solution to the problem of managing data collection from student encounters with patients whilst on placement. The solution has been developed by postgraduate students under the direction of staff of the health informatics diploma. Specifically, the system allows undergraduate students on placement or in the main hospital to access a web-based front end to a database designed to store the data that they are required to gather. The system also has the important effect of providing a rationale for the provision of electronic communication to the undergraduate students within the context of healthcare delivery. We believe that an additional effect will be to expose practicing healthcare providers to electronic information systems, along with the undergraduates who are trained to use them, and increase the skill base of the practitioners.}, Address = {Dunedin, New Zealand}, Author = {Sophie Cockcroft and David Parry and Alice Breton and David Abernethy and John Gillies}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:48:55 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {99/25}, Size = {92 KB}, Title = {Infiltrating {IT} into primary care: {A} case study}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-18, Abstract = {This paper discusses the problem of integrated planning and execution for tasks that involve the consumption, production and alteration of relational information. Unlike information retrieval problems, the information processing domain requires explicit modelling of the changing information state of the domain and how the validity of resources changes as actions are performed. A solution to this problem is presented in the form of a specialised hierarchical task network planning model. A distinction is made between the information processing effects of an action (modelled in terms of constraints relating the domain information before and after the action) and the actions' preconditions and effects which are expressed in terms of required, produced and invalidated resources. The information flow between tasks is explicitly represented in methods and plans, including any required information-combining operations such as selection and union.
The paper presents the semantics of this model and discusses implementation issues arising from the extension of an existing HTN planner (SHOP) to support this model of planning.}, Address = {Dunedin, New Zealand}, Author = {Stephen Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:48:17 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {HTN planning, information processing, integrated planning and execution}, Month = {September}, Number = {99/18}, Size = {188 KB}, Title = {{HTN} planning for information processing tasks}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1994-16, Abstract = {Recently there has been a resurgence of interest in the deductive approach to planning. There are many benefits of this approach, but one shortcoming is the difficulty of performing nonlinear planning in this framework. This paper argues that these problems are caused by a flaw in the partial order approach---the lack of structure in such a representation---and proposes an alternative, dynamic-programming-style approach based on a more structured representation of plans.}, Address = {Dunedin, New Zealand}, Author = {Stephen J.S. Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:57 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {November}, Number = {94/16}, Title = {Towards the deductive synthesis of nonlinear plans}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1997-01, Abstract = {In today's open, distributed environments, there is an increasing need for systems to assist the interoperation of tools and information resources. This paper describes a multi-agent system, DALEKS, that supports such activities for the information processing domain. With this system, information processing tasks are accomplished by the use of an agent architecture incorporating task planning and information agent matchmaking components. We discuss the characteristics of planning in this domain and describe how information processing tools are specified for the planner. We also describe the manner in which planning, agent matchmaking, and information task execution are interleaved in the DALEKS system. An example application taken from the domain of university course administration is provided to illustrate some of the activities performed in this system.}, Address = {Dunedin, New Zealand}, Author = {Stephen J.S. Cranefield and Aurora Diaz and Martin K. Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:57 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {February}, Number = {97/01}, Size = {84 KB}, Title = {Planning and matchmaking for the interoperation of information processing agents}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1995-07, Abstract = {The concept of an intelligent software agent has emerged from its origins in artificial intelligence laboratories to become an important basis for the development of distributed systems in the mainstream computer science community. This paper provides a review of some of the ideas behind the intelligent agent approach and addresses the question ``what is an agent?'' Some principal application areas for agent-based computing are outlined and related research programmes at the University of Otago are discussed.}, Address = {Dunedin, New Zealand}, Author = {Stephen J.S. Cranefield and Paul Gorman and Martin K.
Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:16:39 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {July}, Number = {95/7}, Size = {188 KB}, Title = {Communicating agents: {A}n emerging approach for distributed heterogeneous systems}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1995-15, Abstract = {Agent-Based Software Integration (ABSI) entails the development of intelligent software agents and knowledge-sharing protocols that enhance interoperability of multiple software packages. Although some past ABSI projects reported in the literature have been concerned with the integration of relatively large software frameworks from separate engineering disciplines, the discussion in this paper concerns the integration of general-purpose software utilities and hand-crafted tools. With such smaller-scale ABSI projects, it may be difficult to justify the expense of constructing an overall ontology for the application. There are cases, however, when the project involves general-purpose tools that manipulate the same general entity types (such as files) but at different levels of abstraction. In such cases it is appropriate to have ontologies suited to the general usage of each tool and constraint descriptions that enable the ontological specifications to be mapped across the various levels of abstraction. This paper discusses issues associated with this type of ABSI project and describes an example information management application associated with university course administration. For the information management application presented, the key issues are the provision of standard agent wrappers for standard desktop information management tools and the design of standard ontologies describing information stored in relational databases as well as in structured text files. Examples of a conceptual model describing such a database ontology are presented in connection with the example application. It is also suggested that a general planning agent, distinct from the notion of a facilitator agent, be employed in this context to assist in the use of various agents to manipulate information and move items from one data format to another.}, Address = {Dunedin, New Zealand}, Author = {Stephen J.S. Cranefield and Martin K. Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:57 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {October}, Number = {95/15}, Title = {Agent-based integration of general-purpose tools}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1996-24, Abstract = {This paper presents a practical multi-agent architecture for assisting users to coordinate the use of both special and general purpose software tools for performing tasks in a given problem domain. The architecture is open and extensible, being based on the techniques of agent-based software interoperability (ABSI), where each tool is encapsulated by a KQML-speaking agent. The work reported here adds additional facilities for the user to describe the problem domain, the tasks that are commonly performed in that domain and the ways in which various software tools are commonly used by the user. Together, these features provide the computer with a degree of autonomy in the user's problem domain in order to help the user achieve tasks through the coordinated use of disparate software tools.
This research focuses on the representational and planning capabilities required to extend the existing benefits of the ABSI architecture to include domain-level problem-solving skills. In particular, the paper proposes a number of standard ontologies that are required for this type of problem, and discusses a number of issues related to planning the coordinated use of agent-encapsulated tools.}, Address = {Dunedin, New Zealand}, Author = {Stephen J.S. Cranefield and Martin K. Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:41:01 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {96/24}, Size = {72 KB}, Title = {An agent-based architecture for software tool coordination}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1998-07, Abstract = {This paper describes an agent-based architecture designed to provide automation support for users who perform information processing tasks using a collection of distributed and disparate software tools and on-line resources. The architecture extends previous work on agent-based software interoperability. The unique features of the information processing domain compared to distributed information retrieval are discussed and a novel extension of hierarchical task network (HTN) planning to support this domain is presented.}, Address = {Dunedin, New Zealand}, Author = {Stephen Cranefield and Bryce McKinlay and Emanuela Moreale and Martin K. Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-11 09:58:54 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {98/07}, Size = {172 KB}, Title = {Automating information processing tasks: {A}n agent-based architecture}, Type = {Discussion paper}, Year = {1998}} @techreport{dp1999-01, Abstract = {Current tools and techniques for ontology development are based on the traditions of AI knowledge representation research. This research has led to popular formalisms such as KIF and KL-ONE style languages. However, these representations are little known outside AI research laboratories. In contrast, commercial interest has resulted in ideas from the object-oriented programming community maturing into industry standards and powerful tools for object-oriented analysis, design and implementation. These standards and tools have a wide and rapidly growing user community. This paper examines the potential for object-oriented standards to be used for ontology modelling, and in particular presents an ontology representation language based on a subset of the Unified Modeling Language together with its associated Object Constraint Language.}, Address = {Dunedin, New Zealand}, Author = {Stephen Cranefield and Martin K. Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:47:08 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {January}, Number = {99/01}, Size = {204 KB}, Title = {{UML} as an ontology modelling language}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1994-15, Abstract = {This paper deals with matters relating to toponymy. The concept of indigenous place names is discussed. A view is presented, based on empirical evidence, that current processes for the official recording of names are detrimental to a fair and reasonable representation of indigenous names. Historical events in Aotearoa are examined as well as the existing place name recording process. 
Research into what can be done to examine and redress this situation is outlined. A proposition is tendered whereby names can be recorded via a process which is people-based rather than government-based. Research matters surrounding this concept are discussed.}, Address = {Dunedin, New Zealand}, Author = {Iaean J. Cranwell and George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:22:23 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {August}, Number = {94/15}, Size = {1.1 MB}, Title = {Recording, placement and presentation of {M}{\=a}ori place names in a spatial information system}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1997-06, Abstract = {Computer users employ a collection of software tools to support their day-to-day work. Often the software environment is dynamic, with new tools being added as they become available and removed as they become obsolete or outdated. In today's systems, the burden of coordinating the use of these disparate tools, remembering the correct sequence of commands, and incorporating new and modified programs into the daily work pattern lies with the user. This paper describes a multi-agent system, DALEKS, that assists users in utilizing diverse software tools for their everyday work. It manages work and information flow by providing a coordination layer that selects the appropriate tool(s) to use for each of the user's tasks and automates the flow of information between them. This enables the user to be concerned more with what has to be done, rather than with the specifics of how to access tools and information. Here we describe the system architecture of DALEKS and illustrate it with an example in university course administration.}, Address = {Dunedin, New Zealand}, Author = {Aurora Diaz and Stephen J.S. Cranefield and Martin K. Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:58 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {agent architecture, software interoperability}, Month = {June}, Number = {97/06}, Size = {72 KB}, Title = {Planning and matchmaking in a multi-agent system for software integration}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1993-05, Abstract = {This paper argues that the introduction of western cadastral concepts into communities with different land tenure systems has involved ``cultural costs.'' The paper discusses these cultural costs and concludes that cadastral reformers need to re-design their product to fit the communities.}, Address = {Dunedin, New Zealand}, Author = {I. Chukwudozie Ezigbalike and George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 14:46:52 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {93/5}, Size = {1.3 MB}, Title = {Cadastral ``reform''---{A}t what cost to developing countries?}, Type = {Discussion paper}, Year = {1993}} @techreport{dp1993-03, Abstract = {Semantic data models comprise formally defined abstractions for representing real-world relationships and aspects of the structure of real-world phenomena so as to aid database design. 
While previous research in spatial database design has shown that semantic data models are amenable to explicitly representing some spatial concepts, this paper shows that semantic data models may usefully be applied to the design of spatial databases even without explicitly representing spatial concepts. Specifically, an entity-relationship model comprising only ``is-associated-with'' relationships is used as the basis from which to define thematic layers for a layer-based spatial database.}, Address = {Dunedin, New Zealand}, Author = {Peter G. Firns}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:59 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {November}, Number = {93/3}, Size = {1.2 MB}, Title = {The derivation of thematic map layers from entity-relationship data models}, Type = {Discussion paper}, Year = {1993}} @techreport{dp1994-14, Abstract = {Semantic data models comprise abstractions used, in conceptual database design, to represent real-world relationships and aspects of the structure of real-world phenomena. Such abstractions have previously been applied to the modelling of spatial concepts, but in the process their semantics are implicitly extended. This paper explicitly extends the semantics of the entity-relationship model, defining two specific types of entity set to enable the notion of a thematic layer to be incorporated in entity-relationship schemas. It places this in the context of a conceptual modelling framework to be used in the design of spatially referenced databases.}, Address = {Dunedin, New Zealand}, Author = {Peter G. Firns}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:59 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {August}, Number = {94/14}, Size = {1.1 MB}, Title = {A conceptual data modelling framework incorporating the notion of a thematic layer}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1996-21, Abstract = {The development of multimedia information systems must be managed and controlled just as it is for other generic system types. This paper proposes an approach for assessing multimedia component and system characteristics with a view to ultimately using these features to estimate the associated development effort. Given the different nature of multimedia systems, existing metrics do not appear to be entirely useful in this domain; however, some general principles can still be applied in analysis. Some basic assertions concerning the influential characteristics of multimedia systems are made and a small preliminary set of data is evaluated.}, Address = {Dunedin, New Zealand}, Author = {Tim Fletcher and William B.L. Wong and Stephen G. MacDonell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2011-01-24 17:25:34 +1300}, Institution = {Department of Information Science, University of Otago}, Keywords = {multimedia, management, metrics}, Month = {October}, Number = {96/21}, Size = {220 KB}, Title = {Early experiences in measuring multimedia systems development effort}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1994-10, Abstract = {This paper investigates statistical models for the understanding of the behaviour of scrubweeds in Southland and Otago. Data pertaining to eight scrubweed species have been collected along four transects together with the environmental factors of altitude, slope, aspect and land use classification. 
Each transect is approximately 80 km by 2 km, with data being held for every 1 ha so that there are approximately 16,000 pixels for each transect. It is important to understand the relationship between the species so that interpolation and extrapolation can be performed. The initial survey, completed in 1992, will be repeated in 1995 and 1998. These surveys will then form the baseline for an understanding of the spread or contraction of the species in farmlands of the South Island. This in turn will assist policy makers in formulating management plans which relate eradication to farmland productivity. This paper deals in detail with one of the transects---Balclutha to Katiki Point.}, Address = {Dunedin, New Zealand}, Author = {Liliana Gonzalez and George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:21:15 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {canonical correlation, kriging, log-linear models, logistic regression, spatial correlation, variogram analysis}, Month = {May}, Number = {94/10}, Title = {Stochastic models of the behaviour of scrubweeds in {S}outhland and {O}tago}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1999-20, Abstract = {One problem faced by managers who are using project management models is the elicitation of numerical inputs. Obtaining these with any degree of confidence early in a project is not always feasible. Related to this difficulty is the risk of precisely specified outputs from models leading to overcommitment. These problems can be seen as the collective failure of software measurements to represent the inherent uncertainties in managers' knowledge of the development products, resources, and processes. It is proposed that fuzzy logic techniques can help to overcome some of these difficulties by representing the imprecision in inputs and outputs, as well as providing a more expert-knowledge-based approach to model building. The use of fuzzy logic for project management, however, should not be the same throughout the development life cycle. Different levels of available information and desired precision suggest that it can be used differently depending on the current phase, although a single model can be used for consistency.}, Address = {Dunedin, New Zealand}, Author = {Andrew Gray and Stephen MacDonell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:00 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {September}, Number = {99/20}, Size = {148 KB}, Title = {Fuzzy logic for software metric models throughout the development life-cycle}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-11, Abstract = {Whilst some software measurement research has been unquestionably successful, other research has struggled to enable expected advances in project and process management. Contributing to this lack of advancement has been the incidence of inappropriate or non-optimal application of various model-building procedures. This obviously raises questions over the validity and reliability of any results obtained as well as the conclusions that may have been drawn regarding the appropriateness of the techniques in question. In this paper, we investigate the influence of various data set characteristics and the purpose of analysis on the effectiveness of four model-building techniques---three statistical methods and one neural network method. 
In order to illustrate the impact of data set characteristics, three separate data sets, drawn from the literature, are used in this analysis. In terms of predictive accuracy, it is shown that no one modeling method is best in every case. Some consideration of the characteristics of data sets should therefore occur before analysis begins, so that the most appropriate modeling method is then used. Moreover, issues other than predictive accuracy may have a significant influence on the selection of model-building methods. These issues are also addressed here and a series of guidelines for selecting among and implementing these and other modeling techniques is discussed.}, Address = {Dunedin, New Zealand}, Author = {Andrew Gray and Stephen MacDonell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:00 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {software metrics, analysis, statistical methods, connectionist methods}, Month = {June}, Number = {99/11}, Size = {292 KB}, Title = {Software metrics data analysis---{E}xploring the relative performance of some commonly used modeling techniques}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-16, Abstract = {Software metric-based estimation of project development effort is most often performed by expert judgment rather than by using an empirically derived model (although such a model may be used by the expert to assist their decision). One question that can be asked about these estimates is how stable they are with respect to characteristics of the development process and product. This stability can be assessed in relation to the degree to which the project has advanced over time, the type of module for which the estimate is being made, and the characteristics of that module. In this paper, we examine a set of expert-derived estimates for the effort required to develop a collection of modules from a large health-care system. Statistical tests are used to identify relationships between the type (screen or report) and characteristics of modules and the likelihood of the associated development effort being under-estimated, approximately correct, or over-estimated. Distinct relationships are found that suggest that the estimation process being examined was not unbiased with respect to such characteristics.}, Address = {Dunedin, New Zealand}, Author = {Andrew Gray and Stephen MacDonell and Martin Shepperd}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:00 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {99/16}, Size = {236 KB}, Title = {Factors systematically associated with errors in subjective estimates of software development effort: {T}he stability of expert judgment}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1996-05, Abstract = {The almost exclusive use of regression analysis to derive predictive equations for software development metrics found in papers published before 1990 has recently been complemented by increasing numbers of studies using non-traditional methods, such as neural networks, fuzzy logic models, case-based reasoning systems, rule-based systems, and regression trees. There has also been an increasing level of sophistication in the regression-based techniques used, including robust regression methods, factor analysis, resampling methods, and more effective and efficient validation procedures. 
This paper examines the implications of using these alternative methods and provides some recommendations as to when they may be appropriate. A comparison between standard linear regression, robust regression, and the alternative techniques is also made in terms of their modelling capabilities with specific reference to software metrics.}, Address = {Dunedin, New Zealand}, Author = {Andrew R. Gray and Stephen G. MacDonell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:00 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {96/05}, Title = {A comparison of alternatives to regression analysis as model building techniques to develop predictive equations for software metrics}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1997-10, Abstract = {Software metrics are measurements of the software development process and product that can be used as variables (both dependent and independent) in models for project management. The most common types of these models are those used for predicting the development effort for a software system based on size, complexity, developer characteristics, and other metrics. Despite the financial benefits from developing accurate and usable models, there are a number of problems that have not been overcome using the traditional techniques of formal and linear regression models. These include the non-linearities and interactions inherent in complex real-world development processes, the lack of stationarity in such processes, over-commitment to precisely specified values, the small quantities of data often available, and the inability to use whatever knowledge is available where exact numerical values are unknown. The use of alternative techniques, especially fuzzy logic, is investigated and some usage recommendations are made.}, Address = {Dunedin, New Zealand}, Author = {Andrew R. Gray and Stephen G. MacDonell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:00 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {July}, Number = {97/10}, Size = {88 KB}, Title = {Applications of fuzzy logic to software metric models for development effort estimation}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1997-14, Abstract = {The number of occurrences and severity of computer-based attacks such as viruses and worms, logic bombs, Trojan horses, computer fraud, and plagiarism of code have become of increasing concern. In an attempt to better deal with these problems, it is proposed that methods for examining the authorship of computer programs are necessary. This field is referred to here as software forensics. This involves the areas of author discrimination, identification, and characterisation, as well as intent analysis. Borrowing extensively from the existing fields of linguistics and software metrics, this can be seen as a new and exciting area for forensics to extend into.}, Address = {Dunedin, New Zealand}, Author = {Andrew R. Gray and Philip J. Sallis and Stephen G. 
MacDonell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:01 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {authorship analysis, computer programming, malicious programs, software forensics, software metrics, source code}, Month = {December}, Number = {97/14}, Title = {Software forensics: {E}xtending authorship analysis techniques to computer programs}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1996-25c, Abstract = {Decision support systems, statistics and expert systems were some of the mainstay techniques used for modelling environmental phenomena. Now modelling systems utilise artificial intelligence (AI) techniques for the extra computational analysis they provide. By operating in a toolbox environment and adopting AI techniques, geographic information system (GIS) modellers have greater options available for solving problems. This paper outlines a new approach to applying artificial intelligence techniques to solve spatial problems. The approach combines case-based reasoning (CBR) with geographic information systems and allows both techniques to be applied to solve spatial problems. More specifically, this paper examines techniques applied to the problem of soil classification. Spatial cases are defined and analysed using the case-based reasoning techniques of retrieve, reuse, revise and retain. Once the structure of cases is defined, a case base is compiled. When the case base is of sufficient size, the problem of soil classification is tested using this new approach. The problem is solved by searching the case base for another spatial phenomenon similar to that which exists. The knowledge from that retrieved case is then used to formulate an answer to the problem. A comparison of the results obtained by this approach and a traditional method of soil classification is then undertaken. This paper also documents the concept of saving data when translating from decision trees to CBR. The logistics of the problems that are characteristic of case-based reasoning systems are discussed: for example, how should the spatial domain of an environmental phenomenon best be represented in a case base? What are the constraints of CBR, what data are lost, and what functions are gained? Finally, the following question is posed: ``to what real-world level can the environment be modelled using GIS and case-based reasoning techniques?''}, Address = {Dunedin, New Zealand}, Author = {Alec Holt}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:35:27 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {96/25c}, Size = {1 MB}, Title = {Incorporating a new computational reasoning approach to spatial modelling}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1995-16, Abstract = {This paper outlines a unique approach applying artificial intelligence techniques to the solving of environmental problems. The approach combines case-based reasoning with spatial information systems, enabling technologies and techniques from each domain to be applied to environmental problems. This paper defines a possible case-based reasoning/spatial information system hybrid that would allow spatial cases to be defined and analysed by both technologies. The example used in this paper involves soil series classification which, using case-based reasoning, is performed according to spatial criteria. 
Evaluations and spatial criteria are then used to predict properties of new cases based on similar previous spatial cases.}, Address = {Dunedin, New Zealand}, Author = {Alec Holt and George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:01 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {95/16}, Title = {Applying case-based reasoning to spatial phenomena}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1995-09, Abstract = {This paper emphasises the concept of integrating case-based reasoning with spatial information systems, and the adaptation of artificial intelligence techniques to improve the analytical strength of spatial information systems. This adaptation of artificial intelligence techniques may include examples of expert systems, fuzzy logic, hybrid connectionist systems and neural networks, all integrated with spatial information systems. The unique process of case-based reasoning is described. The research into the possible integration of case-based reasoning and spatial information systems is outlined. The benefits of a case-based reasoning spatial information hybrid system are discussed.}, Address = {Dunedin, New Zealand}, Author = {Alec Holt and George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:01 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {August}, Number = {95/9}, Title = {Case-based reasoning and spatial analysis}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1998-08, Abstract = {This research continues with current innovative geocomputational research trends that aim to provide enhanced spatial analysis tools. The coupling of case-based reasoning (CBR) with GIS provides the focus of this paper. This coupling allows the retrieval, reuse, revision and retention of previous similar spatial cases. CBR is therefore used to develop more complex spatial data modelling methods (by using the CBR modules for improved spatial data manipulation) and provide enhanced exploratory geographical analysis tools (to find and assess certain patterns and relationships that may exist in spatial databases). This paper details the manner in which spatial similarity is assessed, for the purpose of re-using previous spatial cases. The authors consider similarity assessment a useful concept for retrieving and analysing spatial information as it may help researchers describe and explore a certain phenomenon, its immediate environment and its relationships to other phenomena. This paper will address the following questions: What makes phenomena similar? What is the definition of similarity? What principles govern similarity? And how can similarity be measured? Generally, phenomena are similar when they share common attributes and circumstances. The degree of similarity depends on the type and number of commonalities they share. Within this research, similarity is examined from a spatial perspective. Spatial similarity is broadly defined by the authors as the spatial matching and ranking according to a specific context and scale. More specifically, similarity is governed by context (function, use, reason, goal, user's frame of mind), scale (coarse or fine level), repository (the application, local domain, site and data specifics), techniques (the available technology for searching, retrieving and recognising data) and measure and ranking systems. 
The degree of match is the score between a source and a target. In spatial matching, a source and a target could be a pixel, region or coverage. The principles that govern spatial similarity are not just the attributes but also the relationships between two phenomena. This is one reason why coupling CBR with a GIS is advantageous. A GIS is used symbiotically to extract spatial variables that can be used by CBR to determine similar spatial relations between phenomena. These spatial relations are used to assess the similarity between two phenomena (for example, proximity and neighbourhood analysis). Developing the concept of spatial similarity could assist with analysing spatial databases through techniques that match similar areas. This would help maximise the information that could be extracted from spatial databases. From an exploratory perspective, spatial similarity serves as an organising principle by which spatial phenomena are classified, relationships identified and generalisations made from previous bona fide experiences or knowledge. This paper will investigate the spatial similarity concept.}, Address = {Dunedin, New Zealand}, Author = {Alec Holt and Stephen MacDonell and George Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-11 09:58:44 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {98/08}, Size = {456 KB}, Title = {Spatial isomorphism}, Type = {Discussion paper}, Year = {1998}} @techreport{dp1996-09, Abstract = {This paper explores two different methods for improved learning in multimodular fuzzy neural network systems for classification. It demonstrates these methods on a case study of satellite image classification using 3 spectral inputs and 10 coastal vegetation cover-type outputs. The classification system is a multimodular one; it has one fuzzy neural network per output. All the fuzzy neural networks are trained in parallel for a small number of iterations. Then, the system performance is tested on new data to determine the types of interclass confusion. Two strategies are developed to improve classification performance. First, the individual modules are additionally trained for a very small number of iterations on a subset of the data to decrease the false positive and the false negative errors. The second strategy is to create new units, `experts', which are individually trained to discriminate only the ambiguous classes. So, if the main system classifies a new input into one of the ambiguous classes, then the new input is passed to the `experts' for final classification. Two learning techniques are presented and applied to both classification performance enhancement strategies; the first one reduces omission, or false negative, error; the second reduces commission, or false positive, error. Considerable improvement is achieved by using these learning techniques, thus making it feasible to incorporate them into a real adaptive system that improves during operation.}, Address = {Dunedin, New Zealand}, Author = {Steven A. Israel and Nikola K. 
Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:39:57 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {96/09}, Size = {440 KB}, Title = {Improved learning strategies for multimodular fuzzy neural network systems: {A} case study on image classification}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1998-03, Abstract = {The paper introduces one paradigm of neuro-fuzzy techniques and an approach to building on-line, adaptive intelligent systems. This approach is called evolving connectionist systems (ECOS). ECOS evolve through incremental, on-line learning, both supervised and unsupervised. They can accommodate new input data, including new features, new classes, etc. The ECOS framework is presented and illustrated on a particular type of evolving neural networks---evolving fuzzy neural networks. ECOS are three to six orders of magnitude faster than multilayer perceptrons or fuzzy neural networks trained with the backpropagation algorithm or with a genetic programming technique. ECOS belong to the new generation of adaptive intelligent systems. This is illustrated on several real-world problems for adaptive, on-line classification, prediction, decision making and control: phoneme-based speech recognition; moving person identification; wastewater flow time-series prediction and control; intelligent agents; financial time series prediction and control. The principles of recurrent ECOS and reinforcement learning are outlined.}, Address = {Dunedin, New Zealand}, Author = {Nikola Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:46:38 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {evolving neuro-fuzzy systems, fuzzy neural networks, on-line adaptive control, on-line decision making, intelligent agents}, Month = {March}, Number = {98/03}, Title = {Looking for a new {AI} paradigm: {E}volving connectionist and fuzzy connectionist systems---{T}heory and applications for adaptive, on-line intelligent systems}, Type = {Discussion paper}, Year = {1998}} @techreport{dp1999-02, Abstract = {The paper introduces evolving connectionist systems (ECOS) as an effective approach to building on-line, adaptive intelligent systems. ECOS evolve through incremental, hybrid (supervised/unsupervised), on-line learning. They can accommodate new input data, including new features, new classes, etc. through local element tuning. New connections and new neurons are created during the operation of the system. The ECOS framework is presented and illustrated on a particular type of evolving neural networks---evolving fuzzy neural networks (EFuNNs). EFuNNs can learn spatial-temporal sequences in an adaptive way, through one-pass learning. Rules can be inserted and extracted at any time during the system's operation. 
The characteristics of ECOS and EFuNNs are illustrated on several case studies that include: adaptive pattern classification; adaptive, phoneme-based spoken language recognition; adaptive dynamic time-series prediction; intelligent agents.}, Address = {Dunedin, New Zealand}, Author = {Nikola Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:02 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {evolving connectionist systems, evolving fuzzy neural networks, on-line learning, spatial-temporal adaptation, adaptive speech recognition}, Month = {March}, Number = {99/02}, Size = {944 KB}, Title = {Evolving connectionist systems for on-line, knowledge-based learning: {P}rinciples and applications}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-08, Abstract = {The chapter presents a new methodology for building adaptive, incremental learning systems for image pattern classification. The systems are based on dynamically evolving fuzzy neural networks, which are neural architectures that realise connectionist learning, fuzzy logic inference, and case-based reasoning. The methodology and the architecture are applied to two sets of real data---one of satellite image data, and the other of fruit image data. The proposed method and architecture encourage fast learning, life-long learning and on-line learning when the system operates in a changing environment of image data.}, Address = {Dunedin, New Zealand}, Author = {Nikola Kasabov and Steven Israel and Brendon Woodford}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:02 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {image classification, evolving fuzzy neural networks, case-based reasoning}, Month = {May}, Number = {99/08}, Size = {1.3 MB}, Title = {Adaptive, evolving, hybrid connectionist systems for image pattern recognition}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1994-12, Abstract = {A new type of generalised fuzzy rule and generalised fuzzy production system and a corresponding reasoning method are developed. They are implemented in a connectionist architecture and are called connectionist fuzzy production systems. They combine all the features of symbolic AI production systems, fuzzy production systems and connectionist systems. A connectionist method for learning generalised fuzzy productions from raw data is also presented. The main conclusion reached is that connectionist fuzzy production systems are very powerful as fuzzy reasoning machines and they may well inspire new methods of plausible representation of inexact knowledge and new inference techniques for approximate reasoning.}, Address = {Dunedin, New Zealand}, Author = {Nikola K. Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:03 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {94/12}, Title = {Connectionist fuzzy production systems}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1994-17, Abstract = {The paper presents the major principles of building complex hybrid systems for knowledge engineering, where at the centre of the design process is the task of learning (extracting) fuzzy rules from data. An experimental environment, FuzzyCOPE, which facilitates this process, is described. 
It consists of a fuzzy rules extraction module, a neural networks module, a fuzzy inference methods module and a production rules module. Such an environment makes possible the use of the three paradigms, i.e. fuzzy rules, neural networks and symbolic production rules, in one system. Automatic rule extraction from data and selection of the most appropriate reasoning mechanism are also provided. Using FuzzyCOPE for building hybrid systems for decision making and speech recognition is discussed and illustrated.}, Address = {Dunedin, New Zealand}, Author = {Nikola K. Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:03 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {November}, Number = {94/17}, Title = {Hybrid fuzzy connectionist rule-based systems and the role of fuzzy rules extraction}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1994-11, Abstract = {The paper presents a novel approach towards solving different speech recognition tasks, i.e. phoneme recognition, ambiguous word recognition, continuous speech-to-text conversion, and learning fuzzy rules for language processing. The model uses a standard connectionist system for initial recognition and a connectionist rule-based system for higher-level recognition. The higher level is realised as a Connectionist Fuzzy Production System (CFPS), which makes it possible to introduce different parameters into the higher-level production rules, such as degrees of importance, dynamic sensitivity factors, noise tolerance factors, certainty degrees and reactiveness factors. It provides different approximate chain reasoning techniques. The CFPS helps to solve many of the ambiguities in speech recognition tasks. Experiments on phoneme recognition in the English language are reported. This approach facilitates a connectionist implementation of the whole process of speech recognition (at a low level and at a higher logical level) which used to be performed in hybrid environments. It also facilitates the process of learning fuzzy rules for language processing. All the language processing tasks and subtasks are realised in a homogeneous connectionist environment. This brings all the benefits of connectionist systems to practical applications in the speech recognition area.}, Address = {Dunedin, New Zealand}, Author = {Nikola K. Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:03 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {94/11}, Title = {Towards using hybrid connectionist fuzzy production systems for speech recognition}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1996-23, Abstract = {Fuzzy neural networks have several features that make them well suited to a wide range of knowledge engineering applications. These strengths include fast and accurate learning, good generalisation capabilities, excellent explanation facilities in the form of semantically meaningful fuzzy rules, and the ability to accommodate both data and existing expert knowledge about the problem under consideration. This paper investigates adaptive learning, rule extraction and insertion, and neural/fuzzy reasoning for a particular model of a fuzzy neural network called FuNN. As well as providing a means of representing a fuzzy system with an adaptable neural architecture, FuNN also incorporates a genetic algorithm in one of its adaptation modes. 
A version of FuNN---FuNN/2, which employs triangular membership functions and correspondingly modified learning and adaptation algorithms, is also presented in the paper.}, Address = {Dunedin, New Zealand}, Author = {Nikola K. Kasabov and Jaesoo Kim and Michael J. Watts and Andrew R. Gray}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:42:44 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {96/23}, Title = {{FuNN}/2---{A} fuzzy neural network architecture for adaptive learning and knowledge acquisition}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1994-05, Abstract = {A Content-Addressable Model of Production Systems, `CAMPUS', has been developed. The main idea is to achieve high execution performance in production systems by exploiting potential fine-grain data parallelism. The facts and the rules of a production system are uniformly represented as CAM tables. CAMPUS differs from other CAM-inspired models in that it is based on a non-state-saving and `lazy' matching algorithm. The production system execution cycle is represented by a small number of associative search operations over the CAM tables, the number of which does not depend, or depends only slightly, on the number of rules and facts in the production system. The model makes efficient implementation of large production systems on fast CAM possible. An experimental CAMPUS realisation of the production language CLIPS is also reported. The production system execution time for large numbers of processed facts is about 1,000 times less than the corresponding CLIPS execution time on a standard computer architecture.}, Address = {Dunedin, New Zealand}, Author = {Nikola K. Kasabov and S.H. Lavington and S. Lin and C. Wang}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:20:43 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {associative matching, content-addressable memory (CAM), production systems}, Month = {February}, Number = {94/5}, Size = {301 KB}, Title = {A model for exploiting associative matching in {AI} production systems}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1994-04, Abstract = {Neural networks (NNs) have been intensively used for speech processing. This paper describes a series of experiments on using a single Kohonen Self Organizing Map (KSOM), a hierarchically organised KSOM, a backpropagation-type neural network with fuzzy inputs and outputs, and a fuzzy system, for continuous speech recognition. Experiments with different non-linear transformations on the signal before using a KSOM have been performed. The results obtained by using different techniques on the case study of phonemes in the Bulgarian language are compared.}, Address = {Dunedin, New Zealand}, Author = {Nikola K. Kasabov and E. 
Peev}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:04 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {February}, Number = {94/4}, Size = {258 KB}, Title = {Phoneme recognition with hierarchical self organised neural networks and fuzzy systems}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1996-03, Abstract = {General characteristics of the theme: * Emerging technology with rapidly growing practical applications * Nationally and internationally recognised leadership of the University of Otago * Already established organisation for research and working teams * Growing number of postgraduate students working on the theme * Growing number of research projects in this area * Growing number of publications by members of the team}, Address = {Dunedin, New Zealand}, Author = {Nikola K. Kasabov and Martin K. Purvis and Philip J. Sallis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:39:27 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {February}, Number = {96/03}, Size = {180 KB}, Title = {Connectionist-based information systems: {A} proposed research theme}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1996-08, Abstract = {This paper proposes neuro-fuzzy engineering as a novel approach to spatial data analysis and to building decision-making systems based on spatial information processing, and presents the authors' development of this approach. It has been implemented as a software environment and is illustrated on a case study problem.}, Address = {Dunedin, New Zealand}, Author = {Nikola K. Kasabov and Martin K. Purvis and Feng Zhang and George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:39:49 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {96/08}, Size = {376 KB}, Title = {Neuro-fuzzy engineering for spatial information processing}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1994-18, Abstract = {The paper presents a framework of an integrated environment for speech recognition and a methodology for using such an environment. The integrated environment includes a signal processing unit, neural networks and fuzzy rule-based systems. Neural networks are used for ``blind'' pattern recognition of the phonemic labels of the segments of the speech. Fuzzy rules are used for reducing the ambiguities of the correctly recognised phonemic labels, for final recognition of the phonemes, and for language understanding. The fuzzy system part is organised as a multi-level, hierarchical structure. As an illustration, a model for phoneme recognition of New Zealand English is developed, which exploits the advantages of the integrated environment. The model is illustrated on a small set of phonemes.}, Address = {Dunedin, New Zealand}, Author = {Nikola K. Kasabov and Catherine I. 
Watson and Stephen Sinclair and Richard Kilgour}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:04 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {November}, Number = {94/18}, Title = {Integrating neural networks and fuzzy systems for speech recognition}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1999-07, Abstract = {This paper discusses the problem of adaptation in automatic speech recognition systems (ASRS) and suggests several strategies for adaptation in a modular architecture for speech recognition. The architecture allows for adaptation at different levels of the recognition process, where modules can be adapted individually based on their performance and the performance of the whole system. Two realisations of this architecture are presented, along with results from small-scale experiments. The first realisation is a hybrid system for speaker-independent phoneme-based spoken word recognition, consisting of neural networks for recognising English phonemes and fuzzy systems for modelling acoustic and linguistic knowledge. This system is adjustable by additional training of individual neural network modules and by tuning the fuzzy systems. The increased accuracy of the recognition through appropriate adjustment is also discussed. The second realisation of the architecture is a connectionist system that uses fuzzy neural networks (FuNNs) to accommodate both a priori linguistic knowledge and data from a speech corpus. A method for on-line adaptation of FuNNs is also presented.}, Address = {Dunedin, New Zealand}, Author = {Nikola Kasabov and Richard Kilgour and Stephen Sinclair}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:05 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {pattern recognition, artificial intelligence, neural networks, speech recognition}, Month = {April}, Number = {99/07}, Title = {From hybrid adjustable neuro-fuzzy systems to adaptive connectionist-based systems for phoneme and word recognition}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-04, Abstract = {The paper introduces a new type of evolving fuzzy neural networks (EFuNNs), denoted as mEFuNNs, for on-line learning and their applications for dynamic time series analysis and prediction. mEFuNNs evolve through incremental, hybrid (supervised/unsupervised), on-line learning, like the EFuNNs. They can accommodate new input data, including new features, new classes, etc. through local element tuning. New connections and new neurons are created during the operation of the system. At each time moment the output vector of an mEFuNN is calculated based on the m-most activated rule nodes. Two approaches are proposed: (1) using weighted fuzzy rules of Zadeh-Mamdani type; (2) using Takagi-Sugeno fuzzy rules that utilise dynamically changing and adapting values for the inference parameters. It is proved that the mEFuNNs can effectively learn complex temporal sequences in an adaptive way and outperform EFuNNs, ANFIS and other neural network and hybrid models. Rules can be inserted, extracted and adjusted continuously during the operation of the system. The characteristics of the mEFuNNs are illustrated on two benchmark dynamic time series data sets, as well as on two real case studies for on-line adaptive control and decision making. 
Aggregation of rule nodes in evolved mEFuNNs can be achieved through the fuzzy C-means clustering algorithm, which is also illustrated on the benchmark data sets. mEFuNNs that are regularly trained and aggregated in an on-line, self-organised mode perform as well as, or better than, mEFuNNs that use the fuzzy C-means clustering algorithm for off-line rule node generation on the same data set.}, Address = {Dunedin, New Zealand}, Author = {Nikola Kasabov and Qun Song}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:05 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {dynamic evolving fuzzy neural networks, on-line learning, adaptive control, dynamic time series prediction, fuzzy clustering}, Month = {March}, Number = {99/04}, Size = {2 MB}, Title = {Dynamic evolving fuzzy neural networks with `m-out-of-n' activation nodes for on-line adaptive systems}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-03, Abstract = {The paper is a study of a new class of spatial-temporal evolving fuzzy neural network systems (EFuNNs) for on-line adaptive learning, and their applications for adaptive phoneme recognition. The systems evolve through incremental, hybrid (supervised/unsupervised) learning. They accommodate new input data, including new features, new classes, etc. through local element tuning. Both feature-based similarities and temporal dependencies that are present in the input data are learned and stored in the connections, and adjusted over time. This is an important requirement for the task of adaptive, speaker-independent spoken language recognition, where new pronunciations and new accents need to be learned in an on-line, adaptive mode. Experiments with EFuNNs, and also with multi-layer perceptrons and fuzzy neural networks (FuNNs), conducted on the whole set of New Zealand English phonemes, show the superiority and the potential of EFuNNs when used for the task. Spatial allocation of nodes and their aggregation in EFuNNs allow for similarity preservation and similarity observation within one phoneme's data and across phonemes, while subtle temporal variations within one phoneme's data can be learned and adjusted through temporal feedback connections. The experimental results support the claim that spatial-temporal organisation in EFuNNs can lead to a significant improvement in the recognition rate, especially for the diphthong and the vowel phonemes in English, which in many cases are problematic for a system to learn and adjust in an adaptive way.}, Address = {Dunedin, New Zealand}, Author = {Nikola Kasabov and Michael Watts}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:05 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {99/03}, Size = {560 KB}, Title = {Spatial-temporal adaptation in evolving fuzzy neural networks for on-line adaptive phoneme recognition}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-19, Abstract = {In an introductory course in information technology at the University of Otago, the acquisition of practical skills is considered to be a prime objective. An effective way of assessing the achievement of this objective is by means of a `practical test', in which students are required to accomplish simple tasks in a controlled environment. The assessment of such work demands a high level of expertise, is very labour-intensive and can suffer from marker inconsistency, particularly with large candidatures. 
This paper describes the results of a trial in which the efforts of one thousand students in a practical test of word processing were scored by means of a program written in MediaTalk. Details of the procedure are given, including sampling strategies for the purpose of validation and examples of problems that were encountered. It was concluded that the approach was useful and, once properly validated, gave rise to considerable savings in time and effort.}, Address = {Dunedin, New Zealand}, Author = {Geoffrey Kennedy}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:05 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {computer-aided learning, automated scoring, computer education, test validation}, Month = {September}, Number = {99/19}, Size = {216 KB}, Title = {Automated scoring of practical tests in an introductory course in information technology}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-05, Abstract = {In this paper, an adaptive neuro-fuzzy system, called HyFIS, is proposed to build and optimise fuzzy models. The proposed model introduces the learning power of neural networks into the fuzzy logic systems and provides linguistic meaning to the connectionist architectures. Heuristic fuzzy logic rules and input-output fuzzy membership functions can be optimally tuned from training examples by a hybrid learning scheme composed of two phases: the phase of rule generation from data, and the phase of rule tuning by using the error backpropagation learning scheme for a neural fuzzy system. In order to illustrate the performance and applicability of the proposed neuro-fuzzy hybrid model, extensive simulation studies of nonlinear complex dynamics are carried out. The proposed method can be applied to on-line incremental adaptive learning for the purpose of prediction and control of non-linear dynamical systems.}, Address = {Dunedin, New Zealand}, Author = {Jaesoo Kim and Nikola Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:05 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {neuro-fuzzy systems, neural networks, fuzzy logic, parameter and structure learning, knowledge acquisition, adaptation, time series}, Month = {March}, Number = {99/05}, Title = {Hybrid neuro-fuzzy inference systems and their application for on-line adaptive learning of nonlinear dynamical systems}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1998-04, Abstract = {Variation in fruit maturation can influence harvest timing and duration, post-harvest fruit attributes and consumer acceptability. Present methods of managing and identifying lines of fruit with specific attributes both in commercial fruit production systems and breeding programs are limited by a lack of suitable tools to characterise fruit attributes at different stages of development in order to predict fruit behaviour at harvest, during storage or in relation to consumer acceptance. With visible-near infrared (VNIR) reflectance spectroscopy, a vast array of analytical information is collected rapidly with a minimum of sample pre-treatment. VNIR spectra contain information about the amount and the composition of constituents within fruit. This information can be obtained from intact fruit at different stages of development. 
Spectroscopic data is processed using chemometrics techniques such as principal component analysis (PCA), discriminant analysis and/or connectionist approaches in order to extract qualitative and quantitative information for classification and predictive purposes. In this paper, we will illustrate the effectiveness of a model, using connectionist and hybrid approaches, for fruit quality classification problems.}, Address = {Dunedin, New Zealand}, Author = {Jaesoo Kim and Nikola Kasabov and A. Mowat and P. Poole}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:06 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {98/04}, Title = {Connectionist methods for classification of fruit populations based on visible-near infrared spectrophotometry data}, Type = {Discussion paper}, Year = {1998}} @techreport{dp1998-05, Abstract = {Biological processes are among the most challenging to predict and control. It has been recognised that the development of an intelligent system for the recognition, prediction and control of process states in a complex, nonlinear biological process is difficult. Such unpredictable system behaviour requires an advanced, intelligent control system which learns from observations of the process dynamics and takes appropriate control action to avoid collapse of the biological culture. In the present study, a hybrid system called a fuzzy neural network is considered, where the role of the fuzzy neural network is to estimate the correct feed demand as a function of the process responses. The feed material is an organic and/or inorganic mixture of chemical compounds for the bacteria to grow on. Small amounts of the feed sources must be added and the response of the bacteria must be measured. This is no easy task because the process sensors used are non-specific and their response would vary during the developmental stages of the process. This hybrid control strategy retains the advantages of both neural networks and fuzzy control. These strengths include fast and accurate learning, good generalisation capabilities, excellent explanation facilities in the form of semantically meaningful fuzzy rules, and the ability to accommodate both numerical data and existing expert knowledge about the problem under consideration. The application to the estimation and prediction of the correct feed demand shows the power of this strategy as compared with conventional fuzzy control.}, Address = {Dunedin, New Zealand}, Author = {Jaesoo Kim and Robert Kozma and Nikola Kasabov and B. Gols and M. Geerink and T. Cohen}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:06 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {fuzzy neural networks, hybrid learning, knowledge extraction and insertion, estimation, biological process and control, bacterial system, total organic carbon (TOC)}, Month = {March}, Number = {98/05}, Title = {A fuzzy neural network model for the estimation of the feeding rate to an anaerobic waste water treatment process}, Type = {Discussion paper}, Year = {1998}} @techreport{dp1999-14, Abstract = {For some years, software engineers have been attempting to develop useful prediction systems to estimate such attributes as the effort to develop a piece of software and the likely number of defects. Typically, prediction systems are proposed and then subjected to empirical evaluation. 
Claims are then made with regard to the quality of the prediction systems. A wide variety of prediction quality indicators have been suggested in the literature. Unfortunately, we believe that a somewhat confusing state of affairs prevails and that this impedes research progress. This paper aims to provide the research community with a better understanding of the meaning of, and relationship between, these indicators. We critically review twelve different approaches by considering them as descriptors of the residual variable. We demonstrate that the two most popular indicators, MMRE and pred(25), are in fact indicators of the spread and shape, respectively, of prediction accuracy, where prediction accuracy is the ratio of estimate to actual (or actual to estimate). Next, we highlight the impact of the choice of indicator by comparing three prediction systems derived using four different simulated datasets. We demonstrate that the results of such a comparison depend upon the choice of indicator, the analysis technique, and the nature of the dataset used to derive the predictive model. We conclude that prediction systems cannot be characterised by a single summary statistic. We suggest that we need indicators of the central tendency and spread of accuracy as well as indicators of shape and bias. For this reason, boxplots of relative error or residuals are useful alternatives to simple summary metrics.}, Address = {Dunedin, New Zealand}, Author = {Barbara Kitchenham and Stephen MacDonell and Lesley Pickard and Martin Shepperd}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:06 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {prediction systems, estimation, empirical analysis, metrics, goodness-of-fit statistics}, Month = {June}, Number = {99/14}, Size = {304 KB}, Title = {Assessing prediction systems}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1996-20, Abstract = {A novel connectionist architecture based on an optical thin-film multilayer model (OTFM) is described. The architecture is explored as an alternative to the widely used neuron-inspired models, with the thin-film thicknesses serving as adjustable `weights' for the computation. The use of genetic algorithms for training the thin-film model, along with experimental results on the parity problem and the iris data classification, is presented.}, Address = {Dunedin, New Zealand}, Author = {Xiaodong Li and Martin K. Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:40:45 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {October}, Number = {96/20}, Size = {448 KB}, Title = {Using genetic algorithms for an optical thin-film learning model}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1996-25d, Abstract = {How should geographic information systems be developed? There is a strong demand from users for enhanced functionality and power. Vendors can and do respond to these demands. But where will this lead? Will the result be one all-embracing and all-conquering program or geographic information system (GIS)? A GIS could grow to incorporate all statistical functions, all visualisation techniques, all data management functions, etc. It is possible to perceive a scenario in which GIS is developed to `bloatware' proportions. An alternative scenario is one in which a GIS is interfaced with other software systems. 
Embedding database bridges and other product-specific links, providing data import and export routines, and making system calls are all ways of interfacing GIS with other systems. GIS vendors could opt to produce a `linkware' GIS, interfaced to as many third party systems as possible. Given these two alternatives to GIS development, an interesting set of questions arises. How far do vendors go with enhancing their systems compared with interfacing with third party systems? Is there a balance? Or do GIS users just keep calling for `more', regardless of the solution set? There is a balance. GIS is likely to be developed by being enhanced AND by being interfaced with third party software. In a way, this is a third developmental track leading to an increasingly functional GIS whose ability to interact with other systems is greatly improved. This interoperable GIS allows flexible combinations of systems components while still providing a comprehensive range of spatial operations and analytical functions. Of these three developmental tracks, this paper presents an example of what can be achieved with the interoperable GIS. Expert systems are introduced along with the client/server and object-oriented paradigms. By using these paradigms, a generic, spatial, rule-based toolbox called SES (spatial expert shell) has been created. SES is described using examples and contrasted with other documented expert system-GIS linkages. But first, integration is modelled in three dimensions to highlight the need for improvements in how GISs can interact with other systems.}, Address = {Dunedin, New Zealand}, Author = {Linda Lilburne and George Benwell and Roz Buick}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:36:38 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {96/25d}, Size = {564 KB}, Title = {{GIS}, expert systems and interoperability}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1999-15, Abstract = {This paper describes ongoing research directed at formulating a set of appropriate measures for assessing and ultimately predicting effort requirements for multimedia systems development. Whilst significant advances have been made in the determination of measures for both transaction-based and process-intensive systems, very little work has been undertaken in relation to measures for multimedia systems. A small preliminary empirical study is reviewed as a precursor to a more exploratory investigation of the factors that are considered by industry to be influential in determining development effort. This work incorporates the development and use of a goal-based framework to assist the measure selection process from a literature basis, followed by an industry questionnaire. The results provide a number of preliminary but nevertheless useful insights into contemporary project management practices with respect to multimedia systems.}, Address = {Dunedin, New Zealand}, Author = {Stephen MacDonell and Tim Fletcher}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:07 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {99/15}, Size = {228 KB}, Title = {Industry practices in project management for multimedia information systems}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1993-04, Abstract = {This paper reports the results of a recent national survey which considered the use of CASE tools and 4GLs in commercial software development.
Responses from just over 750 organisations show a high degree of product penetration, along with extensive use of package solutions. Use of 3GLs in general, and of COBOL in particular, is still relatively widespread, however. In terms of systems analysis and design techniques under a CASE/4GL environment, screen and report definition is the most preferred technique, although both dataflow analysis and data modelling also feature strongly.}, Address = {Dunedin, New Zealand}, Author = {Stephen G. MacDonell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2011-01-19 14:30:30 +1300}, Institution = {Department of Information Science, University of Otago}, Month = {November}, Number = {93/4}, Size = {225 KB}, Title = {Software development, {CASE} tools and {4GLs}---{A} survey of {N}ew {Z}ealand usage. Part 1: 750 {N}ew {Z}ealand organisations}, Type = {Discussion paper}, Year = {1993}} @techreport{dp1994-08, Abstract = {Budgetary constraints are placing increasing pressure on project managers to effectively estimate development effort requirements at the earliest opportunity. With the rising impact of automation on commercial software development, the attention of researchers developing effort estimation models has recently been focused on functional representations of systems, in response to the assertion that development effort is a function of specification content. A number of such models exist---several, however, have received almost no research or industry attention. Project managers wishing to implement a functional assessment and estimation programme are therefore unlikely to be aware of the various methods or how they compare. This paper therefore attempts to provide this information, as well as forming a basis for the development and improvement of new methods.}, Address = {Dunedin, New Zealand}, Author = {Stephen G. MacDonell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:07 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {94/8}, Size = {259 KB}, Title = {A comparative review of functional complexity assessment methods for effort estimation}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1995-05, Abstract = {Advances in software process technology have rendered many existing methods of size assessment and effort estimation inapplicable. The use of automation in the software process, however, provides an opportunity for the development of more appropriate software size-based effort estimation models. A specification-based size assessment method has therefore been developed and tested in relation to process effort on a preliminary set of systems. The results of the analysis confirm the assertion that, within the automated environment class, specification size indicators (that may be automatically and objectively derived) are strongly related to process effort requirements.}, Address = {Dunedin, New Zealand}, Author = {Stephen G.
MacDonell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:16:17 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {CASE, process effort, software metrics}, Month = {July}, Number = {95/5}, Size = {264 KB}, Title = {Establishing relationships between specification size and software process effort in {CASE} environments}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1996-06, Abstract = {The controlled management of software processes, an area of ongoing research in the business systems domain, is equally important in the development of geographical information systems (GIS). Appropriate software processes must be defined, used and managed in order to ensure that, as much as possible, systems are developed to quality standards on time and within budget. However, specific characteristics of geographical information systems, in terms of their inherent need for graphical output, render some process management tools and techniques less appropriate. This paper examines process management activities that are applicable to GIS, and suggests that it may be possible to extend such developments into the visual programming domain. A case study concerned with development effort estimation is presented as a precursor to a discussion of the implications of system requirements for significant graphical output.}, Address = {Dunedin, New Zealand}, Author = {Stephen G. MacDonell and George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:39:36 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {96/06}, Size = {180 KB}, Title = {Process management for geographical information system development}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1996-17, Abstract = {The use of `standard' regression analysis to derive predictive equations for software development has recently been complemented by increasing numbers of analyses using less common methods, such as neural networks, fuzzy logic models, and regression trees. This paper considers the implications of using these methods and provides some recommendations as to when they may be appropriate. A comparison of techniques is also made in terms of their modelling capabilities with specific reference to function point analysis.}, Address = {Dunedin, New Zealand}, Author = {Stephen G. MacDonell and Andrew R. Gray}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:40:20 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {September}, Number = {96/17}, Size = {232 KB}, Title = {Alternatives to regression models for estimating software projects}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1996-19, Abstract = {This paper brings together a set of commonsense recommendations relating to the delivery of software quality, with some emphasis on the adoption of realistic perspectives for software process/product stakeholders in the area of process improvement. The use of software measurement is regarded as an essential component for a quality development program, in terms of prediction, control, and adaptation as well as the communication necessary for stakeholders' realistic perspectives. Some recipes for failure are briefly considered so as to enable some degree of contrast between what is currently perceived to be good and bad practices. 
This is followed by an evaluation of the quality-at-all-costs model, including a brief pragmatic investigation of quality in other, more mature, disciplines. Several programs that claim to assist in the pursuit of quality are examined, with some suggestions made as to how they may best be used in practice.}, Address = {Dunedin, New Zealand}, Author = {Stephen G. MacDonell and Andrew R. Gray}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:40:37 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {September}, Number = {96/19}, Size = {240 KB}, Title = {Software process engineering for measurement-driven software quality programs---{R}ealism and idealism}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1996-15, Abstract = {There is comparatively little work, other than function points, that tackles the problem of building prediction systems for software that is dominated by data considerations, in particular systems developed using 4GLs. We describe an empirical investigation of 70 such systems. Various easily obtainable counts were extracted from data models (e.g. number of entities) and from specifications (e.g. number of screens). Using simple regression analysis, prediction systems of implementation size with accuracy of MMRE=21% were constructed. Our work shows that it is possible to develop simple and effective prediction systems based upon metrics easily derived from functional specifications and data models.}, Address = {Dunedin, New Zealand}, Author = {Stephen G. MacDonell and Martin J. Shepperd and Philip J. Sallis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:40:12 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {metrics, entity-relationship models, 4GL, empirical, prediction}, Month = {August}, Number = {96/15}, Size = {200 KB}, Title = {Measurement of database systems: {A}n empirical study}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1999-13, Abstract = {There has been increasing interest in recent times in using fuzzy logic techniques to represent software metric models, especially those predicting development effort. The use of fuzzy logic for this application area offers several advantages when compared to other commonly used techniques. These include the use of a single model with different levels of precision for inputs and outputs used throughout the development life cycle, the possibility of model development with little or no data, and its effectiveness when used as a communication tool. The use of fuzzy logic in any applied field, however, requires that suitable tools are available for both practitioners and researchers---satisfying both interface and functionality related requirements. After outlining some of the specific needs of the software metrics community, including results from a survey of software developers on this topic, the paper describes the use of a set of tools called FULSOME (Fuzzy Logic for Software Metrics). The development of a simple fuzzy logic system by a software metrician and subsequent tuning are then discussed using a real-world set of software metric data.
The automatically generated fuzzy model performs acceptably when compared to regression-based models.}, Address = {Dunedin, New Zealand}, Author = {Stephen MacDonell and Andrew Gray and James Calvert}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:09 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {99/13}, Size = {236 KB}, Title = {{FULSOME}: {F}uzzy logic for software metric practitioners and researchers}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-12, Abstract = {Software forensics is a research field that, by treating pieces of program source code as linguistically and stylistically analyzable entities, attempts to investigate aspects of computer program authorship. This can be performed with the goal of identification, discrimination, or characterization of authors. In this paper we extract a set of 26 standard authorship metrics from 351 programs by 7 different authors. The use of feed-forward neural networks, multiple discriminant analysis, and case-based reasoning is then investigated in terms of classification accuracy for the authors on both training and testing samples. The first two techniques produce remarkably similar results, with the best results coming from the case-based reasoning models. All techniques have high prediction accuracy rates, supporting the feasibility of the task of discriminating program authors based on source-code measurements.}, Address = {Dunedin, New Zealand}, Author = {Stephen MacDonell and Andrew Gray and Grant MacLennan and Philip Sallis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:09 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {99/12}, Size = {148 KB}, Title = {Software forensics for discriminating between program authors using case-based reasoning, feed-forward neural networks and multiple discriminant analysis}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1996-25e, Abstract = {This paper first describes the difficulties inherent in supporting a class of environmental problems, those involved in Regional Environmental Decision Making. A set of conceptual criteria are presented along with discussion on how the criteria might be approached. It is shown that a major obstacle is the need for a system that integrates components of Geographic Information Systems with process modelling functions. A new approach, Spatial Process Modelling, is proposed. More detailed design criteria for this system are developed, which are then used to develop a prototype system. The system is described and its benefits and limitations discussed.}, Address = {Dunedin, New Zealand}, Author = {Samuel Mann}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:37:52 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {96/25e}, Size = {348 KB}, Title = {Environmental decisions with spatial process modelling}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1995-12, Abstract = {Resource management in New Zealand is fraught with debate and controversy. Regional Councils often seem stuck in the middle of two opposing groups, the farmers and the environmentalists. There are areas, however, where the Regional Councils could be seen to be hindering progress towards resolution of problems.
By avoiding policy formulation on certain issues, e.g. vegetation burning, Councils are creating difficulties for their own staff, landholders and environmental groups. This paper examines one debate that could be greatly simplified by a few policy direction decisions.}, Address = {Dunedin, New Zealand}, Author = {Samuel A. Mann}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:09 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {September}, Number = {95/12}, Title = {A case study in environmental decision making}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1997-02, Abstract = {Analyses of landscape structure are used to test the hypothesis that remotely sensed images can be used as indicators of ecosystem conservation status. Vegetation types based on a classified SPOT satellite image were used in a comparison of paired reserve (conservation area) and adjacent, more human-modified areas (controls). Ten reserves (average size 965 ha) were selected from upland tussock grasslands in Otago, New Zealand. While there were equal numbers of vegetation types and the size and shape distribution of patches within the overall landscapes were not significantly different, there was less `target' vegetation in controls. This was in smaller patches and fewer of these patches contained `core areas'. These control `target' patches were also less complex in shape than those in the adjacent reserves. These measures showed that remotely sensed images can be used to derive large-scale indicators of landscape conservation status. An index is proposed for assessing landscape change, and conservation management issues are raised.}, Address = {Dunedin, New Zealand}, Author = {Samuel Mann and George L. Benwell and William G. Lee}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2011-01-17 14:34:32 +1300}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {97/02}, Size = {108 KB}, Title = {Landscape structure and ecosystem conservation: {A}n assessment using remote sensing}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1995-13, Abstract = {This paper draws together existing data with recent survey results and compares the development of local government GIS with the evolution of Information Systems (IS). These comparisons are made using the philosophy that organisational GIS can be modelled. Using this model, various stages of GIS maturity are evaluated.}, Address = {Dunedin, New Zealand}, Author = {Andrew J. Marr and George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:16:57 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {September}, Number = {95/13}, Size = {268 KB}, Title = {Local government {GIS} in {N}ew {Z}ealand since 1989}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1996-25f, Abstract = {This paper discusses the concept of maturity in the use of GIS and then formulates a computational method for measuring an organisation's maturity level from the construction of a surrogate indicator. Generation of this model is made under the proposition that maturity is linked to the level to which GIS has been integrated and utilised on an organisation-wide basis in day-to-day activities.
The research focuses on New Zealand local government and incorporates parallel studies of conventional information technology (IT) with recently collected data to provide support for the concepts and techniques used. It is postulated that, due to similarities of function found in other local authorities, the model has the potential, with further research, for wide application.}, Address = {Dunedin, New Zealand}, Author = {Andrew J. Marr and George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:36:04 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {96/25f}, Size = {500 MB}, Title = {{GIS} maturity and integration}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1998-09, Abstract = {In this paper is proposed a structure for the development of a generic graphical system for modelling spatial processes (SMSP). This system seeks to integrate the spatial data handling operations of a GIS with specialist numerical modelling functionality, by the description of the processes involved. A conceptual framework is described, the foundation of which are six defined modules (or services) that are considered a minimum requirement for basic system operation. The services are identified following description of the three key components to systems integration, and the examination of the preferred integrating structure. The relationship of the integration components to sample commentary on the future requirements of integration is discussed, and the benefits and deficiencies of an implemented system for modelling spatial processes are noted.}, Address = {Dunedin, New Zealand}, Author = {Andrew Marr and Richard Pascoe and George Benwell and Samuel Mann}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-11 09:58:36 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {98/09}, Size = {644 KB}, Title = {Development of a generic system for modelling spatial processes}, Type = {Discussion paper}, Year = {1998}} @techreport{dp1996-25g, Abstract = {This paper describes the use of a prototype spatial information system to facilitate exploratory analyses of 60 years of scientific observation data concerning a breeding population of royal albatrosses at Taiaroa Head, on the east coast of New Zealand's South Island. This system shall form the basis of an on-going data collection, management and analysis effort. Incorporation of breeding records with spatial and landscape data permits the investigation of spatial interactions between the location of nest sites and other phenomena. Three example analyses that explore these interactions are described and discussed.}, Address = {Dunedin, New Zealand}, Author = {B.R. McLennan and Martin K. Purvis and C.J.R. Robertson}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 17:10:34 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Note = {(Not in electronic version.)}, Number = {96/25g}, Title = {Wildlife population analysis with {GIS}: {C}onservation management of royal albatross}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1996-13, Abstract = {This paper describes a tool being developed to allow users to visualise the ripening characteristics of fruit. These characteristics, such as sugar, acid and moisture content, can be measured using non-destructive Near Infrared Reflectance (NIR) analysis techniques.
The four-dimensional nature of the NIR data introduces some interesting visualisation problems. The display device only provides two dimensions, making it necessary to design two-dimensional methods for representing the data. In order to help the user fully understand the dataset, a graphical display system is created with an interface that provides flexible visualisation tools.}, Address = {Dunedin, New Zealand}, Author = {Hayden Munro and Kevin Novins and George L. Benwell and Alistair Moffat}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:41:35 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {NIR spectroscopy, Polhemus FasTrak{\texttrademark}, interaction, interactive graphics, interfaces, visualisation, scientific visualisation}, Month = {July}, Number = {96/13}, Title = {Interactive visualisation tools for analysing {NIR} data}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1997-03, Abstract = {Multimedia technology allows a variety of presentation formats to portray instructions for performing a task. These formats include the use of text, graphics, video, audio and photographs, used singly or in combination (Kawin, 1992; Hills, 1984; Newton, 1990; Bailey, 1996). As part of research at the Multimedia Systems Research Laboratory to identify a syntax for the use of multimedia elements, an experiment was conducted to determine whether the use of text or video representations of task instructions was more effective at communicating those instructions (Norris, 1996). This paper reports on the outcome of that study. The repair and assembly environment of a local whiteware manufacturer provided the study domain. The task chosen for the study was the replacement of a heating element in a cooktop oven. As there were no task instructions available from the manufacturer, the study was conducted in two phases: Phase I was a cognitive task analysis of service technicians to determine the steps as well as the cues and considerations of the assembly task; and in Phase II we evaluated the text and video representations of the task instructions. The next sections briefly describe the methodology and the results from the experiment.}, Address = {Dunedin, New Zealand}, Author = {Brian E. Norris and William B.L. Wong}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:11 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {May}, Number = {97/03}, Size = {44 KB}, Title = {Supporting task performance: {I}s text or video better?}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1999-21, Abstract = {This paper reports on an investigation into wayfinding principles, and their effectiveness within a virtual environment. To investigate these principles, a virtual environment of an actual museum was created using QuickTime Virtual Reality. Wayfinding principles used in the real world were identified and used to design the interaction of the virtual environment. The initial findings suggest that real-world navigation principles, such as the use of map and landmark principles, can significantly help navigation within this virtual environment.
However, navigation difficulties were discovered through an Activity Theory-based Cognitive Task Analysis.}, Address = {Dunedin, New Zealand}, Author = {Brian Norris and Da'oud Rashid and William Wong}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:48:32 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {wayfinding, navigation, QTVR, virtual environments, activity theory}, Month = {September}, Number = {99/21}, Title = {Wayfinding/navigation within a {QTVR} virtual environment: {P}reliminary results}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-10, Abstract = {More and more medical information is appearing on the Internet, but it is not easy to get at the nuggets amongst all the spoil. Bruce McKenzie's editorial in the December 1997 edition of SIM Quarterly dealt very well with the problems of quality, but I would suggest that the problem of accessibility is as much of a challenge. As ever-greater quantities of high quality medical information are published electronically, the need to be able to find it becomes imperative. There are a number of tools to find what you want on the Internet---search engines, agents, indexing and classification schemes and hyperlinks, but their use requires care, skill and experience.}, Address = {Dunedin, New Zealand}, Author = {David Parry}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:12 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {99/10}, Size = {196 KB}, Title = {Finding medical information on the {I}nternet: {W}ho should do it and what should they know}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-27, Abstract = {Since July 1998 we have been teaching an Internet-based distance learning course in health informatics (http://basil.otago.ac.nz:800). The development of this course and the experiences we have had running it are described in this paper. The course was delivered using paper materials, a face-to-face workshop, a CD-ROM and Internet communication tools. We currently have about 30 students around New Zealand, a mixture of physicians, nurses and other health staff. Some teaching methods have worked, some haven't, but in the process we have learned a number of valuable lessons.}, Address = {Dunedin, New Zealand}, Author = {David Parry and Alice Breton and David Abernethy and Sophie Cockcroft and John Gillies}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:49:10 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {distance learning, healthcare, Internet, CD-ROM}, Month = {December}, Number = {99/27}, Size = {80 KB}, Title = {Using the {I}nternet to teach health informatics}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-24, Abstract = {Since 1997 the authors have been involved in the development of a distance learning course in health informatics. The course is delivered via CD-ROM and the Internet. During this process we have learned valuable lessons about computer-assisted collaboration and cooperative work. In particular we have developed methods of using the software tools available for communication and education. 
We believe that electronic distance learning offers a realistic means of providing education in health informatics and other fields to students who, for reasons of geography or work commitments, would not be able to participate in a conventional course.}, Address = {Dunedin, New Zealand}, Author = {David Parry and Sophie Cockcroft and Alice Breton and David Abernethy and John Gillies}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:12 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {99/24}, Size = {496 KB}, Title = {The development of an electronic distance learning course in health informatics}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-26, Abstract = {The rate of induction of labour (IOL) is increasing, despite no obvious increase in the incidence of the major indications. However, the rate varies widely between different centres and practitioners, and this does not seem to be due to variations in patient populations. The IOL decision-making process of six clinicians was recorded and examined using hypothetical scenarios presented on a computer. Several rules were identified from a rough sets analysis of the data. These rules were compared to the actual practice of these clinicians in 1994. Initial tests of these rules show that they may form a suitable set for developing an expert system for the induction of labour.}, Address = {Dunedin, New Zealand}, Author = {David Parry and Wai Kiang Yeap and Neil Pattison}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:12 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {rough sets, obstetrics, knowledge acquisition}, Month = {December}, Number = {99/26}, Size = {108 KB}, Title = {Using rough sets to study expert behaviour in induction of labour}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1998-06, Abstract = {The aim of the study was to compare the two management protocols for postterm pregnancy: elective induction of labour at 42 weeks' gestation and continuing the pregnancy with fetal monitoring while awaiting spontaneous labour. A retrospective observational study compared a cohort of 360 pregnancies where labour was induced with 486 controls. All pregnancies were postterm (>294 days) by an early ultrasound scan. Induction of labour was achieved with either prostaglandin vaginal pessaries or gel or forewater rupture and Syntocinon infusion. The control group consisted of women with postterm pregnancies who were not induced routinely and who usually had twice weekly fetal assessment with cardiotocography and/or ultrasound. Women who had their labour induced differed from those who awaited spontaneous labour. Nulliparas (OR 1.54; 95% CI 1.24-1.83) and married women (OR 1.76; 95% CI 1.45-2.06) were more likely to have their labour induced. There was no association between the type of caregiver and induction of labour. Induction of labour was associated with a reduction in the incidence of normal vaginal delivery (OR 0.63, 95% CI 0.43-0.92) and an increased incidence of operative vaginal delivery (OR 1.46; 95% CI 1.34-2.01). There was no difference in the overall rate of Caesarean section. There was no difference in fetal or neonatal outcomes. Parity had a major influence on delivery outcomes from a policy of induction of labour. Nulliparas in the induced group had worse outcomes with only 43% achieving a normal vaginal delivery (OR 0.78, 95% CI 0.65-0.95).
In contrast, for multiparas the induced group had better outcomes, with fewer Caesarean sections (OR 0.88, 95% CI 0.81-0.96). This retrospective observational study of current clinical practice shows that induction of labour for postterm pregnancy appears to be favoured by nulliparous married women. It suggests that induction of labour may improve delivery outcomes for multigravidas but has an adverse effect for nulliparas.}, Address = {Dunedin, New Zealand}, Author = {Emma Parry and David Parry and Neil Pattison}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:13 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {98/06}, Title = {Induction of labour for post term pregnancy: {A}n observational study}, Type = {Discussion paper}, Year = {1998}} @techreport{dp1996-25h, Abstract = {Sharing geographical data sets is highly desirable for economic and technical reasons. In this paper the author describes the development of an agency for sharing geographical data which is based on the use of the ISODE implementation of the X.500 Directory Service and a collection of software agents which collaborate with each other to perform the various tasks associated with sharing data.}, Address = {Dunedin, New Zealand}, Author = {Richard T. Pascoe}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 17:10:37 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Note = {(Not in electronic version.)}, Number = {96/25h}, Title = {Data sharing using the {X}.500 directory}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1996-16, Abstract = {In this paper is discussed the preliminary development of a Diploma in Medical Informatics which will comprise courses offered entirely through the Internet in the form of World Wide Web documents and electronic mail. Proposed use of such educational technology for the delivery of these courses within a distance learning environment is based upon a conversational framework developed by Laurillard (1993) and an associated classification of this technology according to the extent to which elements within the conversational framework are supported.}, Address = {Dunedin, New Zealand}, Author = {Richard T. Pascoe and David Abernethy}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:41:59 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {Diploma in Medical Informatics, World Wide Web (WWW), distance learning, educational technology}, Month = {September}, Number = {96/16}, Title = {Teaching a diploma in medical informatics using the {W}orld {W}ide {W}eb}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1995-01, Abstract = {This paper develops an approach to data modelling for the design of hypermedia databases. First, the use of data modelling for the design of hypermedia database systems is investigated. A specific example, that of a car parts database, is used as a means of illustrating a generic problem, namely the difficulty associated with interrogating a large database when the exact data element being sought is unknown. The use of hypermedia as a basis for data retrieval in such situations is then discussed. The data contained within hypermedia database systems is typically unstructured, which has led to systems being developed using ad hoc design approaches with little regard for formal data modelling techniques.
Hence, the main contribution of the paper is the illustration of a hybrid data modelling approach of suitable semantic richness to capture the complexities of hypermedia databases.}, Address = {Dunedin, New Zealand}, Author = {Russell J. Pegler and Peter G. Firns}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:13 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {95/1}, Title = {Semantic data modelling for hypermedia database applications}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1999-17, Abstract = {This paper describes an architecture for building distributed information systems from existing information resources, based on distributed object and software agent technologies. This architecture is being developed as part of the New Zealand Distributed Information Systems (NZDIS) project. An agent-based architecture is used: information sources are encapsulated as information agents that accept messages in an agent communication language (the FIPA ACL). A user agent assists users to browse ontologies appropriate to their domain of interest and to construct queries based on terms from one or more ontologies. One or more query processing agents are then responsible for discovering (from a resource broker agent) which data source agents are relevant to the query, decomposing the query into subqueries suitable for those agents (including the translation of the query into the specific ontologies implemented by those agents), executing the subqueries and translating and combining the subquery results into the desired result set. Novel features of this system include the use of standards from the object-oriented community such as the Common Object Request Broker Architecture (CORBA) (as a communications infrastructure), the Unified Modeling Language (used as an ontology representation language), the Object Data Management Group's Object Query Language (used for queries) and the Object Management Group's Meta Object Facility (used as the basis for an ontology repository agent). Query results need not be returned within an ACL message, but may instead be represented by a CORBA object reference which may be used to obtain the result set.}, Address = {Dunedin, New Zealand}, Author = {Martin K. Purvis and Stephen Cranefield and Geoff Bush and Dan Carter and Bryce McKinlay and Mariusz Nowostawski and Roy Ward}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:14 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {August}, Number = {99/17}, Size = {212 KB}, Title = {The {NZDIS} project: {A}n agent-based distributed information systems architecture}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1995-08, Abstract = {With the increasing size, complexity and interconnectedness of systems and organisations, there is a growing need for high level modelling approaches that span the range of application domains. Causal agent modelling offers an intuitive and powerful approach for the development of dynamic models for any application area. This paper outlines some of the basic ideas behind the nature of causal agent models, why they are fundamental to the modelling enterprise, and compares developments in this area to those in the related field of coordination theory. It also describes some research activities using causal agent models at the University of Otago.}, Address = {Dunedin, New Zealand}, Author = {Martin K. 
Purvis and Stephen J.S. Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:16:48 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {August}, Number = {95/8}, Size = {180 KB}, Title = {Causal agent modelling: {A} unifying paradigm for systems and organisations}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1996-04, Abstract = {The use of intelligent software agents is a modelling paradigm that is gaining increasing attention in the applications of distributed systems. This paper identifies essential characteristics of agents and shows how they can be mapped into a coloured Petri net representation so that the coordination of activities both within agents and between interacting agents can be visualised and analysed. The detailed structure and behaviour of an individual agent in terms of coloured Petri nets is presented, as well as a description of how such agents interact. A key notion is that the essential functional components of an agent are explicitly represented by means of coloured Petri net constructs in this representation.}, Address = {Dunedin, New Zealand}, Author = {Martin K. Purvis and Stephen J.S. Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:39:00 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {96/04}, Title = {Agent modelling with {P}etri nets}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1999-06, Abstract = {The increasing availability and variety of large environmental data sets is opening new opportunities for data mining and useful cross-referencing of disparate environmental data sets distributed over a network. In order to take advantage of these opportunities, environmental information systems will need to operate effectively in a distributed, open environment. In this paper, we describe the New Zealand Distributed Information System (NZDIS) software architecture for environmental information systems. In order to optimise extensibility, openness, and flexible query processing, the architecture is organised into collaborating software agents that communicate by means of a standard declarative agent communication language. The metadata of environmental data sources are stored as part of agent ontologies, which represent information models of the domain of the data repository. The agents and the associated ontological framework are designed as much as possible to take advantage of standard object-oriented technology, such as CORBA, UML, and OQL, in order to enhance the openness and accessibility of the system.}, Address = {Dunedin, New Zealand}, Author = {Martin K. Purvis and Stephen Cranefield and Mariusz Nowostawski}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:14 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {April}, Number = {99/06}, Size = {208 KB}, Title = {A distributed architecture for environmental information systems}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1998-10, Abstract = {This paper describes combined approaches of data preparation, neural network analysis, and fuzzy inferencing techniques (which we collectively call neuro-fuzzy engineering) to the problem of environmental modelling. The overall neuro-fuzzy architecture is presented, and specific issues associated with environmental modelling are discussed.
A case study that shows how these techniques can be combined is presented for illustration. We also describe our current software implementation that incorporates neuro-fuzzy analytical tools into commercially available geographical information system software.}, Address = {Dunedin, New Zealand}, Author = {Martin K. Purvis and Nikola Kasabov and George Benwell and Qingqing Zhou and Feng Zhang}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-11 09:58:29 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {98/10}, Size = {384 KB}, Title = {Neuro-fuzzy methods for environmental modelling}, Type = {Discussion paper}, Year = {1998}} @techreport{dp1996-14, Abstract = {A novel connectionist architecture that differs from conventional architectures based on the neuroanatomy of biological organisms is described. The proposed scheme is based on the model of multilayered optical thin-films, with the thicknesses of the individual thin-film layers serving as adjustable `weights' for the training. A discussion of training techniques for this model and some sample simulation calculations in the area of pattern recognition are presented. These results are compared with those obtained when the same training data are used with a feed-forward neural network employing back propagation training. A physical realization of this architecture could largely take advantage of existing optical thin-film deposition technology.}, Address = {Dunedin, New Zealand}, Author = {Martin K. Purvis and Xiaodong Li}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:15 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {August}, Number = {96/14}, Title = {A connectionist computational architecture based on an optical thin-film model}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1994-07, Abstract = {Almost by definition, any engineering discipline has quantitative measurement at its foundation. In adopting an engineering approach to software development, the establishment and use of software metrics has therefore seen extensive discussion. The degree to which metrics are actually used, however, particularly in New Zealand, is unclear. Four surveys, conducted over the last eight years, are therefore reviewed in this paper, with a view to determining trends in the use of metrics. According to the findings presented, it would appear that no more than one third of organisations involved in software development utilise software metrics.}, Address = {Dunedin, New Zealand}, Author = {Martin K. Purvis and Stephen G. MacDonell and Jason Westland}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:20:57 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {94/7}, Title = {Software metrics in {N}ew {Z}ealand: {R}ecent trends}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1995-10, Abstract = {A single piece of legislation, the Resource Management Act, governs the management of environmental resources in New Zealand. It establishes procedural requirements and time constraints for all decision-making activities related to governmental environmental management.
The present paper describes a model, based on coloured Petri nets, that is under development to facilitate understanding of the Act and to examine performance characteristics of legal processes defined in the Act.}, Address = {Dunedin, New Zealand}, Author = {Martin K. Purvis and Maryam A. Purvis and George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:37:22 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {August}, Number = {95/10}, Title = {Modelling and simulation of the {N}ew {Z}ealand {R}esource {M}anagement {A}ct}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1993-01, Abstract = {Despite the many qualitative elements of software time-to-build estimating, some observable features can be quantified, even if the resulting set of variables observed is arbitrary. Such is the case when estimating the expected duration for database re-engineering. If we assume that for any extant database, an entity-relationship model (ERM) can be produced from which a new normalised schema is generated, then our estimating task needs to quantify both the complexity of the ensuing ERM and also the data modelling knowledge of the `re-engineer'. Whilst there may be additional variables to be considered, a set of primary elements required for estimating the duration of the task has been identified. The formula proposed in this paper is arbitrary, but it is intended as an instrument for measuring ER model complexity, such that time-to-build estimates can be made for the task of re-engineering extant non-relational databases into relational form.}, Address = {Dunedin, New Zealand}, Author = {Philip J. Sallis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:16 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {November}, Number = {93/1}, Size = {773 KB}, Title = {A data complexity formula for deriving time-to-build estimates from non-relational to relational databases}, Type = {Discussion paper}, Year = {1993}} @techreport{dp1994-01, Abstract = {This paper is the result of some research in computational stylistics; in particular, the analysis of a document corpus that has attracted the attention of scholars from several disciplines for hundreds of years. This corpus, the Epistles of Saint Ignatius of Antioch, was originally written in Greek but this analysis is of a single translation in English. The analysis has been undertaken using a conventional approach in computational stylistics but has employed a number of contemporary software packages, such as a grammar checker, normally used for text and document creation. Research in this field predominantly characterises authorship style by the use of document statistics, such as word frequency, sentence and paragraph length and, in some cases, the recurrence of certain phrases. During the research described here it was considered appropriate to use a grammar checker to identify the existence of a `new' set of characteristics. These include comparing the use of passive voice across the corpus being analysed, the percentage use of prepositions, as well as document statistics such as sentence and paragraph length, and the application of text readability formulas as indicators of writing style. The corpus analysed in this paper consists of the seven Epistles of Ignatius of Antioch, together with the Epistle of Polycarp to the Philippians.
The latter epistle has traditionally been held to authenticate the Ignatian writings. It has been suggested by some church historians that Ignatius was not the author of these epistles and may not, in fact, have existed as a person at all. Further, they suggest that two paragraphs in the Polycarp Epistle may have been added later by a second author to authenticate the Ignatian corpus. In order to contribute to the ongoing debate, this paper first examines the Ignatian corpus in order to determine single authorship of the seven epistles. Second, it seeks to determine whether or not the two disputed paragraphs in Polycarp's Epistle to the Philippians vary in authorship style from the rest of that epistle. Third, it compares authorship style in the two inserted paragraphs of Polycarp's Epistle with that of the Ignatian corpus in order to make some observations on the hypothesis that a single author was responsible for both.}, Address = {Dunedin, New Zealand}, Author = {Philip J. Sallis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:20:07 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {January}, Number = {94/1}, Size = {885 KB}, Title = {A comparison of authorship style in the document corpus of the {E}pistles of {S}t.\ {I}gnatius of {A}ntioch}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1994-02, Abstract = {Whilst change is an inherent characteristic of the IT industry, the difficulty of frequent and timely change in tertiary curricula is a constraint on the ability of universities to adequately meet the requirements of knowledge and expertise expected of new graduates. In this paper, some recently published research concerning the top ten issues for managers of information technology in the USA, Europe and Australia is evaluated in terms of its impact on IS teaching and research. The paper concludes that the top ten issues perceived by IS managers were probably in large part due to change resulting not only from advances in technology but also from past failures or inadequacies in the process of delivering high quality information system products to corporate consumers. The need for business and education to be aware of the motivations for change and the constraints that are attendant on it in both environments is emphasised if harmonious progress is to prevail in the production and utilisation of new IS graduates.}, Address = {Dunedin, New Zealand}, Author = {Philip J. Sallis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:19:50 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {January}, Number = {94/2}, Size = {599 KB}, Title = {Management perceptions of {IS} research and development issues}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1997-13, Address = {Dunedin, New Zealand}, Author = {Philip J. Sallis and Diana A. Kassabova}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-11 09:53:34 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {97/13}, Title = {Computer-mediated communication: {E}xperiments with e-mail readability}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1997-11, Abstract = {The paper explores building profiles of Newsgroups from a corpus of Usenet E-mail messages employing some standard statistical techniques as well as fuzzy clustering methods.
A large set of data from a number of Newsgroups has been analysed to elicit some text attributes, such as number of words, length of sentences and other stylistic characteristics. Readability scores have also been obtained by using recognised assessment methods. These text attributes were used for building Newsgroups' profiles. Three newsgroups, each with a similar number of messages, were selected from the processed sample for the analysis of two types of one-dimensional profiles, one by length of texts and the second by readability scores. Those profiles are compared with corresponding profiles of the whole sample and also with those of a group of frequent participants in the newsgroups. Fuzzy clustering is used for creating two-dimensional profiles of the same groups. An attempt is made to identify the newsgroups by defining centres of data clusters. It is contended that this approach to Newsgroup profile analysis could facilitate a better understanding of computer-mediated communication (CMC) on the Usenet, which is a growing medium of informal business and personal correspondence.}, Address = {Dunedin, New Zealand}, Author = {Philip J. Sallis and Diana A. Kassabova}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:17 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {97/11}, Title = {Usenet newsgroups' profile analysis utilising standard and non-standard statistical methods}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1995-02, Abstract = {In September 1994, the government of New Zealand published a document entitled Education for the 21st Century. The document sets out targets and challenges for the education system in New Zealand to meet by 2001. One of the targets, and the associated fiscal challenge, is to improve the access of New Zealand students to information technology, so that by 2001 there is at least one computer for every five students at all levels of school education. This bold policy statement follows a chain of reports and government initiatives extending over approximately 15 years. This paper describes government policy initiatives, the reports which gave rise to them, and the changes in curriculum, teacher, and classroom practice which have taken place since computers were first used in New Zealand classrooms in the 1970s. The short history of educational computing in New Zealand has spanned a period of massive political and economic reform, and enormous structural change in the education system. The authors have been observers of, and contributors to, aspects of the development of New Zealand's use of information technology in education over the whole of this period.}, Address = {Dunedin, New Zealand}, Author = {Philip J. Sallis and Tim McMahon}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:17 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {April}, Number = {95/2}, Title = {Pursuing a national policy for information technology in school education: {A} {N}ew {Z}ealand odyssey}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1999-23, Abstract = {Smith's method (Smith, 1985) is a formal technique for deriving a set of normalised relations from a functional dependency diagram (FDD). Smith's original rules for deriving these relations are incomplete, as they do not fully address the issue of determining the foreign key links between relations.
In addition, one of the rules for deriving foreign keys can produce incorrect results, while the other rule is difficult to automate. This paper describes solutions to these issues.}, Address = {Dunedin, New Zealand}, Author = {Nigel Stanger}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2010-11-10 12:06:17 +1300}, Institution = {Department of Information Science, University of Otago}, Keywords = {normalisation, functional dependencies, relational model, data model translation}, Month = {December}, Number = {99/23}, Size = {184 KB}, Title = {Modifications to {S}mith's method for deriving normalised relations from a functional dependency diagram}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1997-07, Abstract = {Modelling the structure of data is an important part of any system analysis project. One problem that can arise is that there may be many differing viewpoints among the various groups that are involved in a project. Each of these viewpoints describes a perspective on the phenomenon being modelled. In this paper, we focus on the representation of developer viewpoints, and in particular on how multiple viewpoint representations may be used for database design. We examine the issues that arise when transforming between different viewpoint representations, and describe an architecture for implementing a database design environment based on these concepts.}, Address = {Dunedin, New Zealand}, Author = {Nigel J. Stanger and Richard T. Pascoe}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:18 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {97/07}, Size = {232 KB}, Title = {Environments for viewpoint representations}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1997-08, Abstract = {In this paper, we describe the implementation of a database design environment (Swift) that incorporates several novel features: Swift's data modelling approach is derived from viewpoint-oriented methods; Swift is implemented in Java, which allows us to easily construct a client/server based environment; the repository is implemented using PostgreSQL, which allows us to store the actual application code in the database; and the combination of Java and PostgreSQL reduces the impedance mismatch between the application and the repository.}, Address = {Dunedin, New Zealand}, Author = {Nigel J. Stanger and Richard T.
Pascoe}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:18 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {97/08}, Size = {108 KB}, Title = {Exploiting the advantages of object-oriented programming in the implementation of a database design environment}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1996-25, Abstract = {A collection of papers authored by members of the Information Science department and presented at the 1st International Conference on GeoComputation, Leeds, United Kingdom.}, Address = {Dunedin, New Zealand}, Author = {{Multiple authors}}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:48:51 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {96/25}, Size = {2.9 MB}, Title = {Special issue: {G}eo{C}omputation '96}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1994-19, Abstract = {The facility to display features of speech in a visual speech aid does not by itself guarantee that the aid will be effective in speech therapy. An effective visual speech aid must provide a visual representation of an utterance from which a judgement on the ``goodness'' of the utterance can be made. Two things are required for an aid to be effective. Firstly, the clusters of acceptable utterances must be separate from the unacceptable utterances in display space. Secondly, the acoustic features which distinguish acceptable utterances from unacceptable utterances must be evident in the displays of the speech aid. A two-part test, called the Visual Display Test (VDT), has been developed to assess a visual speech aid's capacity to fulfil these requirements.}, Address = {Dunedin, New Zealand}, Author = {Catherine I. Watson}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:18 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {November}, Number = {94/19}, Size = {257 KB}, Title = {The visual display test: {A} test to assess the usefulness of a visual speech aid}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1999-22, Abstract = {Building predictive time series models for freshwater systems is important both for understanding the dynamics of these natural systems and in the development of decision support and management software. This work describes the application of a machine learning technique, namely genetic programming (GP), to the prediction of chlorophyll-a. The system endeavoured to evolve several mathematical time series equations, based on limnological and climate variables, which could predict the dynamics of chlorophyll-a on unseen data. The predictive accuracy of the genetic programming approach was compared with an artificial neural network and a deterministic algal growth model. The GP system evolved some solutions which were improvements over the neural network, and showed that the transparent nature of the solutions may allow inferences about underlying processes to be made. This work demonstrates that non-linear processes in natural systems may be successfully modelled through the use of machine learning techniques.
Further, it shows that genetic programming may be used as a tool for exploring the driving processes underlying freshwater system dynamics.}, Address = {Dunedin, New Zealand}, Author = {Peter Whigham and Friedrich Recknagel}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:19 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {September}, Number = {99/22}, Size = {264 KB}, Title = {Predictive modelling of plankton dynamics in freshwater lakes using genetic programming}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1996-25i, Abstract = {Geographic information systems are an important tool for the field of geocomputing. A key component of every system is the data---spatial data has traditionally been labour-intensive to collect, and hence expensive. This paper establishes a new method of acquiring spatial data from motion video. The proposed method is based upon the principles of photogrammetry, but allows position to be calculated with feature tracking rather than point correspondence. By doing so, it avoids many constraints imposed by previous solutions. The new method is demonstrated with linear and rotational motion.}, Address = {Dunedin, New Zealand}, Author = {Mark Williams}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:37:20 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {96/25i}, Size = {808 KB}, Title = {Spatial data acquisition from motion video}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1994-09, Abstract = {It is virtually impossible to know everything about any facet of computing, as it changes on almost a daily basis. Having said that, I believe it is worth sharing some of the knowledge that I have gained as a result of 5 years of study and experimentation with viruses and virus defense strategies, as well as having personally tested nearly 50 anti-virus products.}, Address = {Dunedin, New Zealand}, Author = {Henry B. Wolfe}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:21:06 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {94/9}, Title = {Viruses: {W}hat can we really do?}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1996-12, Address = {Dunedin, New Zealand}, Author = {Henry B. Wolfe}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:55:13 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {96/12}, Size = {72 KB}, Title = {Information warfare: {W}here are the threats?}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1996-10, Abstract = {Cryptography is the art or science, depending on how you look at it, of keeping messages secure. It has been around for a couple of thousand years in various forms. The Spartan Lysander and even Caesar made use of cryptography in some of their communications. Others in history include Roger Bacon, Edgar Allan Poe, Geoffrey Chaucer, and many more. By today's standards, cryptographic techniques through the ages, right up to the end of World War I, were pretty primitive. With the development of electro-mechanical devices, cryptography came of age. The subsequent evolution of the computer has raised the level of security that cryptography can provide in communications and data storage.}, Address = {Dunedin, New Zealand}, Author = {Henry B.
Wolfe}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:20 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {96/10}, Title = {Politics and techniques of data encryption}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1996-11, Abstract = {In today's world most businesses, large and small, depend on their computer(s) to provide vital functions consistently and without interruption. In many organizations the loss of the computer function could mean the difference between continued operation and shutdown. Reliability and continuity, therefore, become the critical aspects of any computer system(s) currently in use. This paper attempts to describe some of the most important issues any organization should address in order to reduce its risk of computer-related failure.}, Address = {Dunedin, New Zealand}, Author = {Henry B. Wolfe}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:20 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {96/11}, Title = {Reasonable security safeguards for small to medium organisations}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1997-09, Abstract = {Privacy is one of the most fundamental of human rights. It is not a privilege granted by some authority or state. It is, in fact, necessary for each human being's normal development and survival. Those nations which have in the past followed, and currently follow, the notion that they have the authority and/or moral high ground to grant or deny privacy to their citizens are notable for their other human rights violations. This paper is centered on the above premise and will offer the reader some good news and some bad news. Most importantly, it will put the reader on notice that our privacy is constantly under attack from one vested interest or another, and that each and every one of us must be vigilant in the protection of our private matters. It is common in New Zealand to assume that anything secret is bad. This is an extremely na{\"\i}ve position for any intelligent individual to take. The old phrase ``if you haven't got anything to hide, then you shouldn't mind{\ldots}'' is often used to intimidate, manipulate or coerce an individual to ``confess'' or share information that he/she initially believes to be confidential, private or otherwise not for sharing with others. Secrecy is neither good nor bad in and of itself. It is merely a factual description of the condition of some information. Now for some good news. There are a number of technological devices and procedures that can be used to enhance one's privacy. The bad news is that most, if not all, can be easily defeated with other technological advances.}, Address = {Dunedin, New Zealand}, Author = {Henry B. Wolfe}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:20 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {July}, Number = {97/09}, Title = {Privacy enhancing technology}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1998-02, Abstract = {Electronic security in this day and age covers a wide variety of techniques. One of the most important areas that must be addressed is that of commerce on the Internet. The Internet is an insecure medium, to say the least.
Every message sent must pass through many computers that are most likely controlled by unrelated and untrusted organizations before it ultimately reaches its final destination. At any one of these relays the information within the message can be scrutinized, analyzed and/or copied for later reference. There are documented and suspected instances of surveillance of Internet traffic. It has been suggested that several of the major communication switches (through which 90% or more of Internet traffic must pass) have permanent surveillance in place. Another insidious but less obvious fact about Internet use is that messages, once sent, are not discarded, nor do they disappear forever. Usually, at one or more relays, copies of messages are archived and kept for differing time periods. Most ordinary users are not aware that messages sent six months ago may be able to be retrieved. That fact could have serious legal ramifications for the sender. At this time, cryptography is really the only effective method that can be used to protect Internet transactions and communications from unauthorized interception. Unauthorized means anyone to whom you have not expressly given permission to read your private communications. Cryptography is the art or science of hidden writing. Plain text (your message in readable form) is modified using an algorithm (like a mathematical equation) that requires at least one special variable (your special private key that no one else knows) to create ciphered text (your message in unreadable form). At the destination, the person for whom the message is meant must have the ``special key'' in order to be able to unlock the ciphered message. Not all encryption is created equal, nor does it necessarily provide equivalent security. It would be wrong to intimate that merely using ``encryption'' to protect your communication is enough. There are other factors at work here as well, and they have to do with the politics of privacy. I have often heard it said in New Zealand that ``if you have nothing to hide then it shouldn't matter who reads your communications''. Of course, that opinion is na{\"\i}ve and does not represent reality in any meaningful way.}, Address = {Dunedin, New Zealand}, Author = {Henry B. Wolfe}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:46:29 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {98/02}, Title = {Electronic security}, Type = {Discussion paper}, Year = {1998}} @techreport{dp1994-13, Abstract = {This paper reviews the research and practice of how computer-based output information has been presented in nine different information display formats, and the suitability of their use in environments ranging from static, reference-type situations to complex, dynamic situations. The review, while not generating conclusive results, suggests that displays are more than a platform on which to place information. Instead, care should be taken to organise, lay out, and pre-process the information so that it enhances the communication between computer and human. The information on the screen should also be designed to compensate for human cognitive limitations. For instance, human weakness in integrating information across time and multiple sources could be assisted by display formats that integrate the information in the display rather than having the user attempt to integrate that information mentally.
If this is the desired outcome, information designers must start to consider performing analyses that help them understand the demands on the human information processing system, and hence how information can be presented to compensate for this weakness. This would have to be further investigated in subsequent research.}, Address = {Dunedin, New Zealand}, Author = {William B.L. Wong}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:21:25 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {information display design, output information, visual display formats}, Month = {July}, Number = {94/13}, Size = {1.6 MB}, Title = {Information display design: {A} survey of visual display formats}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1997-12, Address = {Dunedin, New Zealand}, Author = {William B.L. Wong}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-11 09:53:29 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {97/12}, Size = {60 KB}, Title = {The ecological approach to interface design in intentional domains}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1996-07, Abstract = {The purpose of this paper is to report on an experiment conducted to evaluate the feasibility of an empirical approach for translating a cognitive schema into a display structure. This experiment is part of a series of investigations aimed at determining how information about dynamic environments should be portrayed to facilitate decision making. Studies to date have generally derived an information display organisation that is largely based on a designer's experience, intuition and understanding of the processes. In this study we report on how we attempted to formalise this design process so that, if the procedures were adopted, other less experienced designers would still be able to objectively formulate a display organisation that is just as effective. This study is based on the first stage of the emergency dispatch management process, the call-taking stage. The participants in the study were ambulance dispatch officers from the Dunedin-based Southern Regional Communications Centre of the St. John's Ambulance Service in New Zealand.}, Address = {Dunedin, New Zealand}, Author = {William B.L. Wong and David P. O'Hare and Philip J. Sallis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:21 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {96/07}, Title = {Experimental transformation of a cognitive schema into a display structure}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1996-18, Abstract = {This paper reports on how the Critical Decision Method, a cognitive task analysis technique, was employed to identify the goal states of tasks performed by dispatchers in a dynamic environment, the Sydney Ambulance Co-ordination Centre. The analysis identified five goal states: Notification; Situation awareness; Planning resource to task compatibility; Speedy response; Maintain history of developments. These goals were then used to guide the development of display concepts that support decision strategies invoked by dispatchers in this task environment.}, Address = {Dunedin, New Zealand}, Author = {William B.L. Wong and David P. O'Hare and Philip J.
Sallis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:40:28 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {critical decision method (CDM), cognitive task analysis, cognitive engineering, ambulance dispatch, command and control, information portrayal, display design, decision support}, Month = {September}, Number = {96/18}, Size = {148 KB}, Title = {A goal-oriented approach for designing decision support displays in dynamic environments}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1995-06, Abstract = {This paper reports on preliminary findings of a cognitive task analysis conducted at an ambulance despatch control centre. The intense and dynamic nature of the decision making environment is first described, and the decision process modelled in an attempt to identify decision strategies used by the Communications Officers. Some information portrayal requirements stemming from one of the decision processes are then discussed, and these requirements are then translated into a proposed display solution.}, Address = {Dunedin, New Zealand}, Author = {William B.L. Wong and Philip J. Sallis and David P. O'Hare}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:16:29 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {information portrayal, information display design, decision support design, decision modelling, naturalistic decision making, critical decision method, ambulance service}, Month = {July}, Number = {95/6}, Size = {244 KB}, Title = {Information portrayal for decision support in dynamic intentional process environments}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1995-03, Abstract = {It is increasingly recognised that the manner in which information required by a decision maker is portrayed is as important as providing appropriate information. In dynamic intentional process environments such as emergency dispatch control, where the problems are non-trivial and time is tightly constrained, it is important to portray items of information that are used together close to one another, or appropriately integrated. This is important in speeding up the decision maker's interpretation of the information and assessment of the state of the situation. But how should information be portrayed so that it may be assimilated quickly in such situations? To answer this question, a framework for analysis was developed to guide the investigation. This framework brings together the decisions made, the information used, the source and accessibility of the source, and how the information is used in each decision, thereby identifying the information portrayal requirements. This framework will be presented in this paper. However, before discussing the framework, it is necessary to introduce the concept of decision making in naturalistic environments, as it is within this context of dynamic decision making that the problem of information portrayal is studied. The paper will examine the characteristics of dynamic intentional processes, and then briefly describe the environment of one example of an intentional process environment, an emergency control centre, which formed the basis of the study.
The cognitive task analysis techniques used to elicit the decision processes and the information portrayal requirements will also be described, and finally the initial results of the study will be presented.}, Address = {Dunedin, New Zealand}, Author = {William B.L. Wong and Philip J. Sallis and David P. O'Hare}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:22 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {95/3}, Title = {Information portrayal for intentional processes: {A} framework for analysis}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1997-04, Abstract = {This study is part of research that is investigating the notion that human performance in dynamic and intentional decision making environments, such as ambulance dispatch management, can be improved if information is portrayed in a manner that supports the decision strategies invoked to achieve the goal states of the process being controlled. Hence, in designing interfaces to support real-time dispatch management decisions, it is suggested that it would be necessary to first discover the goal states and the decision strategies invoked during the process, and then portray the required information in a manner that supports such a user group's decision making goals and strategies. The purpose of this paper is to report on the experiences gleaned from the use of a cognitive task analysis technique called the Critical Decision Method as an elicitation technique for determining information portrayal requirements. This paper firstly describes how the technique was used in a study to identify the goal states and decision strategies invoked during the dispatch of ambulances at the Sydney Ambulance Co-ordination Centre. The paper then describes how the interview data was analysed within and between cases in order to reveal the goal states of the ambulance dispatchers. A brief description of the resulting goal states follows, although a more detailed description of the goal states and their resulting display concepts has been reported elsewhere (Wong et al., 1996b). Finally, the paper concludes with a set of observations and lessons learnt from the use of the Critical Decision Method for developing display design concepts in dynamic intentional environments.}, Address = {Dunedin, New Zealand}, Author = {William B.L. Wong and Philip J. Sallis and David P. O'Hare}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:23 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {display design, cognitive task analysis, Critical Decision Method, ambulance dispatch management}, Month = {May}, Number = {97/04}, Size = {100 KB}, Title = {Eliciting information portrayal requirements: {E}xperiences with the critical decision method}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1997-15, Abstract = {Fuzzy neural networks provide for the extraction of fuzzy rules from artificial neural network architectures. In this paper we describe a general method, based on statistical analysis of the training data, for the selection of fuzzy membership functions to be used in connection with fuzzy neural networks. The technique is first described and then illustrated by means of two experimental examinations.}, Address = {Dunedin, New Zealand}, Author = {Qingqing Zhou and Martin K. Purvis and Nikola K.
Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:23 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {97/15}, Size = {172 KB}, Title = {A membership function selection method for fuzzy neural networks}, Type = {Discussion paper}, Year = {1997}}
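%% Illustrative sketch for dp1997-13/dp1997-11 above, which profile Usenet
%% messages using readability scores obtained with "recognised assessment
%% methods". As a rough example only (the papers do not specify their
%% implementation), the Flesch Reading Ease formula can be computed with a
%% crude vowel-group syllable counter; the tokenisation below is an assumption.

import re

def count_syllables(word):
    # Approximate syllables as runs of vowels -- a crude but common heuristic.
    return max(1, len(re.findall(r"[aeiouy]+", word.lower())))

def flesch_reading_ease(text):
    # Flesch Reading Ease: 206.835 - 1.015*(words/sentences) - 84.6*(syllables/words).
    sentences = max(1, len(re.findall(r"[.!?]+", text)))
    words = re.findall(r"[A-Za-z']+", text)
    n = max(1, len(words))
    syllables = sum(count_syllables(w) for w in words)
    return 206.835 - 1.015 * (n / sentences) - 84.6 * (syllables / n)

# Higher scores indicate easier text; short e-mail sentences score high.
print(round(flesch_reading_ease("This is a short message. It is easy to read."), 1))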
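%% Illustrative sketch for dp1999-23 above. This is NOT Smith's method itself
%% (which works from a functional dependency diagram and derives foreign key
%% links, the incomplete part of the original rules); it only conveys the
%% general flavour of deriving relations by grouping functional dependencies
%% on their determinants. All names and the input format are assumptions.

from collections import defaultdict

def derive_relations(fds):
    # fds: iterable of (determinant, dependent) pairs, where the determinant
    # is a tuple of attributes, e.g. (("order_no",), "customer_no") encodes
    # the functional dependency order_no -> customer_no.
    grouped = defaultdict(set)
    for determinant, dependent in fds:
        grouped[tuple(determinant)].add(dependent)
    # One relation per distinct determinant: key attributes plus dependents.
    return [set(det) | deps for det, deps in grouped.items()]

fds = [
    (("order_no",), "customer_no"),
    (("order_no",), "order_date"),
    (("customer_no",), "customer_name"),
]
for rel in derive_relations(fds):
    print(sorted(rel))

# Note: customer_no is a non-key attribute of the first relation and the key
# of the second, so it should become a foreign key link -- exactly the step
# that Smith's original rules handled incompletely.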
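%% Illustrative sketch for dp1997-15 above, which selects fuzzy membership
%% functions from a statistical analysis of the training data. As a rough
%% illustration of that general idea only (not the authors' actual procedure),
%% the sketch below centres triangular membership functions on sample
%% statistics; the function names, the three-label layout and the placement
%% at +/- one standard deviation are all assumptions made for this example.

import numpy as np

def triangular(x, a, b, c):
    # Triangular membership function: rises from a to a peak at b, falls to c.
    x = np.asarray(x, dtype=float)
    rising = np.clip((x - a) / (b - a), 0.0, 1.0)
    falling = np.clip((c - x) / (c - b), 0.0, 1.0)
    return np.minimum(rising, falling)

def select_membership_functions(samples):
    # Derive "low"/"medium"/"high" triangle parameters for one input feature
    # from its sample mean and standard deviation.
    mu, sigma = float(np.mean(samples)), float(np.std(samples))
    return {
        "low":    (mu - 3 * sigma, mu - sigma, mu),
        "medium": (mu - sigma,     mu,         mu + sigma),
        "high":   (mu,             mu + sigma, mu + 3 * sigma),
    }

# Usage: fuzzify one observation of a synthetic training feature.
feature = np.random.normal(loc=5.0, scale=2.0, size=200)
memberships = {name: float(triangular(6.0, *abc))
               for name, abc in select_membership_functions(feature).items()}
print(memberships)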
%% This BibTeX bibliography file was created using BibDesk. %% http://bibdesk.sourceforge.net/ %% Created for Nigel Stanger at 2011-07-25 16:59:08 +1200 %% Saved with string encoding Western (Mac OS Roman) @techreport{dp2011-05, Abstract = {In Normative Multi-Agent Systems (NorMAS), researchers have investigated several mechanisms for agents to learn norms. In the context of agents learning norms, the objectives of the paper are three-fold. First, this paper aims at providing an overview of different mechanisms employed by researchers for norm learning. Second, it discusses the contributions of different mechanisms to the three aspects of active learning namely learning by doing, observing and com- municating. Third, it compares two normative architectures which have an emphasis on the learning of norms. It also discusses the features that should be considered in future norm learning architectures.}, Address = {Dunedin, New Zealand}, Author = {Bastin Tony Roy Savarimuthu}, Date-Added = {2011-07-25 16:39:52 +1200}, Date-Modified = {2011-07-25 16:39:52 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {norms, learning, agents, mechanisms}, Month = may, Number = {2011/05}, Title = {Norm learning in multi-agent societies}, Type = {Discussion paper}, Year = {2011}} @techreport{dp2011-04, Abstract = {Previous research on modelling and monitoring norms, contracts and commitments has studied the semantics of concepts such as obligation, permission, prohibition and commitment; languages for expressing behavioural constraints (such as norms or contracts) to be followed by agents in specific contexts; and mechanisms for run-time monitoring of fulfilment and violation of these constraints. However, there has been little work that provided all of these features while also allowing the current expectations of agents, and the fulfilment and violation of these expectations to be expressed as first-class constructs in the language. This paper demonstrates the benefits of providing this capability by considering a variety of use cases and demonstrating how these can be addressed as applications of a previously defined temporal logic of expectations and an associated monitoring technique.}, Address = {Dunedin, New Zealand}, Author = {Stephen Cranefield and Michael Winikoff and Wamberto Vasconcelos}, Date-Added = {2011-05-06 10:36:50 +1200}, Date-Modified = {2011-05-06 10:36:50 +1200}, Institution = {Department of Information Science, University of Otago}, Month = feb, Number = {2011/04}, Title = {Modelling and monitoring interdependent expectations}, Type = {Discussion paper}, Year = {2011}} @techreport{dp2011-03, Abstract = {Second Life is a multi-purpose online virtual world that provides a rich platform for remote human interaction. It is increasingly being used as a simulation platform to model complex human interactions in diverse areas, as well as to simulate multi-agent systems. It would therefore be beneficial to provide techniques allowing high-level agent development tools, especially cognitive agent platforms such as belief-desire-intention (BDI) programming frameworks, to be interfaced to Second Life. This is not a trivial task as it involves mapping potentially unreliable sensor readings from complex Second Life simulations to a domain-specific abstract logical model of observed properties and/or events. This paper investigates this problem in the context of agent interactions in a multi-agent system simulated in Second Life. 
We present a framework which facilitates the connection of any multi-agent platform with Second Life, and demonstrate it in conjunction with an extension of the Jason BDI interpreter.}, Address = {Dunedin, New Zealand}, Author = {Surangika Ranathunga and Stephen Cranefield and Martin Purvis}, Date-Added = {2011-02-04 15:54:36 +1300}, Date-Modified = {2011-02-09 16:49:21 +1300}, Institution = {Department of Information Science, University of Otago}, Month = feb, Number = {2011/03}, Title = {Interfacing a cognitive agent platform with Second Life}, Type = {Discussion paper}, Year = {2011}} @techreport{dp2011-02, Abstract = {Although expectations play an important role in designing cognitive agents, agent expectations are not explicitly being handled in most common agent programming environments. There are techniques for monitoring fulfilment and violation of agent expectations, however they are not linked with common agent programming environments so that agents can be easily programmed to respond to these circumstances. This paper investigates how expectation monitoring tools can be tightly integrated with the Jason BDI agent interpreter by extending it with built-in actions to initiate and terminate monitoring of expectations, and demonstrates how an external expectation monitor is linked with Jason using these internal actions.}, Address = {Dunedin, New Zealand}, Author = {Surangika Ranathunga and Stephen Cranefield and Martin Purvis}, Date-Added = {2011-02-04 13:15:44 +1300}, Date-Modified = {2011-02-09 16:41:53 +1300}, Institution = {Department of Information Science, University of Otago}, Month = feb, Number = {2011/02}, Title = {Integrating expectation handling into Jason}, Type = {Discussion paper}, Year = {2011}} @techreport{dp2011-01, Abstract = {Container terminals play a critical role in international shipping and are under pressure to cope with increasing container traffic. The problem of managing container terminals effectively has a number of characteristics which make agents a suitable technology to consider applying. Container terminals involve the operation of distributed entities (e.g. quay cranes, straddle carriers) which coordinate to achieve competing goals in a dynamic environment. This paper describes a joint industry-university project which has explored the applicability of agent technology to the domain of container terminal management. We describe an emulation platform of a container terminal based on the JADE agent framework, along with two optimisations that have been developed and integrated with the emulator: allocating container moves to machines through negotiation, and allocating containers to yard locations through an evolutionary algorithm.}, Address = {Dunedin, New Zealand}, Author = {Michael Winikoff and Hanno-Felix Wagner and Thomas Young and Stephen Cranefield and Roger Jarquin and Guannan Li and Brent Martin and Rainer Unland}, Date-Added = {2011-02-04 13:15:06 +1300}, Date-Modified = {2011-02-04 13:15:06 +1300}, Institution = {Department of Information Science, University of Otago}, Keywords = {container terminal management, container terminal optimisation, logistics}, Month = jan, Number = {2011/01}, Title = {Agent-based container terminal optimisation}, Type = {Discussion paper}, Year = {2011}} @techreport{dp2010-07, Abstract = {In this article we review contemporary multi-agent system architectures and implementations. We particularly focus on asynchronous message passing mechanisms. 
Our motivation is to explore two main areas in the context of multi-agent systems: the concept of micro-agents and the asynchronous message passing architectures. In the article we take a close look at the emerging area of micro-agent-based systems and contrast them with selected representatives from the general field of agent architectures. We provide historical references and examples of contemporary implementations supporting the hierarchical micro-agent-based software engineering paradigm. In addition, we also investigate various implementation mechanisms for efficient asynchronous message passing between large numbers of small interacting software components with regards to their use in the context of multi-agent systems. The results show a trade-off between performance, fairness and usability as key problem when selecting an appropriate solution. Future investigations into alternative concurrency handling mechanisms for better support of micro-agent architectures are suggested.}, Address = {Dunedin, New Zealand}, Author = {Christopher Frantz and Mariusz Nowostawski and Martin Purvis}, Date-Added = {2011-01-21 13:20:28 +1300}, Date-Modified = {2011-01-21 13:20:28 +1300}, Institution = {Department of Information Science, University of Otago}, Keywords = {multi-agent systems, micro-agents, asynchronous, communication, message passing}, Month = nov, Number = {2010/07}, Title = {Multi-agent platforms and asynchronous message passing: {F}rameworks overview}, Type = {Discussion paper}, Year = {2010}} @techreport{dp2011-06, Abstract = {Over the last few years, the voluminous increase in the academic research publications has gained significant research attention. Research has been carried out exploring novel ways of providing information services using the research content. However, the task of extracting meaningful information from research documents remains a challenge. This paper presents our research work carried out for developing intelligent information systems, exploiting the research content. We present in this paper, a linked data application which uses a new semantic publishing model for providing value added information services for the research community. The paper presents a conceptual framework for modelling contexts associated with sentences in research articles and discusses the Sentence Context Ontology, which is used to convert the information extracted from research documents into machine-understandable data. The paper also reports on supervised learning experiments carried out using conditional probabilistic models for achieving automatic context identification.}, Address = {Dunedin, New Zealand}, Author = {M.A. Angrosh and Stephen Cranefield and Nigel Stanger}, Date-Added = {2010-11-17 14:50:10 +1300}, Date-Modified = {2011-07-25 16:47:40 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {semantic publishing models, sentence context ontology, linked data application, conditional random fields, maximum entropy markov models, citation classification, sentence context identification}, Month = jul, Number = {2011/06}, Title = {Contextual information retrieval in research articles: Semantic publishing tools for the research community}, Type = {Discussion paper}, Year = {2011}} @techreport{dp2010-06, Abstract = {Changes in population demographics and lifestyle choices have led to an increased risk of higher mortality from house fires. 
The current average of 27 house fire related deaths per year is likely to be exceeded in the following years. The aging population with its natural increase in age related hearing loss and the younger demographic only having mobile phones and no land-lines means there is a need for alternative warning methods of smoke alarm activation. This project develops a proof of concept application that runs on a smart phone and detects an activated smoke alarm. If there is no response by the occupants automatically trigger an alarm to a predefined contact group. This application can reduce the possibility of death or injury by persons unable to respond to an activated alarm.}, Address = {Dunedin, New Zealand}, Author = {Alan Woods and Mariusz Nowostawski}, Date-Added = {2010-11-12 12:38:29 +1300}, Date-Modified = {2010-12-03 09:43:34 +1300}, Institution = {Department of Information Science, University of Otago}, Month = nov, Number = {2010/06}, Title = {Smoke alarm detection, broadcast notifications and social implications}, Type = {Discussion paper}, Year = {2010}} @techreport{dp2010-05, Address = {Dunedin, New Zealand}, Author = {Alan Woods and Mariusz Nowostawski}, Date-Added = {2010-11-12 12:38:12 +1300}, Date-Modified = {2010-11-12 12:38:12 +1300}, Institution = {Department of Information Science, University of Otago}, Month = nov, Number = {2010/05}, Title = {Feasibility study of fall detection with the use of mobile smartphones}, Type = {Discussion paper}, Year = {2010}} @techreport{dp2010-04, Abstract = {Modeling of financial market data for detecting important market characteristics as well as their abnormalities plays a key role in identifying their behavior. Researchers have proposed different types of techniques to model market data. One such model proposed by Sergie Maslov, models the behavior of a limit order book. Being a very simple and interesting model, it has several drawbacks and limitations. This paper analyses the behavior of the Maslov model and proposes several variants of it to make the original Maslov model more realistic. The price signals generated from these models are analyzed by comparing with real life stock data and it was shown that the proposed variants of the Maslov model are more realistic than the original Maslov model.}, Address = {Dunedin, New Zealand}, Author = {Rasika M. Withanawasam and Peter A. Whigham and Timothy Crack and I. M. Premachandra}, Date-Added = {2010-11-12 12:37:26 +1300}, Date-Modified = {2010-11-12 12:37:26 +1300}, Institution = {Department of Information Science, University of Otago}, Month = feb, Number = {2010/04}, Size = {832 KB}, Title = {An empirical investigation of the {M}aslov limit order market model}, Type = {Discussion paper}, Year = {2010}} @techreport{dp2010-03, Abstract = {In normative multi-agent systems, the question of ``how an agent identifies a norm in an agent society'' has not received much attention. This paper aims at addressing this question. To this end, this paper proposes an architecture for norm identification for an agent. The architecture is based on observation of interactions between agents. This architecture enables an autonomous agent to identify the norms in a society using the Candidate Norm Inference (CNI) algorithm. The CNI algorithm uses association rule mining approach to identify sequences of events as candidate norms. When a norm changes, the agent using our architecture will be able to modify the norm and also remove a norm if it does not hold in its society. 
Using simulations we demonstrate how an agent makes use of the norm identification framework.}, Address = {Dunedin, New Zealand}, Author = {Bastin Tony Roy Savarimuthu and Stephen Cranefield and Maryam A. Purvis and Martin K. Purvis}, Date-Added = {2010-04-12 11:59:13 +1200}, Date-Modified = {2010-04-12 11:59:13 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {norms, agents, architecture, identification, simulation, societies}, Month = feb, Number = {2010/03}, Size = {562 KB}, Title = {Norm identification in multi-agent societies}, Type = {Discussion paper}, Year = {2010}} @techreport{dp2010-02, Abstract = {Novelty detection is an important functionality that has found many applications in information retrieval and processing. In this paper we propose a novel framework that deals with novelty detection for multiple-scene image sets. Working with wildlife image data, the framework starts with image segmentation, followed by feature extraction and classification of the image blocks extracted from image segments. The labelled image blocks are then scanned through to generate a co-occurrence matrix of object labels, representing the semantic context within the scene. The semantic co-occurrence matrices then undergo binarization and principal component analysis for dimension reduction, forming the basis for constructing one-class models for each scene category. An algorithm for outlier detection that employs multiple one-class models is proposed. An advantage of our approach is that it can be used for scene classification and novelty detection at the same time. Our experiments show that the proposed approach algorithm gives favourable performance for the task of detecting novel wildlife scenes, and binarization of the label co-occurrence matrices helps to significantly increase the robustness in dealing with the variation of scene statistics.}, Address = {Dunedin, New Zealand}, Author = {Suet-Peng Yong and Jeremiah D. Deng and Martin K. Purvis}, Date-Added = {2010-02-01 13:15:53 +1300}, Date-Modified = {2010-05-04 11:15:34 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {context, co-occurrence matrix, semantics, novel image, multi-class}, Month = jan, Number = {2010/02}, Size = {3.4 MB}, Title = {Modelling semantic context for novelty detection in wildlife scenes}, Type = {Discussion paper}, Year = {2010}} @techreport{dp2010-01, Abstract = {In this paper we consider the broader issue of gaining assurance that an agent system will behave appropriately when it is deployed. We ask to what extent this problem is addressed by existing research into formal verification. We identify a range of issues with existing work which leads us to conclude that, broadly speaking, verification approaches on their own are too narrowly focussed. We argue that a shift in direction is needed, and outline some possibilities for such a shift in direction.}, Address = {Dunedin, New Zealand}, Author = {Michael Winikoff}, Date-Added = {2010-02-01 13:03:08 +1300}, Date-Modified = {2010-02-01 13:03:08 +1300}, Institution = {Department of Information Science, University of Otago}, Month = jan, Number = {2010/01}, Size = {296 KB}, Title = {Assurance of agent systems: What role should formal verification play?}, Type = {Discussion paper}, Year = {2010}} @techreport{dp2009-01, Abstract = {In this paper we discuss a tag-based model that facilitates knowledge sharing in the context of agents playing the knowledge sharing game. 
Sharing the knowledge incurs a cost for the sharing agent, and thus non-sharing is the preferred option for selfish agents. Through agent-based simulations we show that knowledge sharing is possible even in the presence of non-sharing agents in the population. We also show that the performance of an agent society can be better when some agents bear the cost of sharing instead of the whole group sharing the cost.}, Address = {Dunedin, New Zealand}, Author = {Sharmila Savarimuthu and Maryam Purvis and Martin Purvis}, Date-Added = {2010-01-11 14:04:00 +1300}, Date-Modified = {2010-01-11 14:04:00 +1300}, Institution = {Department of Information Science, University of Otago}, Keywords = {cooperation, altruism, tags, knowledge sharing, multi-agent based simulation, artificial society}, Month = feb, Number = {2009/01}, Size = {424 KB}, Title = {Tag based model for knowledge sharing in agent society}, Type = {Discussion paper}, Year = {2009}} @techreport{dp2009-07, Abstract = {While wireless sensor networks (WSN) are increasingly equipped to handle more complex functions, in-network processing still require the battery powered sensors to judiciously use their constrained energy so as to prolong the effective network life time.There are a few protocols using sensor clusters to coordinate the energy consumption in a WSN. To cope with energy heterogeneity among sensor nodes, a modified clustering algorithm is proposed with a three-tier sensor node setting. Simulation has been conducted to evaluate the new clustering algorithm and favorable results are obtained especially in heterogeneous energy settings.}, Address = {Dunedin, New Zealand}, Author = {Femi A. Aderohunmu and Jeremiah D. Deng and Martin K. Purvis}, Date-Added = {2009-10-05 16:49:19 +1300}, Date-Modified = {2010-02-01 13:51:51 +1300}, Institution = {Department of Information Science, University of Otago}, Keywords = {wireless sensor network, heterogeneous settings, clustering}, Month = oct, Number = {2009/07}, Size = {881 KB}, Title = {Enhancing clustering in wireless sensor networks with energy heterogeneity}, Type = {Discussion paper}, Year = {2009}} @techreport{dp2009-06, Abstract = {This research focuses on the design and development of an IBIS-based tool called IBISMod, which facilitates a distributed and collaborative decision-making process. IBIS-based systems help analysts and designers in the process of formulating the requirements and design issues associated with complex problems that are difficult to specify. In particular, it captures the rationale behind group decision-making process. The group members are usually distributed over a network and may be working together concurrently. IBISMod is based on Rittel's Issue-Based Information System. This particular implementation is a web-based tool that makes it possible for the participants to work together on a specific problem while they may be physically present in different locations. In order to improve the interactivity, speed and usability of the framework, the AJAX approach has been adopted.}, Address = {Dunedin, New Zealand}, Author = {Toktam Ebadi and Maryam A. Purvis and Martin K. 
Purvis}, Date-Added = {2009-10-01 18:01:45 +1300}, Date-Modified = {2009-10-01 18:01:45 +1300}, Institution = {Department of Information Science, University of Otago}, Month = jun, Number = {2009/06}, Size = {500 KB}, Title = {A collaborative Web-based issue based information system (IBIS) framework}, Type = {Discussion paper}, Year = {2009}} @techreport{dp2009-05, Abstract = {Software development effort estimation is important for quality management in the software development industry, yet its automation still remains a challenging issue. Applying machine learning algorithms alone often can not achieve satisfactory results. In this paper, we present an integrated data mining framework that incorporates domain knowledge into a series of data analysis and modeling processes, including visualization, feature selection, and model validation. An empirical study on the software effort estimation problem using a benchmark dataset shows the effectiveness of the proposed approach.}, Address = {Dunedin, New Zealand}, Author = {Jeremiah D. Deng and Martin K. Purvis and Maryam A. Purvis}, Date-Added = {2009-09-17 15:46:48 +1200}, Date-Modified = {2009-09-17 15:46:48 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {software effort estimation, machine learning}, Month = jun, Number = {2009/05}, Size = {260 KB}, Title = {Software effort estimation: {H}armonizing algorithms and domain knowledge in an integrated data mining approach}, Type = {Discussion paper}, Year = {2009}} @techreport{dp2009-04, Abstract = {Sapstain is considered a defect that must be removed from processed wood. So far, research in automatic wood inspection systems has been mostly limited to dealing with knots. In this paper, we extract a number of colour and texture features from wood pictures. These features are then assessed using machine learning techniques via feature selection, visualization, and finally classification. Apart from average colour and colour opponents, texture features are also found to be useful in classifying sapstain. This implies a significant modification to the domain understanding that sapstain is mainly a discolourization effect. Preliminary results are presented, with satisfactory classification performance using only a few selected features. It is promising that a real world wood inspection system with the functionality of sapstain detection can be developed.}, Address = {Dunedin, New Zealand}, Author = {Jeremiah D. Deng}, Date-Added = {2009-06-08 14:57:36 +1200}, Date-Modified = {2009-06-09 16:58:39 +1200}, Institution = {Department of Information Science, University of Otago}, Month = apr, Number = {2009/04}, Size = {884 KB}, Title = {Automatic sapstain detection in processed timber through image feature analysis}, Type = {Discussion paper}, Year = {2009}} @techreport{dp2008-04, Abstract = {In a multi-agent system, a single agent may not be capable of completing complex tasks. Therefore agents are required to form a team to fulfill the task requirements. In this paper an agent model is introduced that facilitates cooperation among agents. A multi-threaded multi-agent simulation framework is designed to test the model. The experimental results demonstrate that the model is significantly useful in achieving cooperation under various environmental constraints. 
It also allows agents to adjust their teammate selection strategies according to environmental constraints.}, Address = {Dunedin, New Zealand}, Author = {Toktam Ebadi and Maryam Purvis and Martin Purvis}, Date-Added = {2009-06-08 13:59:38 +1200}, Date-Modified = {2009-06-09 16:58:39 +1200}, Institution = {Department of Information Science, University of Otago}, Month = oct, Number = {2008/04}, Size = {176 KB}, Title = {Partner selection mechanisms for agent cooperation}, Type = {Discussion paper}, Year = {2008}} @techreport{dp2008-03, Abstract = {Before deploying a software system we need to assure ourselves (and stake-holders) that the system will behave correctly. This assurance is usually done by testing the system. However, it is intuitively obvious that adaptive systems, including agent-based systems, can exhibit complex behaviour, and are thus harder to test. In this paper we examine this ``obvious intuition'' in the case of Belief-Desire-Intention (BDI) agents. We analyse the size of the behaviour space of BDI agents and show that although the intuition is correct, the factors that influence the size are not what we expected them to be; specifically, we found that the introduction of failure handling had a much larger effect on the size of the behaviour space than we expected. We also discuss the implications of these findings on the testability of BDI agents.}, Address = {Dunedin, New Zealand}, Author = {Michael Winikoff and Stephen Cranefield}, Date-Added = {2009-06-08 13:58:46 +1200}, Date-Modified = {2009-06-09 16:58:39 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {testing, complexity, validation, belief-desire-intention (BDI)}, Month = nov, Number = {2008/03}, Size = {472 KB}, Title = {On the testability of {BDI} agent systems}, Type = {Discussion paper}, Year = {2008}} @techreport{dp2008-01, Abstract = {This aim of this project was to the explore JAIN SLEE standard 1.1 programming model and SIMPLE (Sip for Instant Messaging and Presence Leveraging Extensions) protocols, developing a Voice over Internet Protocol (VoIP) application with functions that include making a phone call, instant messaging to peers, and at the same time providing users with buddy list information of their peers. The JAIN SLEE platform RhinoSDK 2.0 (developed by OpenCloud) was to be used and an example application that is included with RhinoSDK 2.0 was to be extended. During the project the phone call functionality was scoped out of the project and the focus was set on implementing the instant messaging and presence functionality. This report describes the functions that have been implemented on the server side and client side of this VoIP application.}, Address = {Dunedin, New Zealand}, Author = {Dee Milic and Dong Zhou and Hailing Situ}, Date-Added = {2009-06-08 13:57:39 +1200}, Date-Modified = {2009-06-10 15:56:39 +1200}, Institution = {Department of Information Science, University of Otago}, Month = mar, Number = {2008/01}, Size = {221 KB}, Title = {{VoIP} application development using {SIP} protocol}, Type = {Discussion paper}, Year = {2008}} @techreport{dp2008-02, Abstract = {Gartner has for some time been reporting the potential for virtual world technology to become the next wave of the Internet, delivering what is known as the Web3.D environment. This is characterised by a high level of user participation through immersion in the virtual world. Gartner has predicted that by 2011, 80% of internet users will be regular users of Web3.D technology. 
Project LifeLink was initiated to discover what opportunities for Telecom might exist in the growth of business and consumer interest in virtual worlds. This has focused on a number of technologies, in particular Second Life, OpenSimulator (OpenSIM) and JAIN SLEE. The project has been run by Telecom with coordination and support from MediaLab, and with researchers at Canterbury and Otago Universities. This report describes the work undertaken at Otago University to implement a gateway to enable demonstration of communications between an object in Second Life and the JAIN SLEE environment in order to interoperate with external network services.}, Address = {Dunedin, New Zealand}, Author = {Nathan Lewis and Hailing Situ and Melanie Middlemiss}, Date-Added = {2009-06-08 13:56:48 +1200}, Date-Modified = {2009-06-10 15:56:55 +1200}, Institution = {Department of Information Science, University of Otago}, Month = jul, Number = {2008/02}, Size = {1.3 MB}, Title = {Report of {O}tago contributions to {T}elecom {L}ife{L}ink {P}roject}, Type = {Discussion paper}, Year = {2008}} @techreport{dp2008-05, Abstract = {Most people hold a variety of opinions on different topics ranging from sports, entertainment, spiritual beliefs to moral principles. These can be based on a personal reflection and evaluation or on their interactions with others. How do we influence others in our social network and how do they influence us and how do we reach consensus? In this paper, we present our investigations based on the use of multiple opinions (a vector of opinions) that should be considered to determine consensus in a society. We have extended Deffuant model and have tested our model on top of two well-known network topologies the Barabasi-Albert network and the Erdos-Renyi network. We have implemented a two phase filtering process determining the consensus.}, Address = {Dunedin, New Zealand}, Author = {Alya Alaali and Maryam Purvis and Bastin Tony Roy Savarimuthu}, Date-Added = {2009-06-08 13:54:29 +1200}, Date-Modified = {2009-06-09 16:58:40 +1200}, Institution = {Department of Information Science, University of Otago}, Month = oct, Number = {2008/05}, Size = {696 KB}, Title = {Vector opinion dynamics: {A}n extended model for consensus in social networks}, Type = {Discussion paper}, Year = {2008}} @techreport{dp2009-08, Abstract = {Telecommunications technologies and Internet services are experiencing unprecedented growth. Technological advances together with the growing scale of deployments are driving rapid change in the telecommunications arena. All these factors contribute to the push towards convergence on the network core. Next generation networks, programmable networks, and the converged core opens up and provides new network architectures and new converged service opportunities. The Global Network Interconnectivity (GNI) Project was established at the University of Otago in 2006 to develop expertise, provide knowledge sharing and conduct activities supporting new ICT technologies that contribute to telecommunications, multimedia, and information systems convergence. The aim of the GNI Symposium was to bring together academic and industry leaders for one day to discuss current and future issues relating to convergence in the ICT and Telecommunications arena. This report provides a summary of the day's presentations and discussion sessions.}, Address = {Dunedin, New Zealand}, Annote = {Problem with the original file: PDFLaTeX doesn't like PDF 1.5! 
Constructed manually using Preview instead.}, Author = {Melanie Middlemiss}, Date-Added = {2009-06-07 21:50:57 +1200}, Date-Modified = {2009-10-05 16:56:53 +1300}, Institution = {Department of Information Science, University of Otago}, Month = oct, Number = {2009/08}, Size = {632 KB}, Title = {2009 Global Network Interconnectivity (GNI) Symposium}, Type = {Discussion paper}, Year = {2009}} @techreport{dp2009-03, Abstract = {The problem with the uptake of new technologies such as ZigBee is the lack of development environments that help in faster application software development. This paper describes a software framework for application development using the ZigBee wireless protocol. The architecture is based on defining XML-based design interfaces that represent the profiles of ZigBee nodes that are used in the application.}, Address = {Dunedin, New Zealand}, Author = {Bastin Tony Roy Savarimuthu and Morgan Bruce and Maryam Purvis}, Date-Added = {2009-06-07 21:49:30 +1200}, Date-Modified = {2009-06-10 15:57:21 +1200}, Institution = {Department of Information Science, University of Otago}, Month = apr, Number = {2009/03}, Size = {168 KB}, Title = {A software framework for application development using {Z}ig{B}ee protocol}, Type = {Discussion paper}, Year = {2009}} @techreport{dp2009-02, Abstract = {Online virtual worlds such as Second Life provide a rich medium for unstructured human interaction in a shared simulated 3D environment. However, many human interactions take place in a structured social context where participants play particular roles and are subject to expectations governing their behaviour, and current virtual worlds do not provide any support for this type of interaction. There is therefore an opportunity to take the tools developed in the MAS community for structured social interactions between software agents (inspired by human society) and adapt these for use with the computer-mediated human communication provided by virtual worlds. This paper describes the application of one such tool for use with Second Life. A model checker for online monitoring of social expectations defined in temporal logic has been integrated with Second Life, allowing users to be notified when their expectations of others have been fulfilled or violated. Avatar actions in the virtual world are detected by a script, encoded as propositions and sent to the model checker, along with the social expectation rules to be monitored. Notifications of expectation fulfilment and violation are returned to the script to be displayed to the user. The utility of this tool is reliant on the ability of the Linden scripting language (LSL) to detect events of significance in the application domain, and a discussion is presented on how a range of monitored structured social scenarios could be realised despite the limitations of LSL.}, Address = {Dunedin, New Zealand}, Author = {Stephen Cranefield and Guannan Li}, Date-Added = {2009-06-07 21:45:12 +1200}, Date-Modified = {2009-06-09 17:16:19 +1200}, Institution = {Department of Information Science, University of Otago}, Month = may, Number = {2009/02}, Size = {188 KB}, Title = {Monitoring social expectations in {S}econd {L}ife}, Type = {Discussion paper}, Year = {2009}} @techreport{dp2007-02, Abstract = {In this paper we propose a mechanism for norm emergence based on role models. The mechanism uses the concept of normative advice, whereby the role models provide advice to the follower agents.
Our mechanism is built using two layers of networks, the social link layer and the leadership layer. The social link network represents how agents are connected to each other. The leadership network represents the network that is formed based on the role played by each agent on the social link network. The two kinds of roles are leaders and followers. We present our findings on how norms emerge on the leadership network when the topology of the social link network changes. The three kinds of social link networks that we have experimented with are fully connected networks, random networks and scale-free networks.}, Address = {Dunedin, New Zealand}, Author = {Bastin Tony Roy Savarimuthu and Stephen Cranefield and Maryam Purvis and Martin Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:41 +1200}, Institution = {Department of Information Science, University of Otago}, Month = jul, Number = {2007/02}, Size = {488 KB}, Title = {Role model based mechanism for norm emergence in artificial agent societies}, Type = {Discussion paper}, Year = {2007}} @techreport{dp2000-14, Abstract = {Electronic medical consultation is available worldwide through access to the World Wide Web (WWW). This article outlines a research study on the adoption of electronic medical consultation as a means of health delivery. It focuses on the delivery of healthcare specifically for New Zealanders, by New Zealanders. It is acknowledged that the WWW is a global market place and it is therefore difficult to identify New Zealanders' use of such a global market, but we have attempted to provide a New Zealand perspective on electronic medical consultation.}, Address = {Dunedin, New Zealand}, Author = {Brebner, C. and Jones, R. and Krisjanous, J. and Marshall, W. and Parry, G. and A. Holt}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:41 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {electronic medical consultation, on-line health, New Zealand}, Month = oct, Number = {2000/14}, Size = {80 KB}, Title = {Electronic medical consultation: {A} {N}ew {Z}ealand perspective}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2003-01, Abstract = {The process for requirements elicitation has traditionally been based on textual descriptions or graphical models using UML. While these may have worked for the design of desktop-based systems, we argue that these notations are not adequate for a dialog with mobile end users, in particular for end users in ``blue collar'' application domains. We propose an alternative modelling technique, ``Software Cinema'', based on the use of digital videos.
We discuss one particular example of using Software Cinema in the design of a user interface for a navigation system for a mobile end user.}, Address = {Dunedin, New Zealand}, Author = {Bernd Bruegge and Martin Purvis and Oliver Creighton and Christian Sandor}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:41 +1200}, Institution = {Department of Information Science, University of Otago}, Month = apr, Number = {2003/01}, Size = {301 KB}, Title = {Software cinema}, Type = {Discussion paper}, Year = {2003}} @techreport{dp2006-02, Abstract = {The purpose of this study was to create a ubiquitous, proximity-activated interactive digital display system providing adjusted artworks as content, for evaluating viewer reactions and opinions to determine whether similar interactive ubiquitous systems are a beneficial, enjoyable and even an appropriate way to display art. Multimedia used in galleries predominantly provides content following set patterns and disregards the viewer. Interactive displays using viewer location usually require the viewer's conscious participation through carrying some form of hardware or using expensive sensing equipment. We created an inexpensive, simple system that reacts to the user in a ubiquitous manner, allowing the evaluation of the usability and suitability of such systems in the context of viewing art. Results from testing show that interactive displays are generally enjoyed and wanted for displaying art; however, even simple ubiquitous displays can cause user difficulty due to the transparency of their interaction.}, Address = {Dunedin, New Zealand}, Author = {Gary Burrows}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:41 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {interactive, digital displays, art, proximity, ubiquitous, gallery, intuitive interfaces}, Month = jan, Number = {2006/02}, Size = {496 KB}, Title = {Ubiquitous interactive art displays: {A}re they wanted, are they intuitive?}, Type = {Discussion paper}, Year = {2006}} @techreport{dp2001-02, Abstract = {Agent-oriented software engineering is a promising new approach to software engineering that uses the notion of an agent as the primary entity of abstraction. The development of methodologies for agent-oriented software engineering is an area that is currently receiving much attention; several agent-oriented methodologies have been proposed recently and survey papers are starting to appear. However, the authors feel that there is still much work necessary in this area; current methodologies can be improved upon. This paper presents a new methodology, the Styx Agent Methodology, which guides the development of collaborative agent systems from the analysis phase through to system implementation and maintenance. A distinguishing feature of Styx is that it covers a wider range of software development life-cycle activities than do other recently proposed agent-oriented methodologies. The key areas covered by this methodology are the specification of communication concepts, inter-agent communication and each agent's behaviour activation---but it does not address the development of application-specific parts of a system. It will be supported by a software tool which is currently in development.}, Address = {Dunedin, New Zealand}, Author = {Geoff Bush and Stephen Cranefield and Martin K.
Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:51:16 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {agent-based software engineering, methodologies for agent-oriented software development}, Month = jan, Number = {2001/02}, Size = {153 KB}, Title = {The {S}tyx agent methodology}, Type = {Discussion paper}, Year = {2001}} @techreport{dp2006-01, Abstract = {Health care has entered the electronic domain. This domain has improved data collection and storage abilities while allowing almost instantaneous access and results to data queries. Furthermore, it allows direct communication between healthcare providers and health consumers. The development of privacy, confidentiality and security principles is necessary to protect consumers' interests against inappropriate access. The electronic health systems vendors have dominated the transition of media, claiming it will improve the quality and coherence of the care process. However, numerous studies show that the health consumer is the important stakeholder in this process, and their views suggest that the electronic medium is the way forward, but not just yet. With the international push towards Electronic Health Records (EHRs) by the Health and Human Services (United States of America), National Health Service (United Kingdom), Health Canada (Canada) and more recently the Ministry of Health (New Zealand), this paper presents the consumers' role with a focus on their perceptions of the security of EHRs. A description of a study, looking at the New Zealand health consumer, is given.}, Address = {Dunedin, New Zealand}, Author = {Prajesh Chhanabhai and Alec Holt and Inga Hunter}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:41 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {electronic health records, New Zealand health system, consumer, security}, Month = jan, Number = {2006/01}, Size = {291 KB}, Title = {Consumers, security and electronic health records}, Type = {Discussion paper}, Year = {2006}} @techreport{dp2000-19, Abstract = {In any data set, some of the data will be bad or noisy. This study identifies two types of noise and investigates the effect of each in the training data of backpropagation neural networks. It also compares the mean square error function with a more robust alternative advocated by Huber.}, Address = {Dunedin, New Zealand}, Author = {David Clark}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:51:03 +1200}, Institution = {Department of Information Science, University of Otago}, Month = dec, Number = {2000/19}, Size = {360 KB}, Title = {Comparing {H}uber's {M}-{E}stimator function with the mean square error in backpropagation networks when the training data is noisy}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2000-17, Abstract = {In a consensus ensemble all members must agree before they classify a data point. But even when they all agree, some data is still misclassified.
In this paper we look closely at consistently misclassified data to investigate whether some of it may be outliers or may have been mislabeled.}, Address = {Dunedin, New Zealand}, Author = {David Clark}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:42 +1200}, Institution = {Department of Information Science, University of Otago}, Month = nov, Number = {2000/17}, Size = {331 KB}, Title = {Using consensus ensembles to identify suspect data}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2001-04, Abstract = {This paper discusses technology to support the use of UML for representing ontologies and domain knowledge in the Semantic Web. Two mappings have been defined and implemented using XSLT to produce Java classes and an RDF schema from an ontology represented as a UML class diagram and encoded using XMI. A Java application can encode domain knowledge as an object diagram realised as a network of instances of the generated classes. Support is provided for marshalling and unmarshalling this object-oriented knowledge to and from an RDF/XML serialisation.}, Address = {Dunedin, New Zealand}, Author = {Stephen Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:51:43 +1200}, Institution = {Department of Information Science, University of Otago}, Month = feb, Number = {2001/04}, Size = {482 KB}, Title = {{UML} and the {S}emantic {W}eb}, Type = {Discussion paper}, Year = {2001}} @techreport{dp2004-03, Abstract = {The use of asynchronous communication is traditionally seen to be an important element of an agent's autonomy. This paper argues that groups of agents within a society need the ability to choose forms of communication with stronger guarantees for particular interactions, and in particular, focuses on the use of reliable group communication. An example electronic trading scenario---the game of Pit---is presented, and it is shown how a formal institution for a particular critical phase of Pit can be built on top of the semantics for totally ordered and virtually synchronous multicasting.}, Address = {Dunedin, New Zealand}, Author = {Stephen Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:42 +1200}, Institution = {Department of Information Science, University of Otago}, Month = dec, Number = {2004/03}, Size = {183 KB}, Title = {Reliable group communication and institutional action in a multi-agent trading scenario}, Type = {Discussion paper}, Year = {2004}} @techreport{dp2005-01, Abstract = {This paper proposes a rule language for defining social expectations based on a metric interval temporal logic with past and future modalities and a current time binding operator. An algorithm, based on formula progression, for run-time monitoring of compliance with rules in this language is also presented.}, Address = {Dunedin, New Zealand}, Author = {Stephen Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:42 +1200}, Institution = {Department of Information Science, University of Otago}, Month = feb, Number = {2005/01}, Size = {188 KB}, Title = {A rule language for modelling and monitoring social expectations in multi-agent systems}, Type = {Discussion paper}, Year = {2005}} @techreport{dp2001-07, Abstract = {Ontologies play an important role in defining the terminology that agents use in the exchange of knowledge-level messages.
As object-oriented modelling, and the Unified Modeling Language (UML) in particular, have built up a huge following in the field of software engineering and are widely supported by robust commercial tools, the use of UML for ontology representation in agent systems would help to hasten the uptake of agent-based systems concepts into industry. This paper examines the potential for UML to be used for ontology modelling, compares it to traditional description logic formalisms and discusses some further possibilities for applying UML-based technologies to agent communication systems.}, Address = {Dunedin, New Zealand}, Author = {Stephen Cranefield and Stefan Haustein and Martin K. Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:51:53 +1200}, Institution = {Department of Information Science, University of Otago}, Month = may, Number = {2001/07}, Size = {143 KB}, Title = {{UML}-based ontology modelling for software agents}, Type = {Discussion paper}, Year = {2001}} @techreport{dp2005-12, Abstract = {This paper discusses the potential benefits to ontology engineering in making the toolset of the Object Management Group's model-driven architecture (MDA) applicable to ontology modelling, and describes the design of an MDA-based tool to convert ontologies expressed in any language having a metamodel defined using the OMG's MOF model to an equivalent representation in RDF but with the same metamodel. It is shown how this representation, compared to the XMI format, provides a higher-level generic serialisation format for MDA models (especially ontologies) that is amenable to analysis and transformation using existing RDF tools. This helps to bridge the gap between the MDA and ontology engineering by providing a route for ontologies in various ontology modelling languages to be imported into industrial-strength MDA model repositories and other tools, and by allowing these ontologies to be transformed to and from other forms of model.}, Address = {Dunedin, New Zealand}, Author = {Stephen Cranefield and Jin Pan}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:55:06 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {Model-driven Architecture (MDA), ontologies, MOF, JMI, RDF, Jena, NetBeans MDR, ODM}, Month = dec, Number = {2005/12}, Size = {416 KB}, Title = {Bridging the gap between the {M}odel-{D}riven {A}rchitecture and ontology engineering}, Type = {Discussion paper}, Year = {2005}} @techreport{dp2000-07, Abstract = {It is canonical practice in agent-based systems to use a declarative format for the exchange of information. The increasing usage and facility of object-oriented tools and techniques, however, suggests there may be benefits in combining the use of object-oriented modelling approaches with agent-based messaging. In this paper we outline our efforts in connection with the New Zealand Distributed Information Systems project to use object-oriented knowledge representation in an agent-based architecture. Issues and tradeoffs are discussed, as well as the possible extensions to current agent-based message protocols that may be necessary in order to support object-oriented information exchange.}, Address = {Dunedin, New Zealand}, Author = {Stephen Cranefield and Martin K.
Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:49:52 +1200}, Institution = {Department of Information Science, University of Otago}, Month = apr, Number = {2000/07}, Size = {85 KB}, Title = {Extending agent messaging to enable {OO} information exchange}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2000-02, Abstract = {An approach is presented for incorporating metadata constraints into queries to be processed by a distributed environmental information system. The approach, based on a novel metamodel unifying concepts from the Unified Modelling Language (UML), the Object Query Language (OQL), and the Resource Description Framework (RDF), allows metadata information to be represented and processed in combination with regular data queries.}, Address = {Dunedin, New Zealand}, Author = {Stephen Cranefield and Martin K. Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:43 +1200}, Institution = {Department of Information Science, University of Otago}, Month = feb, Number = {2000/02}, Size = {134 KB}, Title = {Integrating environmental information: {I}ncorporating metadata in a distributed information systems architecture}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2001-08, Abstract = {This paper examines a recent trend amongst software agent application and platform developers to desire the ability to send domain-specific objects within inter-agent messages. If this feature is to be supported without departing from the notion that agents communicate in terms of knowledge, it is important that the meaning of such objects be well understood. Using an object-oriented metamodelling approach, the relationships between ontologies and agent communication and content languages in FIPA-style agent systems are examined. It is shown how object structures in messages can be considered as expressions in ontology-specific extensions of standard content languages. It is also argued that ontologies must distinguish between objects with and objects without identity.}, Address = {Dunedin, New Zealand}, Author = {Stephen Cranefield and Martin K. Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2010-10-20 14:59:05 +1300}, Institution = {Department of Information Science, University of Otago}, Month = may, Number = {2001/08}, Size = {171 KB}, Title = {Generating ontology-specific content languages}, Type = {Discussion paper}, Year = {2001}} @techreport{dp2000-08, Abstract = {This paper describes a system of interlinked ontologies to describe the concepts underlying FIPA agent communication. A meta-modelling approach is used to relate object-oriented domain ontologies and abstract models of agent communication and content languages and to describe them in a single framework. The modelling language used is the Unified Modeling Language, which is extended by adding the concepts of resource and reference. The resulting framework provides an elegant basis for the development of agent systems that combine object-oriented information representation with agent messaging protocols.}, Address = {Dunedin, New Zealand}, Author = {Stephen Cranefield and Martin K. Purvis and Mariusz Nowostawski}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:50:01 +1200}, Institution = {Department of Information Science, University of Otago}, Month = apr, Number = {2000/08}, Size = {100 KB}, Title = {Is it an ontology or an abstract syntax?
{M}odelling objects, knowledge and agent messages}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2001-03, Abstract = {This paper proposes the use of the Unified Modelling Language (UML) as a formalism for defining an abstract syntax for Agent Communication Languages (ACLs) and their associated content languages. It describes an approach supporting an automatic mapping from high-level abstract specifications of language structures to specific computer language bindings that can be directly used by an agent platform. Some advantages of this approach are that it provides a framework for specifying and experimenting with alternative agent communication languages and reduces the error-prone manual process of generating compatible bindings and grammars for different syntaxes. A prototype implementation supporting an automatic conversion from an abstract communication language expressed in UML to a native Java API and a Resource Description Framework (RDF) serialisation format is described.}, Address = {Dunedin, New Zealand}, Author = {Stephen Cranefield and Martin K. Purvis and Mariusz Nowostawski}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:51:28 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {agent communication languages, abstract syntax, UML, XMI, Java binding, marshalling, RDF}, Month = feb, Number = {2001/03}, Size = {488 KB}, Title = {Implementing agent communication languages directly from {UML} specifications}, Type = {Discussion paper}, Year = {2001}} @techreport{dp2007-08, Abstract = {One approach to moderating the behaviour of agents in open societies is the use of explicit languages for defining norms, conditional commitments and/or social expectations, together with infrastructure supporting conformance checking and the identification and possible punishment of anti-social agents. This paper presents a logical account of the creation, fulfilment and violation of social expectations modelled as conditional rules over a hybrid propositional temporal logic. The semantics are designed to allow model checking over finite histories to be used to check for fulfilment and violation of expectations in both online and offline modes. For online checking, expectations are always considered at the last state in the history, but in the offline mode expectations in previous states are also checked. At each past state, the then active expectations must be checked for fulfilment without recourse to information from later states: the truth of a future-oriented temporal proposition $\varphi$ at state s over the full history does not imply the fulfilment at s of an expectation with content $\varphi$. This issue is addressed by defining fulfilment and violation in terms of an extension of Eisner et al.'s weak/strong semantics for LTL over truncated paths.
The update of expectations from one state to the next is based on formula progression and the approach has been implemented by extending the MCLITE and MCFULL algorithms of the Hybrid Logic Model Checker.}, Address = {Dunedin, New Zealand}, Author = {Stephen Cranefield and Michael Winikoff}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:43 +1200}, Institution = {Department of Information Science, University of Otago}, Month = dec, Number = {2007/08}, Size = {240 KB}, Title = {Verifying social expectations by model checking truncated paths}, Type = {Discussion paper}, Year = {2007}} @techreport{dp2005-11, Abstract = {Progress made in content-based image retrieval has reactivated research on image analysis, and similarity-based approaches have been investigated to assess the similarity between images. In this paper, the content-based approach is extended towards the problem of image collection summarization and comparison. For these purposes we propose to carry out clustering analysis on visual features using self-organizing maps, and then evaluate their similarity using a few dissimilarity measures implemented on the feature maps. The effectiveness of these dissimilarity measures is then examined with an empirical study.}, Address = {Dunedin, New Zealand}, Author = {Da Deng}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:43 +1200}, Institution = {Department of Information Science, University of Otago}, Month = dec, Number = {2005/11}, Size = {1.3 MB}, Title = {Content-based image collection summarization and comparison using self-organizing maps}, Type = {Discussion paper}, Year = {2005}} @techreport{dp2000-16, Abstract = {Although widely studied for many years, colour image quantisation remains a challenging problem. We propose to use an evolving self-organising map model for on-line image quantisation tasks. Encouraging results are obtained in experiments and we look forward to implementing the algorithm in real-world applications with further improvement.}, Address = {Dunedin, New Zealand}, Author = {Da Deng and Nikola Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:43 +1200}, Institution = {Department of Information Science, University of Otago}, Month = nov, Number = {2000/16}, Size = {2.2 MB}, Title = {Evolving localised learning for on-line colour image quantisation}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2000-06, Abstract = {In real-world information systems, data analysis and processing usually need to be done in an on-line, self-adaptive way. In this respect, neural algorithms for incremental learning and constructive network models are of increased interest. In this paper we present a new algorithm, the evolving self-organizing map (ESOM), which features fast one-pass learning, a dynamic network structure, and good visualisation ability. Simulations have been carried out on some benchmark data sets for classification and prediction tasks, as well as on some macroeconomic data for data analysis. Compared with other methods, ESOM achieved better classification with much shorter learning time. Its performance for time series modelling is also comparable, requiring more hidden units but with only one-pass learning.
Our results demonstrate that ESOM is an effective computational model for on-line learning, data analysis and modelling.}, Address = {Dunedin, New Zealand}, Author = {Da Deng and Nikola Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:44 +1200}, Institution = {Department of Information Science, University of Otago}, Month = mar, Number = {2000/06}, Size = {233 KB}, Title = {Evolving self-organizing maps for on-line learning, data analysis and modelling}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2007-04, Abstract = {In tackling data mining and pattern recognition tasks, finding a compact but effective set of features has often been found to be a crucial step in the overall problem-solving process. In this paper we present an empirical study on feature analysis for classical instrument recognition, using machine learning techniques to select and evaluate features extracted from a number of different feature schemes. It is revealed that there is significant redundancy between and within feature schemes commonly used in practice. Our results suggest that further feature analysis research is necessary in order to optimize feature selection and achieve better results for the instrument recognition problem.}, Address = {Dunedin, New Zealand}, Author = {Da Deng and Christian Simmermacher and Stephen Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:44 +1200}, Institution = {Department of Information Science, University of Otago}, Month = aug, Number = {2007/04}, Size = {204 KB}, Title = {A study on feature analysis for musical instrument classification}, Type = {Discussion paper}, Year = {2007}} @techreport{dp2006-09, Abstract = {Along with the progress of content-based image retrieval research and the development of the MPEG-7 XM feature descriptors, there has been increasing research interest in object recognition and semantics extraction from images and videos. In this paper, we revisit the old problem of indoor versus outdoor scene classification. By introducing a precision-boosted combination scheme of multiple classifiers trained on several global and regional feature descriptors, our experiment has led to better results compared with conventional approaches.}, Address = {Dunedin, New Zealand}, Author = {Xianglin Deng and Jianhua Zhang}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:44 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {scene classification, classifier combination}, Month = may, Number = {2006/09}, Size = {843 KB}, Title = {Combining multiple precision-boosted classifiers for indoor-outdoor scene classification}, Type = {Discussion paper}, Year = {2006}} @techreport{dp2007-03, Abstract = {This paper proposes to design a mechanism that will allow M{\=a}ori users to specify their privacy preferences related to their culture when a software system asks for culturally sensitive information. We first identify various concepts associated with sensitive aspects of M{\=a}ori culture, such as tapu. We propose to build an ontology that describes these concepts and the relations between them in a formal way.
This ontology will help service providers integrate M{\=a}ori cultural protocols in order to make M{\=a}ori users more confident about the use of the sensitive information related to their culture.}, Address = {Dunedin, New Zealand}, Author = {Xianglin Deng and Noria Foukia and Bastin Tony Roy Savarimuthu}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:44 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {privacy, M{\=a}ori culturally sensitive information}, Month = jul, Number = {2007/03}, Size = {308 KB}, Title = {Building privacy infrastructure for culturally sensitive information of {N}ew {Z}ealand {M}{\=a}ori}, Type = {Discussion paper}, Year = {2007}} @techreport{dp2002-08, Abstract = {(No abstract.)}, Address = {Dunedin, New Zealand}, Author = {Grant Dick and Peter Whigham}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:44 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {evolutionary computation, selection, spatial patterns}, Month = nov, Number = {2002/08}, Size = {255 KB}, Title = {Population density and spatially constrained selection in evolutionary computation}, Type = {Discussion paper}, Year = {2002}} @techreport{dp2006-06, Abstract = {This paper presents an implementation of the first stage of a Virtual Organization (VO) life cycle, which is the VO's creation. This implementation is based on previous work by one of the authors describing a framework which facilitates the establishment of VO agreements. In accordance with the framework, the implementation makes the VO's creation fully automated, thereby reducing its duration considerably. This is beneficial for the VO, which should only exist for the limited period needed to satisfy its goal. The VO is implemented as a Multi-Agent System (MAS), where autonomous agents negotiate the agreement leading to the VO's establishment. The Opal FIPA-compliant MAS platform was used to implement the VO agents. Different scenarios and evaluations provide a clear demonstration of the implementation, showing how agents dynamically negotiate the establishment of the agreement and how opportunistic agents' behavior affects the trust level during the negotiation process.}, Address = {Dunedin, New Zealand}, Author = {Noria Foukia and Pierre-Etienne Mallet}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:55:31 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {virtual organization, trust, autonomy, agent}, Month = mar, Number = {2006/06}, Size = {336 KB}, Title = {Establishing dynamic trust in virtual organization by means of {MAS}}, Type = {Discussion paper}, Year = {2006}} @techreport{dp2002-05, Abstract = {The technology sector of healthcare is entering a new evolutionary phase. The medical community has an obligation to the public to provide the safest, most effective healthcare possible. This is more achievable with the use of computer technology at the point of care, and small, portable devices could fulfil this role. A Modern Physician/PricewaterhouseCoopers 2001 survey on information technology in physician practices found that 60% of respondents say that physicians in their organisation use PDAs, compared with 26% in the 2000 technology survey. This trend is expected to continue to the point where these devices will have their position on a physician's desk next to their stethoscope.
Once this electronic evolution occurs, the practice of medicine will change. Doctors will be able to practice medicine with greater ease and safety. In our opinion, the new generation of PDA mobile devices will be the tools to enable a transformation of healthcare to a paperless, wireless world. This article focuses on uses for PDAs in health care. Healthcare software is categorised into the following groups: reference/text book, calculators, patient management/logbook and personal clinical/study notebook. The focus is on the healthcare audience (the user), which can be a registrar, consultant, nurse, student, teacher, patient, medical director or surgical staff.}, Address = {Dunedin, New Zealand}, Author = {Wayne Gillingham and Alec Holt and John Gillies}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:45 +1200}, Institution = {Department of Information Science, University of Otago}, Month = jul, Number = {2002/05}, Size = {864 KB}, Title = {Hand-held computers in health care: {W}hat software programs are available?}, Type = {Discussion paper}, Year = {2002}} @techreport{dp2004-02, Abstract = {Touch screens are a popular method of interaction with information systems embedded in public kiosks. Typical information systems are used on desktop PCs and are therefore restricted to having a mouse as the selection device used to interact with the system. The purpose of this paper is to investigate how effective a touch screen overlay is in selecting typical graphical user interface (GUI) items used in information systems. A series of tests was completed involving multi-directional point and select tasks. A mouse, being the standard selection device, was also tested so that the results of the touch screen could be compared. The GUI items tested were a button, check box, combo box and a text box. The results showed that the touch screen overlay was not suitable in terms of selecting small targets with a size of 4mm or less. The touch screen overlay was slower and had a higher error rate compared to the mouse. There was no significant difference in throughput between the touch screen overlay and the mouse. The mouse was rated easier to use and easier to make accurate selections with. The touch screen had higher arm, wrist and finger fatigue. This indicates that a touch screen overlay used only with a finger is not a practical selection device to use with interfaces containing small targets.}, Address = {Dunedin, New Zealand}, Author = {Matthew Gleeson and Nigel Stanger and Elaine Ferguson}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:54:04 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {touch screen overlay, mouse, pointing devices, Fitts' Law, performance evaluation, GUI items}, Month = dec, Number = {2004/02}, Size = {610 KB}, Title = {Design strategies for {GUI} items with touch screen based information systems: {A}ssessing the ability of a touch screen overlay as a selection device}, Type = {Discussion paper}, Year = {2004}} @techreport{dp2002-07, Abstract = {The St John's Ambulance Service, Southern Region Control Centre (the control centre) is located in Dunedin City and controls 56 ambulances based in 26 regional stations. The Southern Region covers an area of approximately 54,000 square kilometres, which has a usually resident population of 272,541 (Census, 2001). This report focuses on the dispatch and workload profile of the control centre between the 1st January 1997 and the 31st December 2001.
During this period the control centre dispatched ambulances on approximately 135,822 occasions to a total of 118,759 incidents (this includes both emergency incidents and patient transfers). Based on an analysis of these incidents, several key findings are discussed in this report. These include: a 21.8% increase in the total number of incidents handled in the control centre between 1997 and 2001; a 44-second increase in average activation times between 1997 and 2001; a strong correlation between increased workload and increased activation times; and a large increase in activation times during low and medium workload periods.}, Address = {Dunedin, New Zealand}, Author = {Jared Hayes}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:52:34 +1200}, Institution = {Department of Information Science, University of Otago}, Month = nov, Number = {2002/07}, Size = {180 KB}, Title = {St {J}ohn's {A}mbulance {S}ervice, {S}outhern {R}egion: {C}ontrol centre dispatch profile (1997--2001)}, Type = {Discussion paper}, Year = {2002}} @techreport{dp2000-01, Abstract = {This article outlines similarity applied to the general environment and geographical information domains. The hypothesis is that if physical and social sciences manifest similar amenities, then similarity would be a generative technique to analyse the cached information inherent in the data retrieved. Similarity is examined concerning the spatial grouping of natural kinds in a complex environment.}, Address = {Dunedin, New Zealand}, Author = {Alec Holt}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:45 +1200}, Institution = {Department of Information Science, University of Otago}, Month = feb, Number = {2000/01}, Size = {206 KB}, Title = {Investigating complexities through computational techniques}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2001-09, Abstract = {This paper proposes a novel View-based Consistency model for Distributed Shared Memory. A view is a set of ordinary data objects that a processor has the right to access in a data-race-free program. The View-based Consistency model only requires that the data objects of a view are updated before a processor accesses them. Compared with other memory consistency models, the View-based Consistency model can achieve data selection without user annotation and can greatly reduce the false-sharing effect. This model has been implemented based on TreadMarks. Performance results have shown that for all our applications the View-based Consistency model outperforms the Lazy Release Consistency model.}, Address = {Dunedin, New Zealand}, Author = {Zhiyi Huang and Chengzheng Sun and Martin K. Purvis and Stephen Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:45 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {distributed shared memory, sequential consistency, false sharing}, Month = may, Number = {2001/09}, Size = {139 KB}, Title = {View-based consistency and its implementation}, Type = {Discussion paper}, Year = {2001}} @techreport{dp2001-01, Abstract = {Fuzzy neural networks are connectionist systems that facilitate learning from data, reasoning over fuzzy rules, rule insertion, rule extraction, and rule adaptation.
The concept of evolving fuzzy neural networks (EFuNNs), with respective algorithms for learning, aggregation, rule insertion and rule extraction, is further developed here and applied to on-line knowledge discovery in both prediction and classification tasks. EFuNNs operate in an on-line mode and learn incrementally through locally tuned elements. They grow as data arrive, and regularly shrink through pruning of nodes, or through node aggregation. The aggregation procedure is functionally equivalent to knowledge abstraction. The features of EFuNNs are illustrated on two real-world application problems---one from macroeconomics and another from Bioinformatics. EFuNNs are suitable for fast learning of on-line incoming data (e.g., financial and economic time series, biological process control), adaptive learning of speech and video data, incremental learning and knowledge discovery from growing databases (e.g. in Bioinformatics), on-line tracing of processes over time, and life-long learning. The paper also includes a short review of the most common types of rules used in knowledge-based neural networks for knowledge discovery and data mining.}, Address = {Dunedin, New Zealand}, Author = {Nikola Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:45 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {fuzzy rules, evolving fuzzy neural networks, on-line learning, macroeconomics, bioinformatics}, Month = jan, Number = {2001/01}, Size = {707 KB}, Title = {Evolving fuzzy neural networks for on-line knowledge discovery}, Type = {Discussion paper}, Year = {2001}} @techreport{dp2000-15, Abstract = {The paper applies novel techniques for on-line, adaptive learning of macroeconomic data and a consecutive analysis and prediction. The evolving connectionist system paradigm (ECOS) is used in its two versions---unsupervised (evolving self-organised maps), and supervised (evolving fuzzy neural networks---EFuNN). In addition to these techniques, self-organised maps (SOM) are also employed for finding clusters of countries based on their macroeconomic parameters. EFuNNs allow for modelling, clustering, prediction and rule extraction. The rules that describe future annual values for the consumer price index (CPI), interest rate, unemployment and GDP per capita are extracted from data and reported in the paper, both for the global EU-Asia block of countries and for smaller groups---EU, EU-candidate countries, and Asia-Pacific countries. The analysis and prediction models prove to be useful tools for the analysis of trends in the macroeconomic development of clusters of countries and their future prediction.}, Address = {Dunedin, New Zealand}, Author = {Nikola Kasabov and H. Akpinar and L. Rizzi and Da Deng}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:50:41 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {neural networks, fuzzy rules, evolving connectionist systems, macroeconomic clusters}, Month = oct, Number = {2000/15}, Title = {Analysis of the macroeconomic development of {E}uropean and {A}sia-{P}acific countries with the use of connectionist models}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2000-18, Abstract = {Elsewhere Kennedy describes three elementary structures to be found in entity-relationship diagrams.
Here, each of these structures is considered in the context of a transaction processing system and a specific set of components that can be associated with the structure is described. Next, an example is given illustrating the use of elementary structures as an analytical tool for data modelling and a diagnostic tool for the identification of errors in the resulting data model. It is conjectured that the amount of effort associated with each structure can be measured. A new approach for the estimation of the total effort required to develop a system, based on a count of the elementary structures present in the entity-relationship diagram, is then proposed. The approach is appealing because it can be automated and because it can be applied earlier in the development cycle than other estimation methods currently in use. The question of a suitable counting strategy remains open.}, Address = {Dunedin, New Zealand}, Author = {Geoffrey Kennedy}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:46 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {data modelling, design tools and techniques, entity-relationship model, software metrics}, Month = dec, Number = {2000/18}, Size = {112 KB}, Title = {Elementary structures in entity-relationship diagrams as a diagnostic tool in data modelling and a basis for effort estimation}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2000-05, Abstract = {M{\=a}ori speech data collection and analysis is an ongoing process, as new and existing data sets are continuously accessed for many different experimental speech perception and generation processing tasks. A data management system is an important tool to facilitate the systematic techniques applied to the speech and language data. Identification of the core components for M{\=a}ori speech and language databases, translation systems, speech recognition and speech synthesis have been undertaken as research themes. The latter component will be the main area of discussion here. To hasten the development of M{\=a}ori speech synthesis, joint collaborative research with established international projects has begun. This will allow the M{\=a}ori language to be presented to the wider scientific community well in advance of other similar languages, many times its own size and distribution. Propagation of the M{\=a}ori language via the information communication technology (ICT) medium is advantageous to its long-term survival.}, Address = {Dunedin, New Zealand}, Author = {Mark Laws}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:46 +1200}, Institution = {Department of Information Science, University of Otago}, Month = mar, Number = {2000/05}, Size = {243 KB}, Title = {Development of a {M}{\=a}ori database for speech perception and generation}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2000-04, Abstract = {The English and M{\=a}ori word translator ng{\=a} aho whakam{\=a}ori-{\=a}-tuhi was designed to provide single head-word translations to on-line web users. There are over 13,000 words, all based on traditional text sources, derived because of their high frequency of use within each of the respective languages. The translator has been operational for well over a year now, and it has had the highest web traffic usage in the Department of Information Science.
Two log files were generated to record domain hits and language translations; both provided the up-to-date data for the analysis contained in this paper.}, Address = {Dunedin, New Zealand}, Author = {Mark Laws and Richard Kilgour and Michael Watts}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:46 +1200}, Institution = {Department of Information Science, University of Otago}, Month = mar, Number = {2000/04}, Size = {202 KB}, Title = {Analysis of the {N}ew {Z}ealand and {M}{\=a}ori on-line translator}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2007-07, Abstract = {This paper presents a new approach to dynamic symmetric key distribution for encrypting the communication between two nodes in a Wireless Sensor Network (WSN). The distribution of a shared key can be performed by any sensor node and does not always require that it be performed by the base station (BS). Each node can be selected by one of its neighbor nodes in order to distribute a pair-wise key for a communication between two nodes. The selection is based on the local computation of a trust value granted by the requesting nodes. This scheme considerably reduces the cost of communication between the BS and the nodes when setting up pair-wise keys between neighboring nodes. This paper also describes a dynamic route selection mechanism, based on trust and cost, that each node performs to route data to neighbor nodes and to the BS.}, Address = {Dunedin, New Zealand}, Author = {Nathan Lewis and Noria Foukia and Donovan G. Govan}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:46 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {key distribution, trust, wireless sensor network, route selection}, Month = sep, Number = {2007/07}, Size = {448 KB}, Title = {Using trust for key distribution and route selection in wireless sensor networks}, Type = {Discussion paper}, Year = {2007}} @techreport{dp2005-07, Abstract = {The immune system is a complex and distributed system. It provides a multilevel form of defence, capable of identifying and reacting to harmful pathogens that it does not recognise as being part of its ``self''. The framework proposed in this paper incorporates a number of immunological principles, including the multilevel defence and the cooperation between cells in the adaptive immune system. It is proposed that this approach could be used to provide a high level of intrusion detection, while minimising the level of false negative detections.}, Address = {Dunedin, New Zealand}, Author = {Melanie Middlemiss}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:46 +1200}, Institution = {Department of Information Science, University of Otago}, Month = jul, Number = {2005/07}, Size = {264 KB}, Title = {Framework for intrusion detection inspired by the immune system}, Type = {Discussion paper}, Year = {2005}} @techreport{dp2006-03, Abstract = {The immune system is a complex and distributed system. It provides a multilayered form of defence, capable of identifying and responding to harmful pathogens that it does not recognise as ``self''. The framework proposed in this paper incorporates a number of immunological concepts and principles, including the multilayered defence and the cooperation between cells in the adaptive immune system. An alternative model of positive selection is also presented.
It is suggested that the framework discussed here could lead to reduced false positive responses in anomaly detection tasks, such as intrusion detection, as well as being extended to a population of computational immune systems that are able to maintain population diversity of recognition and response.}, Address = {Dunedin, New Zealand}, Author = {Melanie Middlemiss}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:47 +1200}, Institution = {Department of Information Science, University of Otago}, Month = jan, Number = {2006/03}, Size = {444 KB}, Title = {Positive and negative selection in a multilayer artificial immune system}, Type = {Discussion paper}, Year = {2006}} @techreport{dp2002-03, Abstract = {Computer software is becoming increasingly utilized as an aid to rugby, and to sports coaching in general. Videoed sport is the most widely used form of raw data for sports analysis, though it is currently not being used to its full potential. Patterns of player movement and position, both for individuals and groupings of players, are important for understanding the complexities of professional team sports, and yet are not being adequately addressed. This paper outlines a project that aims to support coaching and/or commentary by visualizing and measuring the similarity of video-derived spatio-temporal information, and enabling timely access to relevant video clips. Specifically, methods by which a user of spatially-enabled sports software can visualize spatio-temporal and rugby object information will be discussed. Two issues are examined: (1) powerful spatio-temporal representation techniques for rugby constructs (such as the pitch, players and amalgamations of players: team, scrum, lineout, backline) and (2) user interface design and how it enables rugby object representation alongside the spatio-temporal visualization facility.}, Address = {Dunedin, New Zealand}, Author = {Antoni Moore and Peter Whigham and Colin Aldridge and Alec Holt and Ken Hodge}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:47 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {spatial, temporal, video, representation, object, rugby}, Month = jun, Number = {2002/03}, Size = {2.3 MB}, Title = {Spatio-temporal and object visualization in rugby union}, Type = {Discussion paper}, Year = {2002}} @techreport{dp2003-05, Abstract = {It is a standard aim to complete tasks efficiently and effectively. When technology is involved, the tools must be designed to facilitate optimal performance. The ActualDepth{\texttrademark} Multi-Layer Display (MLD{\texttrademark}) is a `new generation' display, consisting of two layered Liquid Crystal Displays (LCDs), with a region of space between them. The top LCD displays transparently, allowing both layers to be viewed simultaneously. This paper describes an experiment that investigated relative reading speeds, error detection, comprehension speeds and comprehension accuracy on the MLD{\texttrademark}, including a comparison with standard single-layered displays. A framework pertaining to colour and transparency usage on the MLD{\texttrademark} was then developed, which is intended to enhance the usability and effectiveness of the display.
In general, it was found that overall readability was improved on the MLD{\texttrademark} compared to a standard display, and that different transparency levels and colours should be employed depending on the purpose of reading the text.}, Address = {Dunedin, New Zealand}, Author = {Anna Nees and Rochelle Villanueva and William Wong}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:53:50 +1200}, Institution = {Department of Information Science, University of Otago}, Month = nov, Number = {2003/05}, Size = {249 KB}, Title = {Colour and transparency on the {M}ulti-{L}ayer {D}isplay ({MLD}{\texttrademark})}, Type = {Discussion paper}, Year = {2003}} @techreport{dp2001-10, Abstract = {The purpose of this document is to describe the key technology issues for distributed information access in New Zealand. It is written from an industrial and public sector perspective, representing the views and findings of a wide cross-section of institutions in public and private sectors. It is an output of Objective 2 of the Distributed Information Systems project funded under contract UO0621 by the New Zealand Foundation for Research, Science and Technology (FRST). It complements other project material produced by the academic research team at the University of Otago and its collaborators. It focuses on requirements and applications, and is intended to provide a real-world, New Zealand-oriented context for the research in distributed information technologies (DIST). The report represents the culmination of a series of workshops, industrial consultations, a questionnaire, and the experiences of the authors' institutions during the project, and therefore it supplements any previously produced material.}, Address = {Dunedin, New Zealand}, Author = {Howard Nicholls and Robert Gibb}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:47 +1200}, Institution = {Department of Information Science, University of Otago}, Month = sep, Number = {2001/10}, Size = {1.3 MB}, Title = {Distributed information access in {N}ew {Z}ealand}, Type = {Discussion paper}, Year = {2001}} @techreport{dp2006-08, Abstract = {The notion that all (or, in a weaker sense, some) natural phenomena can be modelled as a computable process, some kind of algorithm, has recently been gaining scientific recognition, and more research is dedicated to the rigorous exploration of the mapping between natural phenomena and formalised computational systems. There is some debate and controversy as to how much of the natural can be expressed in the models of the artificial, although due to the formalised nature of mathematics and physics itself, it is generally accepted that computation is a viable way to model physical reality. Contemporary developments in computer science and in physics not only do not refute computationalism---they provide more data and evidence in support of the basic theses. In this article we discuss some of the aspects of contemporary computationalist efforts based on the traditional notions of Turing Machine computation. Then we present an extended notion of computation that goes beyond the traditional Turing limit. We propose a new interactive computation model called Evolvable Virtual Machines (EVMs). The EVM model uses the notion of many independently, asynchronously executing processes that communicate with each other and with the outside environment.
We present some of the pitfalls of traditional computationalism, and compare it to our new, extended computationalist model, based on the notion of massively concurrent interactive computation (hypercomputation). We argue that hypercomputationalism based on a collection of asynchronously, concurrently communicating computational machines is a more compact and more appropriate way of representing natural phenomena (or the Universe in general). It is theoretically sound, and does not violate any of the current state-of-the-art physical theories. We discuss the details of our computational architecture, and present some of the implications of hypercomputationalism for contemporary physics, the life sciences, and computer science.}, Address = {Dunedin, New Zealand}, Author = {Mariusz Nowostawski}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:55:48 +1200}, Institution = {Department of Information Science, University of Otago}, Month = mar, Number = {2006/08}, Size = {477 KB}, Title = {The {EVM}'s universe and the {U}niverse}, Type = {Discussion paper}, Year = {2006}} @techreport{dp2000-13, Abstract = {The use of modelling abstractions to map from items in the real world to objects in the computational domain is useful both for the effective implementation of abstract problem solutions and for the management of software complexity. This paper discusses the new approach of agent-oriented software engineering (AOSE), which uses the notion of an autonomous agent as its fundamental modelling abstraction. For the AOSE approach to be fully exploited, software engineers must be able to gain leverage from an agent software architecture and framework, and there are several such frameworks now publicly available. At the present time, however, there is little information concerning the options that are available and what needs to be considered when choosing or developing an agent framework. We consider three different agent software architectures that are (or will be) publicly available and evaluate some of the design and architectural differences and trade-offs that are associated with them and their impact on agent-oriented software development. Our discussion examines these frameworks in the context of an example in the area of distributed information systems.}, Address = {Dunedin, New Zealand}, Author = {Mariusz Nowostawski and Geoff Bush and Martin K. Purvis and Stephen Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:47 +1200}, Institution = {Department of Information Science, University of Otago}, Month = aug, Number = {2000/13}, Size = {222 KB}, Title = {Platforms for agent-oriented software}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2001-06, Abstract = {An architecture, and the accompanying infrastructural support, for agent-based software development is described which supports the use of agent-oriented ideas at multiple levels of abstraction. At the lowest level are micro-agents, which are robust and efficient implementations of streamlined agents that can be used for many conventional programming tasks. Agents with more sophisticated functionality can be constructed by combining these micro-agents into more complicated agents. Consequently the system supports the consistent use of agent-based ideas throughout the software engineering process, since higher-level agents may be hierarchically refined into more detailed agent implementations.
We outline how micro-agents are implemented in Java and how they have been used to construct the Opal framework for the construction of more complex agents that are based on the FIPA specifications.}, Address = {Dunedin, New Zealand}, Author = {Mariusz Nowostawski and Geoff Bush and Martin K. Purvis and Stephen Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2010-10-20 14:50:25 +1300}, Institution = {Department of Information Science, University of Otago}, Keywords = {agents, multi-agent system, multi-agent platform scalability}, Month = mar, Number = {2001/06}, Size = {293 KB}, Title = {A multi-level approach and infrastructure for agent-oriented software development}, Type = {Discussion paper}, Year = {2001}} @techreport{dp2003-02, Abstract = {In FIPA-style multi-agent systems, agents coordinate their activities by sending messages representing particular communicative acts (or performatives). Agent communication languages must strike a balance between simplicity and expressiveness by defining a limited set of communicative act types that fit the communication needs of a wide set of problems. More complex requirements for particular problems must then be handled by defining domain-specific predicates and actions within ontologies. This paper examines the communication needs of a multi-agent distributed information retrieval system and discusses how well these are met by the FIPA ACL.}, Address = {Dunedin, New Zealand}, Author = {Mariusz Nowostawski and Daniel Carter and Stephen Cranefield and Martin Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:48 +1200}, Institution = {Department of Information Science, University of Otago}, Month = may, Number = {2003/02}, Size = {340 KB}, Title = {Communicative acts and interaction protocols in a distributed information system}, Type = {Discussion paper}, Year = {2003}}
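%% The ACL message structure discussed in entry dp2003-02 above is easy to make
%% concrete. The following is a hypothetical, minimal Python rendering of a
%% FIPA-ACL-style message (the field names follow the FIPA ACL message
%% structure; the reply() helper is our own illustrative invention, not part of
%% any FIPA or Opal API):
%%
%%   from typing import NamedTuple, Optional
%%
%%   class AclMessage(NamedTuple):
%%       performative: str               # communicative act, e.g. "request" or "inform"
%%       sender: str
%%       receiver: str
%%       content: str                    # domain-specific content, defined in an ontology
%%       ontology: Optional[str] = None
%%       conversation_id: Optional[str] = None
%%
%%   def reply(msg: AclMessage, performative: str, content: str) -> AclMessage:
%%       # A reply swaps sender and receiver and stays in the same conversation.
%%       return AclMessage(performative, msg.receiver, msg.sender,
%%                         content, msg.ontology, msg.conversation_id)
%%
%%   query = AclMessage("request", "client", "retriever",
%%                      "find documents about Petri nets",
%%                      ontology="doc-retrieval", conversation_id="c42")
%%   answer = reply(query, "inform", "3 documents found")
%%
%% The point of the sketch is the balance the abstract describes: a small fixed
%% set of performatives carries the coordination, while problem-specific meaning
%% lives in the content and ontology fields.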
@techreport{dp2005-03, Abstract = {Increasing complexity of software applications forces researchers to look for automated ways of programming and adapting these systems. A self-adapting, self-organising software system is one possible way to tackle and manage this higher complexity. A set of small independent problem solvers, working together in a dynamic environment, solving multiple tasks, and dynamically adapting to changing requirements, is one way of achieving true self-adaptation in software systems. Our work presents a dynamic multi-task environment and experiments with a self-adapting software system. The Evolvable Virtual Machine (EVM) architecture is a model for building complex, hierarchically organised software systems. The intrinsic properties of the EVM allow independent programs to evolve into higher levels of complexity, in a way analogous to multi-level, or hierarchical, evolutionary processes. The EVM is designed to evolve structures of self-maintaining, self-adapting ensembles that are open-ended and hierarchically organised. This article discusses the EVM architecture together with different statistical exploration methods that can be used with it. Based on experimental results, certain behaviours that exhibit self-adaptation in the EVM system are discussed.}, Address = {Dunedin, New Zealand}, Author = {Mariusz Nowostawski and Lucien Epiney and Martin Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:48 +1200}, Institution = {Department of Information Science, University of Otago}, Month = mar, Number = {2005/03}, Size = {877 KB}, Title = {Self-adaptation and dynamic environment experiments with evolvable virtual machines}, Type = {Discussion paper}, Year = {2005}} @techreport{dp2007-05, Abstract = {Referrals are used in multi-agent systems, network agents and peer-to-peer systems for the purpose of global or local information spreading to facilitate trust relationships and reciprocal interactions. Based on referrals, local interactions can be altered with the purpose of maximising the utility function of each of the participants, which in many cases requires the mutual co-operation of participants. The referral system is often based on the global detailed or statistical behaviour of the overall society. Traditionally, referrals are collected by referring agents and the information is provided upon request to individuals. In this article, we provide a simple taxonomy of referral systems and on that basis discuss three distinct ways information can be collected and aggregated. We analyse the effects of global vs. local information spreading, in terms of the individual and global performance of a population, based on the maximisation of a utility function for each of the agents. Our studies show that under certain conditions, such as a large number of non-uniformly acting autonomous agents, the spread of global information is undesirable, and collecting and providing local information only yields better overall results. In some experimental setups, however, it might be necessary for global information to be available, since otherwise globally stable optimal behaviour cannot be achieved. We analyse both of these extreme cases based on a simple game-theoretic setup. We analyse and relate our results in the context of e-mail relaying and spam filtering.}, Address = {Dunedin, New Zealand}, Author = {Mariusz Nowostawski and Noria Foukia}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:48 +1200}, Institution = {Department of Information Science, University of Otago}, Month = aug, Number = {2007/05}, Size = {568 KB}, Title = {Social collaboration, stochastic strategies and information referrals}, Type = {Discussion paper}, Year = {2007}} @techreport{dp2007-06, Abstract = {The concept of autonomy is a central concept in distributed computational systems and in multi-agent systems in particular. With diverse implications in philosophy and despite frequent use in the social sciences and the theory of computation, autonomy remains a somewhat vague notion. Most researchers do not discuss the details of this concept, but rather assume a general, common-sense understanding of autonomy in the context of computational multi-agent systems. We review the existing definitions and formalisms related to the notion of autonomy. We re-introduce two concepts: relative autonomy and absolute autonomy. We argue that even though the concept of absolute autonomy does not make sense in computational settings, it is useful if treated as an assumed property of computational units. For example, the concept of autonomous agents may facilitate more flexible and robust abstract architectures.
We adopt and discuss a new formalism based on results from the study of massively parallel multi-agent systems in the context of evolvable virtual machines. We also present an architecture for building such systems, based on our multi-agent system KEA, where we use an extended notion of dynamic linking. We augment our work with theoretical results from cham algebra for concurrent and asynchronous information processing systems. We argue that for open distributed systems, entities must be connected by multiple computational dependencies and the system as a whole must be subject to influence from external sources. However, the exact linkages are not directly known to the computational entities themselves. This provides a useful notion and the necessary means to establish relative autonomy in such systems.}, Address = {Dunedin, New Zealand}, Author = {Mariusz Nowostawski and Martin Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:48 +1200}, Institution = {Department of Information Science, University of Otago}, Month = aug, Number = {2007/06}, Size = {528 KB}, Title = {The concept of autonomy in distributed computation and multi-agent systems}, Type = {Discussion paper}, Year = {2007}} @techreport{dp2004-01, Abstract = {Contemporary software systems are exposed to demanding, dynamic, and unpredictable environments where traditional adaptability mechanisms may not be sufficient. To imitate and fully benefit from life-like adaptability in software systems that might come closer to the complexity levels of biological organisms, we seek a formal mathematical model of certain fundamental concepts such as life, organism, evolvability and adaptation. In this work we concentrate on the concept of software evolvability. We propose an evolutionary computation model based on the theory of hypercycles and autopoiesis. The intrinsic properties of hypercycles allow them to evolve into higher levels of complexity, analogous to multi-level, or hierarchical, evolutionary processes. We aim to obtain structures of self-maintaining ensembles that are hierarchically organised, and our primary focus is on such open-ended, hierarchically organised evolution.}, Address = {Dunedin, New Zealand}, Author = {Mariusz Nowostawski and Martin Purvis and Stephen Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:48 +1200}, Institution = {Department of Information Science, University of Otago}, Month = nov, Number = {2004/01}, Size = {349 KB}, Title = {An architecture for self-organising evolvable virtual machines}, Type = {Discussion paper}, Year = {2004}}
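%% The hypercycle theory invoked in entry dp2004-01 above has a compact
%% dynamical form: species i is catalysed by its predecessor in the cycle,
%% dx_i/dt = x_i (k_i x_{i-1} - phi), where phi keeps the total concentration
%% constant. A minimal Euler-integration sketch (our illustration only; the
%% paper builds an architecture on this theory rather than this simulation):
%%
%%   import numpy as np
%%
%%   def hypercycle_step(x, k, dt=0.01):
%%       """One Euler step of the elementary hypercycle equations."""
%%       growth = k * np.roll(x, 1)     # species i is catalysed by species i-1
%%       phi = np.dot(x, growth)        # mean fitness term: keeps sum(x) == 1
%%       return x + dt * x * (growth - phi)
%%
%%   x = np.array([0.4, 0.3, 0.2, 0.1])   # initial relative concentrations
%%   k = np.array([1.0, 1.2, 0.9, 1.1])   # catalytic rate constants
%%   for _ in range(10000):
%%       x = hypercycle_step(x, k)
%%   print(x, x.sum())                    # cooperative coexistence; sum stays 1
%%
%% The property the paper borrows is the cooperative closure: no species can
%% outgrow the cycle because each depends on its predecessor, which is the
%% sense in which hypercycles support evolution toward higher-level units.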
@techreport{dp2001-05, Abstract = {Although the notion of conversations has been discussed for some time as a way to provide an abstract representation of extended agent message exchange, there is still no established consensus concerning how to use these abstractions effectively. This paper describes a layered approach based on coloured Petri Nets that can be used for modelling complex, concurrent conversations among agents in a multi-agent system. The approach can be used both to define simple conversation protocols and to define more complex conversation protocols composed of a number of simpler conversations. With this method it is possible (a) to capture the concurrent characteristics of a conversation, (b) to capture the state of a conversation at runtime, and (c) to reuse conversation structures for the processing of multiple concurrent messages. A prototype implementation of such a system with some examples is described.}, Address = {Dunedin, New Zealand}, Author = {Mariusz Nowostawski and Martin K. Purvis and Stephen Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:49 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {agent communication languages, conversations, conversation protocols, Petri Nets, conversation monitoring and visualising}, Month = mar, Number = {2001/05}, Size = {216 KB}, Title = {A layered approach for modelling agent conversations}, Type = {Discussion paper}, Year = {2001}} @techreport{dp2005-06, Abstract = {In this paper we describe a graphical notation for physical database modelling. This notation provides database administrators with a means to model the physical structure of new and existing databases, thus enabling them to make more proactive and informed tuning decisions, compared to existing database monitoring tools.}, Address = {Dunedin, New Zealand}, Author = {Antonia Pillay and Nigel Stanger}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:49 +1200}, Institution = {Department of Information Science, University of Otago}, Month = jun, Number = {2005/06}, Size = {337 KB}, Title = {A graphical notation for physical database modelling}, Type = {Discussion paper}, Year = {2005}} @techreport{dp2002-01, Abstract = {The Opal architecture for software development is described, which supports the use of agent-oriented concepts at multiple levels of abstraction. At the lowest level are micro-agents, streamlined agents that can be used for conventional, system-level programming tasks. More sophisticated agents may be constructed by assembling combinations of micro-agents. The architecture consequently supports the systematic use of agent-based notions throughout the software development process. The paper describes (a) the implementation of micro-agents in Java, (b) how they have been used to fashion the Opal framework for the construction of more complex agents based on the Foundation for Intelligent Physical Agents (FIPA) specifications, and (c) the Opal Conversation Manager that facilitates the capability of agents to conduct complex conversations with other agents.}, Address = {Dunedin, New Zealand}, Author = {Martin Purvis and Stephen Cranefield and Mariusz Nowostawski and Dan Carter}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:49 +1200}, Institution = {Department of Information Science, University of Otago}, Month = mar, Number = {2002/01}, Size = {537 KB}, Title = {Opal: {A} multi-level infrastructure for agent-oriented software development}, Type = {Discussion paper}, Year = {2002}}
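%% The micro-agent idea in entry dp2002-01 above (and dp2001-06 earlier) can be
%% illustrated with a rough sketch. Opal itself is a Java framework; the class
%% and method names below are invented for illustration and are not the Opal
%% API:
%%
%%   class MicroAgent:
%%       """A streamlined agent: a named unit that handles one kind of task."""
%%       def __init__(self, kind, handler):
%%           self.kind = kind
%%           self.handler = handler
%%       def handle(self, task):
%%           return self.handler(task["data"])
%%
%%   class CompositeAgent:
%%       """An agent assembled from parts; routes each task along its 'route'."""
%%       def __init__(self, kind):
%%           self.kind = kind
%%           self.parts = {}
%%       def add(self, part):
%%           self.parts[part.kind] = part
%%       def handle(self, task):
%%           head = task["route"].pop(0)   # peel one level of the routing path
%%           return self.parts[head].handle(task)
%%
%%   parser = MicroAgent("parse", lambda data: data.split())
%%   io = CompositeAgent("io")
%%   io.add(parser)
%%   top = CompositeAgent("top")
%%   top.add(io)
%%   top.handle({"route": ["io", "parse"], "data": "request doc-list"})
%%   # -> ['request', 'doc-list']
%%
%% Hierarchical refinement, as described in the abstracts, corresponds to
%% nesting: a CompositeAgent exposes the same handle() interface as a
%% MicroAgent, so it can itself be registered as a part of a larger agent.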
@techreport{dp2002-04, Abstract = {An area where multi-agent systems can be put to effective use is that of an open collection of autonomous problem solvers in a dynamically changing environment. One example of such a situation is that of environmental management and emergency response, which can require the joint cooperation of a distributed set of components, each one of which may be specialised for a specific task or problem domain. The various stakeholders in the process can all be represented and interfaced by software agents which collaborate with each other toward achieving a particular goal. For such situations, new agents that arrive on the scene must be apprised of the group interaction protocols so that they can cooperate effectively with the existing agents. In this paper we show how this can be done by using coloured Petri net representations for each role in an interaction protocol and passing these nets dynamically to new agents that wish to participate in a group interaction. We argue that multi-agent systems are particularly suited for such dynamically changing environments, but their effectiveness depends on their ability to use adaptive interaction protocols.}, Address = {Dunedin, New Zealand}, Author = {Martin Purvis and Stephen Cranefield and Maryam Purvis and Mariusz Nowostawski}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:49 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {multi-agent systems, agent conversations, adaptive systems}, Month = jul, Number = {2002/04}, Size = {150 KB}, Title = {Multi-agent system interaction protocols in a dynamically changing environment}, Type = {Discussion paper}, Year = {2002}} @techreport{dp2002-02, Abstract = {Environmental management and emergency response often involves the joint cooperation of a network of distributed problem solvers, each of which may be specialised for a specific task or problem domain. Some of these problem solvers could be human, others could be `intelligent' environmental monitoring and control systems. Environmental software systems are needed not only for the provision of basic environmental information but also to support the coordination of these problem solvers. An agent architecture can support the requirements associated with disparate problem solvers. The various stakeholders in the process are represented by software agents which can collaborate with each other toward achieving a particular goal. The communication between agents can be accomplished by using interaction protocols which are represented by coloured Petri nets (CPN). This paper describes an approach for providing this support by employing a software agent framework for the modelling and execution of environmental process tasks in a networked environment.}, Address = {Dunedin, New Zealand}, Author = {Martin Purvis and Peter Hwang and Maryam Purvis and Stephen Cranefield and Martin Schievink}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:49 +1200}, Institution = {Department of Information Science, University of Otago}, Month = apr, Number = {2002/02}, Size = {121 KB}, Title = {Interaction protocols for a network of environmental problem solvers}, Type = {Discussion paper}, Year = {2002}}
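%% A minimal sketch of the coloured Petri net machinery behind entries dp2002-04
%% and dp2002-02 above (an illustrative toy of our own, not the CPN tooling the
%% papers use; the token/place/transition encoding is invented here):
%%
%%   # A marking maps each place name to a list of coloured tokens.
%%   # A transition consumes one token from each input place and produces
%%   # tokens in output places; its guard inspects the token colours.
%%
%%   def fire(marking, transition):
%%       """Fire one transition if enabled; return the new marking, else None."""
%%       inputs, guard, outputs = transition
%%       new = {p: list(ts) for p, ts in marking.items()}
%%       taken = []
%%       for place in inputs:
%%           if not new[place]:
%%               return None               # not enabled: an input place is empty
%%           taken.append(new[place].pop(0))
%%       if not guard(taken):
%%           return None                   # guard rejects these token colours
%%       for place, colour in outputs(taken):
%%           new[place].append(colour)
%%       return new
%%
%%   # Role net for a responder in a simple request protocol:
%%   marking = {"awaiting": [("request", "agent-A")], "replied": []}
%%   accept = (["awaiting"],
%%             lambda ts: ts[0][0] == "request",
%%             lambda ts: [("replied", ("agree", ts[0][1]))])
%%   marking = fire(marking, accept)
%%   # marking["replied"] == [("agree", "agent-A")]
%%
%% Passing such a net to a newly arrived agent, as dp2002-04 describes, amounts
%% to shipping the transition definitions for one role of the protocol.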
@techreport{dp2000-12, Abstract = {Workflow management systems are increasingly used to assist the automation of business processes that involve the exchange of documents, information, or task execution results. Recent developments in distributed information system technology now make it possible to extend the workflow management system idea to much wider spheres of activity in the industrial and commercial world. This paper describes a framework under development that employs such technology so that software tools and processes may interoperate in a distributed and dynamic environment. Key technical elements of the framework include the use of coloured Petri nets and distributed object technology (CORBA).}, Address = {Dunedin, New Zealand}, Author = {Martin K. Purvis and Maryam A. Purvis and Selena Lemalu}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:49 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {distributed systems, workflow, process modelling}, Month = aug, Number = {2000/12}, Size = {195 KB}, Title = {An adaptive distributed workflow system framework}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2000-20, Abstract = {Workflow management systems (WFMS) are being adopted to assist the automation of business processes that involve the exchange of information. As a result of developments in distributed information system technology, it is now possible to extend the WFMS idea to wider spheres of activity in the industrial and commercial world and thereby to encompass the increasingly sprawling nature of modern organisations. This paper describes a framework under development that employs such technology so that software tools and processes may interoperate in a distributed and dynamic environment. The framework employs Petri nets to model the interaction between various sub-processes. CORBA technology is used to enable different participants who are physically disparate to monitor activity in and make resource-level adaptations to their particular subnet.}, Address = {Dunedin, New Zealand}, Author = {Martin K. Purvis and Maryam A. Purvis and Selena Lemalu}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:50 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {distributed systems, workflow, process modelling, Petri nets}, Month = dec, Number = {2000/20}, Size = {199 KB}, Title = {A framework for distributed workflow systems}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2006-05, Abstract = {This paper describes efforts to facilitate collaborative work in a distributed environment by providing infrastructure that facilitates the understanding of the inter-connected processes involved and how they interact. In this work we describe how our agent-based framework supports these activities. This distributed work environment makes use of both P2P and client-server architectures. Using an example of developing an open source software system, we explain how a collaborative work environment can be achieved. In particular we address how support for coordination, collaboration and communication is provided using our framework.}, Address = {Dunedin, New Zealand}, Author = {Maryam Purvis and Martin Purvis and Bastin Tony Roy Savarimuthu}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:55:24 +1200}, Institution = {Department of Information Science, University of Otago}, Month = feb, Number = {2006/05}, Size = {361 KB}, Title = {Facilitating collaboration in a distributed software development environment using {P2P} architecture}, Type = {Discussion paper}, Year = {2006}} @techreport{dp2007-01, Abstract = {Norms are shared expectations of behaviours that exist in human societies. Norms help societies by increasing the predictability of individual behaviours and by improving co-operation and collaboration among members. Norms have been of interest to multi-agent system researchers as software agents are intended to follow certain norms. But, owing to their autonomy, agents sometimes violate norms, which necessitates monitoring. There are two main branches of research in normative agent systems.
One of the branches focuses on normative agent architectures, norm representations, norm adherence and the associated punitive or incentive measures. The other branch focuses on two main issues. The first issue is the study of the spreading and internalization of norms. The second issue, which has not received much attention, is the emergence of norms in agent societies. Our objective in this paper is to propose mechanisms for norm emergence in artificial agent societies and provide initial experimental results.}, Address = {Dunedin, New Zealand}, Author = {Bastin Tony Roy Savarimuthu and Maryam Purvis and Stephen Cranefield and Martin Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:56:07 +1200}, Institution = {Department of Information Science, University of Otago}, Month = feb, Number = {2007/01}, Size = {216 KB}, Title = {How do norms emerge in multi-agent societies? {M}echanisms design}, Type = {Discussion paper}, Year = {2007}} @techreport{dp2006-04, Abstract = {With the advent of Web Services, more and more business organizations make their services available on the Internet through Web Services and also use other services that are available on the corporate Intranet. From the viewpoint of workflow systems, these freely available Web Services and the proprietary intranet-based services should be integrated into individual businesses for their day-to-day workflows. Businesses that use Web Services not only provide the services to their customers but can also use Web Services to customize their internal processing, such as online order placement for raw materials. In this paper we describe the architecture of our agent-based workflow system that can be used for Web Service composition. In the context of an example from the apparel manufacturing industry, we demonstrate how Web Services can be composed and used.}, Address = {Dunedin, New Zealand}, Author = {Bastin Tony Roy Savarimuthu and Maryam Purvis and Martin Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:50 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {web services, multi-agent systems, workflow systems}, Month = feb, Number = {2006/04}, Size = {565 KB}, Title = {Agent based web service composition in the context of a supply-chain based workflow}, Type = {Discussion paper}, Year = {2006}} @techreport{dp2005-05, Abstract = {Rapid changes in the business environment call for more flexible and adaptive workflow systems. Researchers have proposed that Workflow Management Systems (WfMSs) comprising multiple agents can provide these capabilities. We have developed a multi-agent based workflow system, JBees, which supports distributed process models and the adaptability of executing processes. Modern workflow systems should also have the flexibility to integrate available web services as they are updated. In this paper we discuss how our agent-based architecture can be used to bind and access web services in the context of executing a workflow process model.
We use an example from the diamond processing industry to show how our agent architecture can be used to integrate web services with WfMSs.}, Address = {Dunedin, New Zealand}, Author = {Bastin Tony Roy Savarimuthu and Maryam Purvis and Martin Purvis and Stephen Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:50 +1200}, Institution = {Department of Information Science, University of Otago}, Month = mar, Number = {2005/05}, Size = {433 KB}, Title = {Agent-based integration of web services with workflow management systems}, Type = {Discussion paper}, Year = {2005}} @techreport{dp2006-10, Abstract = {We present an empirical study on classical music instrument classification. A methodology with feature extraction and evaluation is proposed and assessed with a number of experiments, whose final stage is to detect instruments in solo passages. Feature selection produced similar but distinct feature rankings for individual tone classification and for instrument recognition in solo passages. Based on the feature selection results, excerpts from concerto and sonata files are processed so as to detect and distinguish four major instruments in solo passages: trumpet, flute, violin, and piano. Nineteen features selected from the Mel-frequency cepstral coefficients (MFCC) and the MPEG-7 audio descriptors achieve a recognition rate of around 94% by the best classifier assessed by cross validation.}, Address = {Dunedin, New Zealand}, Author = {Christian Simmermacher and Da Deng and Stephen Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:51 +1200}, Institution = {Department of Information Science, University of Otago}, Month = may, Number = {2006/10}, Size = {206 KB}, Title = {Feature analysis and classification of classical musical instruments: {A}n empirical study}, Type = {Discussion paper}, Year = {2006}}
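%% The feature-plus-classifier pipeline of entry dp2006-10 above is easy to
%% sketch with current tooling. This is a generic reconstruction, not the
%% paper's exact setup: the paper combines MFCC with MPEG-7 descriptors and
%% selects 19 features, while this sketch uses mean MFCCs only, and the use of
%% librosa and scikit-learn is our choice, not the authors'. The file paths are
%% placeholders for a labelled corpus of solo passages:
%%
%%   import numpy as np
%%   import librosa
%%   from sklearn.svm import SVC
%%   from sklearn.model_selection import cross_val_score
%%
%%   def mfcc_features(path, n_mfcc=13):
%%       """Summarise one audio file as its mean MFCC vector over all frames."""
%%       y, sr = librosa.load(path, sr=None)
%%       mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=n_mfcc)
%%       return mfcc.mean(axis=1)
%%
%%   instruments = ["trumpet", "flute", "violin", "piano"]
%%   paths = [f"{inst}_{i}.wav" for inst in instruments for i in (1, 2)]
%%   labels = [inst for inst in instruments for _ in (1, 2)]
%%   X = np.vstack([mfcc_features(p) for p in paths])
%%   scores = cross_val_score(SVC(kernel="rbf"), X, labels, cv=2)
%%   print("cross-validated accuracy:", scores.mean())
%%
%% The abstract's roughly 94% figure comes from the authors' selected features
%% and best classifier; a bare-bones pipeline like this would need the MPEG-7
%% descriptors and the feature selection step to approach it.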
@techreport{dp2000-11, Abstract = {An important part of the systems development process is building models of real-world phenomena. These phenomena are described by many different kinds of information, and this diversity has resulted in a wide variety of modelling representations. Some types of information are better expressed by some representations than others, so it is sensible to use multiple representations to describe a real-world phenomenon. The author has developed an approach to facilitating the use of multiple representations within a single viewpoint by translating descriptions of the viewpoint among different representations. An important issue with such translations is their quality, or how well they map constructs of one representation to constructs of another representation. Two possible methods for improving translation quality, heuristics and enrichment, are proposed in this paper, and a preliminary metric for measuring relative translation quality is described.}, Address = {Dunedin, New Zealand}, Author = {Nigel Stanger}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:51 +1200}, Institution = {Department of Information Science, University of Otago}, Month = may, Number = {2000/11}, Size = {474 KB}, Title = {Translating descriptions of a viewpoint among different representations}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2000-09, Abstract = {When modelling a real-world phenomenon, it can often be useful to have multiple descriptions of the phenomenon, each expressed using a different modelling approach or representation. Different representations such as entity-relationship modelling, data flow modelling and use case modelling allow analysts to describe different aspects of real-world phenomena, thus providing a more thorough understanding than if a single representation were used. Researchers working with multiple representations have approached the problem from many different fields, resulting in a diverse and potentially confusing set of terminologies. This paper describes a viewpoint-based framework for discussing the use of multiple modelling representations to describe real-world phenomena. This framework provides a consistent and integrated terminology for researchers working with multiple representations. An abstract notation is also defined for expressing concepts within the framework.}, Address = {Dunedin, New Zealand}, Author = {Nigel Stanger}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:51 +1200}, Institution = {Department of Information Science, University of Otago}, Month = apr, Number = {2000/09}, Size = {478 KB}, Title = {A viewpoint-based framework for discussing the use of multiple modelling representations}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2006-07, Abstract = {A fully functional and publicly available digital institutional repository (IR) in the space of just ten days? The technology was available, the time was right, the team was right and technical assistance from colleagues in Australia was on hand a mere cyber call away. This paper reports on how we were able to ``hit the ground running'' in building an open access IR in such a short space of time. What has taken our breath away is not so much the speed of the process, but the scale of responsiveness from the Internet community. Consequently, we also consider the research impact of more than 18,000 downloads from eighty countries, less than three months into the project!}, Address = {Dunedin, New Zealand}, Author = {Nigel Stanger and Graham McGregor}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:51 +1200}, Institution = {Department of Information Science, University of Otago}, Month = mar, Number = {2006/07}, Size = {327 KB}, Title = {Hitting the ground running: {B}uilding {N}ew {Z}ealand's first publicly available institutional repository}, Type = {Discussion paper}, Year = {2006}} @techreport{dp2000-03, Abstract = {We report on the clustering of nodes in internally represented acoustic space. Learners of different languages partition perceptual space distinctly. Here, an Evolving Connectionist-Based System (ECOS) is used to model the perceptual space of New Zealand English. Currently, the system evolves in an unsupervised, self-organising manner. The perceptual space can be visualised, and the important features of the input patterns analysed. Additionally, the path of the internal representations can be seen.
The results here will be used to develop a supervised system that can be used for speech recognition based on the evolved, internal sub-word units.}, Address = {Dunedin, New Zealand}, Author = {John Taylor and Nikola Kasabov and Richard Kilgour}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:51 +1200}, Institution = {Department of Information Science, University of Otago}, Month = mar, Number = {2000/03}, Size = {125 KB}, Title = {Modelling the emergence of speech sound categories in evolving connectionist systems}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2003-04, Abstract = {Accurate effort prediction is often an important factor for successful software development. However, the diversity of software development tools observed today has resulted in a situation where existing effort prediction models' applicability appears to be limited. Data-centred fourth-generation-language (4GL) software development provides one such difficulty. This paper aims to construct an accurate effort prediction model for data-centred 4GL development where a specific tool suite is used. Using historical data collected from 17 systems developed in the target environment, several linear regression models are constructed and evaluated in terms of two commonly used prediction accuracy measures, namely the mean magnitude of relative error (MMRE) and pred measures. In addition, R^2, the maximum value of MRE, and statistics of the absolute residuals are used for comparing the models. The results show that models consisting of specification-based software size metrics, which were derived from Entity Relationship Diagrams (ERDs) and Function Hierarchy Diagrams (FHDs), achieve good prediction accuracy in the target environment. The models' good effort prediction ability is particularly beneficial because specification-based metrics usually become available at an early stage of development. This paper also investigates the effect of developers' productivity on effort prediction and finds that including productivity improves the models' prediction accuracy further. However, additional studies will be required in order to establish the best productivity-inclusive effort prediction model.}, Address = {Dunedin, New Zealand}, Author = {Chikako van Koten}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:51 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {prediction systems, 4GL, effort, metrics, empirical analysis}, Month = nov, Number = {2003/04}, Size = {398 KB}, Title = {An effort prediction model for data-centred fourth-generation-language software development}, Type = {Discussion paper}, Year = {2003}}
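%% The MMRE and pred measures used in entry dp2003-04 above (and in several of
%% the effort prediction entries that follow) are compact enough to state in
%% code. A minimal sketch under the standard definitions, where MRE =
%% |actual - predicted| / actual and pred(q) is the fraction of cases with
%% MRE <= q (commonly q = 0.25); the effort values below are invented:
%%
%%   def mre(actual, predicted):
%%       """Magnitude of relative error for one project."""
%%       return abs(actual - predicted) / actual
%%
%%   def mmre(actuals, predictions):
%%       """Mean magnitude of relative error over a set of projects."""
%%       errors = [mre(a, p) for a, p in zip(actuals, predictions)]
%%       return sum(errors) / len(errors)
%%
%%   def pred(actuals, predictions, q=0.25):
%%       """Proportion of projects whose MRE is within q."""
%%       errors = [mre(a, p) for a, p in zip(actuals, predictions)]
%%       return sum(e <= q for e in errors) / len(errors)
%%
%%   effort_actual = [120.0, 80.0, 200.0]       # person-hours (made-up values)
%%   effort_model = [100.0, 90.0, 180.0]
%%   print(mmre(effort_actual, effort_model))   # ~0.13
%%   print(pred(effort_actual, effort_model))   # 1.0: all MREs within 25%
%%
%% A model is conventionally judged acceptable when MMRE <= 0.25 and
%% pred(0.25) >= 0.75, which is why these papers report both measures.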
@techreport{dp2005-08, Abstract = {Constructing an accurate effort prediction model is a challenge in Software Engineering. This paper presents new Bayesian statistical models for predicting the development effort of software systems in the International Software Benchmarking Standards Group (ISBSG) dataset. The first model is a Bayesian linear regression (BR) model and the second model is a Bayesian multivariate normal distribution (BMVN) model. Both models are calibrated using subsets randomly sampled from the dataset. The models' predictive accuracy is evaluated using other subsets, which consist of only the cases unknown to the models. The predictive accuracy is measured in terms of the absolute residuals and magnitude of relative error. They are compared with the corresponding linear regression models. The results show that the Bayesian models have predictive accuracy equivalent to the linear regression models, in general. However, the advantage of the Bayesian statistical models is that they do not require a calibration subset as large as their regression counterparts. In the case of the ISBSG dataset it is confirmed that the predictive accuracy of the Bayesian statistical models, in particular the BMVN model, is significantly better than the linear regression model when the calibration subset consists of only five or fewer software systems. This finding justifies the use of Bayesian statistical models in software effort prediction, in particular when the system of interest has only a very small amount of historical data.}, Address = {Dunedin, New Zealand}, Author = {Chikako van Koten}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:52 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {effort prediction, Bayesian statistics, regression, software metrics}, Month = oct, Number = {2005/08}, Size = {287 KB}, Title = {Bayesian statistical models for predicting software development effort}, Type = {Discussion paper}, Year = {2005}} @techreport{dp2005-02, Abstract = {As the number of object-oriented software systems increases, it becomes more important for organizations to maintain those systems effectively. However, currently only a small number of maintainability prediction models are available for object-oriented systems. This paper presents a Bayesian network maintainability prediction model for an object-oriented software system. The model is constructed using object-oriented metric data in Li and Henry's datasets, which were collected from two different object-oriented systems. Prediction accuracy of the model is evaluated and compared with commonly used regression-based models. The results suggest that the Bayesian network model can predict maintainability more accurately than the regression-based models for one system, and almost as accurately as the best regression-based model for the other system.}, Address = {Dunedin, New Zealand}, Author = {Chikako van Koten and Andrew Gray}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:54:16 +1200}, Institution = {Department of Information Science, University of Otago}, Month = mar, Number = {2005/02}, Size = {287 KB}, Title = {An application of {B}ayesian network for predicting object-oriented software maintainability}, Type = {Discussion paper}, Year = {2005}}
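%% The Bayesian prediction models in entries dp2005-08 and dp2005-02 above can
%% be illustrated with the simplest conjugate case. This sketch is our own
%% construction, not the papers' models (which are calibrated on the ISBSG and
%% Li and Henry datasets): a Gaussian prior on the coefficients with known
%% noise variance gives a closed, ridge-like posterior mean that stays usable
%% with very few calibration points, which is the property the papers exploit:
%%
%%   import numpy as np
%%
%%   def bayes_linreg_posterior_mean(X, y, noise_var=1.0, prior_var=10.0):
%%       """Posterior mean of w under w ~ N(0, prior_var * I) and
%%       y | X, w ~ N(Xw, noise_var * I)."""
%%       d = X.shape[1]
%%       A = X.T.dot(X) + (noise_var / prior_var) * np.eye(d)
%%       return np.linalg.solve(A, X.T.dot(y))
%%
%%   # Tiny calibration set: size metric -> effort (made-up numbers).
%%   X = np.array([[1.0, 10.0], [1.0, 25.0], [1.0, 40.0]])  # intercept, size
%%   y = np.array([120.0, 260.0, 410.0])
%%   w = bayes_linreg_posterior_mean(X, y)
%%   print(w)                                # prior-shrunk intercept and slope
%%   print(np.dot(np.array([1.0, 30.0]), w)) # predicted effort, size-30 system
%%
%% With three calibration points ordinary least squares is still usable, but as
%% the calibration set shrinks further the prior keeps the estimate finite and
%% stable, matching the papers' finding that the Bayesian models win when fewer
%% than about five systems are available.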
@techreport{dp2005-09, Abstract = {Constructing an accurate effort prediction model is a challenge in Software Engineering. This paper presents three Bayesian statistical software effort prediction models for database-oriented software systems, which are developed using a specific 4GL tool suite. The models consist of specification-based software size metrics and a development team productivity metric. The models are constructed based on the subjective knowledge of a human expert and calibrated using empirical data collected from 17 software systems developed in the target environment. The models' predictive accuracy is evaluated using subsets of the same data, which were not used for the models' calibration. The results show that the models have achieved very good predictive accuracy in terms of MMRE and pred measures. Hence it is confirmed that the Bayesian statistical models can predict effort successfully in the target environment. In comparison with commonly used multiple linear regression models, the Bayesian statistical models' predictive accuracy is equivalent in general. However, when the number of software systems used for the models' calibration becomes smaller than five, the predictive accuracy of the best Bayesian statistical model is significantly better than that of the multiple linear regression model. This result suggests that the Bayesian statistical models would be a better choice when software organizations/practitioners do not possess sufficient empirical data for the models' calibration. The authors expect these findings to encourage more researchers to investigate the use of Bayesian statistical models for predicting software effort.}, Address = {Dunedin, New Zealand}, Author = {Chikako van Koten and Andrew Gray}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:54:38 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {effort prediction, 4GL, Bayesian statistics, regression, software metrics}, Month = oct, Number = {2005/09}, Size = {331 KB}, Title = {Bayesian statistical effort prediction models for data-centred {4GL} software development}, Type = {Discussion paper}, Year = {2005}} @techreport{dp2003-03, Abstract = {A modularised connectionist model, based on the Mixture of Experts (ME) algorithm for time series prediction, is introduced. A set of connectionist modules learn to be local experts over some commonly appearing states of a time series. The dynamics for mixing the experts is a Markov process, in which the states of a time series are regarded as states of an HMM. Hence, there is a Markov chain along the time series and each state is associated with a local expert. The state transition on the Markov chain is the process of activating a different local expert or activating some of them simultaneously by different probabilities generated from the HMM. The state transition property in the HMM is designed to be time-variant and conditional on the first-order dynamics of the time series. A modified Baum--Welch algorithm is introduced for the training of the time-variant HMM, and it has been proved that by the EM process the likelihood function will converge to a local maximum. Experiments with two time series show this approach achieves significant improvement in the generalisation performance over global models.}, Address = {Dunedin, New Zealand}, Author = {Xin Wang and Peter Whigham and Da Deng}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:53:27 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {series prediction, Mixture of Experts, HMM, connectionist model, expectation and maximization, Gauss probability density distribution}, Month = jun, Number = {2003/03}, Size = {486 KB}, Title = {Time-line {H}idden {M}arkov {E}xperts and its application in time series prediction}, Type = {Discussion paper}, Year = {2003}}
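%% The gating mechanism in entry dp2003-03 above (local experts mixed by an HMM
%% over time) can be sketched compactly. This is a generic illustration of HMM
%% filtered state probabilities used as mixture weights, not the paper's
%% time-variant HMM or its modified Baum--Welch training; all numbers are toy
%% values:
%%
%%   import numpy as np
%%
%%   def forward_filter(pi, A, likelihoods):
%%       """Filtered state probabilities p(state_t | obs_1..t) for an HMM.
%%       pi: initial distribution (K,); A: transition matrix (K, K);
%%       likelihoods: observation likelihood under each state, shape (T, K)."""
%%       alpha = pi * likelihoods[0]
%%       alpha /= alpha.sum()
%%       weights = [alpha]
%%       for lik in likelihoods[1:]:
%%           alpha = np.dot(alpha, A) * lik   # predict one step, then update
%%           alpha /= alpha.sum()
%%           weights.append(alpha)
%%       return np.array(weights)
%%
%%   expert_preds = np.array([[0.9, 1.4], [1.1, 1.6], [1.0, 1.8]])  # (T, K)
%%   pi = np.array([0.5, 0.5])
%%   A = np.array([[0.9, 0.1], [0.2, 0.8]])
%%   liks = np.array([[0.8, 0.1], [0.6, 0.3], [0.2, 0.7]])
%%   W = forward_filter(pi, A, liks)
%%   mixture = (W * expert_preds).sum(axis=1)  # HMM-weighted expert mixture
%%
%% Training in the paper replaces the fixed matrix A above with a time-variant
%% transition law conditioned on the series' first-order dynamics, fitted with
%% a modified Baum--Welch (EM) procedure.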
@techreport{dp2000-10, Abstract = {Most applications of Genetic Programming to time series modeling use a fitness measure for comparing potential solutions that treats each point in the time series independently. This non-temporal approach can lead to some potential solutions being given a relatively high fitness measure even though they do not correspond to the training data when the overall shape of the series is taken into account. This paper develops two fitness measures which emphasize the concept of shape when measuring the similarity between a training and an evolved time series. One approach extends the root mean square error to higher-order derivatives of the series. The second approach uses a simplified derivative concept that describes shape in terms of positive, negative and zero slope.}, Address = {Dunedin, New Zealand}, Author = {Peter Whigham and Colin Aldridge}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:52 +1200}, Institution = {Department of Information Science, University of Otago}, Month = apr, Number = {2000/10}, Size = {561 KB}, Title = {A shape metric for evolving time series models}, Type = {Discussion paper}, Year = {2000}} @techreport{dp2005-04, Abstract = {Cost is a major obstacle to the adoption of large-scale data integration solutions by small to medium enterprises (SMEs). We therefore propose a lightweight data integration architecture built around the Atom XML syndication format, which may provide a cost-effective alternative technology for SMEs to facilitate data integration, compared to expensive enterprise-grade systems. The paper discusses the underlying principles and motivation for the architecture, the structure of the architecture itself, and our research goals.}, Address = {Dunedin, New Zealand}, Author = {David Williamson and Nigel Stanger}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:54:25 +1200}, Institution = {Department of Information Science, University of Otago}, Month = mar, Number = {2005/04}, Size = {301 KB}, Title = {A lightweight data integration architecture using {A}tom}, Type = {Discussion paper}, Year = {2005}} @techreport{dp2005-10, Abstract = {In visual perception, finding regions of interest in a scene is very important in carrying out visual tasks. Recently there have been a number of works proposing saliency detectors and visual attention models. In this paper, we propose an extensible visual attention framework based on MPEG-7 descriptors. Hotspots in an image are detected from the combined saliency map obtained from multiple feature maps at multiple scales. The saliency concept is then further extended and we propose a saliency index for ranking images on their interestingness. Simulations on hotspot detection and automatic image ranking are conducted and statistically tested against a user test. Results show that our method captures more important regions of interest and that the automatic ranking agrees positively with user rankings.}, Address = {Dunedin, New Zealand}, Author = {Heiko Wolf and Da Deng}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:54:51 +1200}, Institution = {Department of Information Science, University of Otago}, Month = dec, Number = {2005/10}, Size = {3.1 MB}, Title = {Image saliency mapping and ranking using an extensible visual attention model based on {MPEG}-7 feature descriptors}, Type = {Discussion paper}, Year = {2005}}
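%% A sketch of the saliency-map combination step in entry dp2005-10 above. This
%% is generic centre-surround-style map mixing; the paper's actual feature maps
%% come from MPEG-7 descriptors, which we do not reproduce, and its saliency
%% index is defined differently in detail:
%%
%%   import numpy as np
%%
%%   def normalise(m):
%%       """Scale a feature map to [0, 1] so maps are comparable before mixing."""
%%       lo, hi = m.min(), m.max()
%%       return (m - lo) / (hi - lo) if hi > lo else np.zeros_like(m)
%%
%%   def combined_saliency(feature_maps, weights=None):
%%       """Combine per-feature maps into one saliency map by a weighted mean."""
%%       maps = [normalise(m) for m in feature_maps]
%%       if weights is None:
%%           weights = [1.0] * len(maps)
%%       total = sum(w * m for w, m in zip(weights, maps))
%%       return total / sum(weights)
%%
%%   def saliency_index(saliency, top_fraction=0.05):
%%       """One possible 'interestingness' score: mean saliency of the hottest
%%       pixels (an illustrative stand-in for the paper's index)."""
%%       flat = np.sort(saliency.ravel())[::-1]
%%       k = max(1, int(len(flat) * top_fraction))
%%       return flat[:k].mean()
%%
%%   rng = np.random.default_rng(0)
%%   colour_map = rng.random((64, 64))    # stand-ins for MPEG-7 feature maps
%%   texture_map = rng.random((64, 64))
%%   s = combined_saliency([colour_map, texture_map])
%%   print(saliency_index(s))             # rank a collection of images by this
%%
%% Ranking images by such an index is what the paper's user test evaluates
%% against human rankings.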
@techreport{dp2001-11, Abstract = {This paper reports on a field study into the nature of decision making in the command and control of emergency ambulances at the London Ambulance Service (LAS). It describes how real-time decisions are made by emergency medical dispatchers and the decision strategies they invoke as they assess the situation, plan and co-ordinate the dispatch of emergency ambulances. A cognitive task analysis approach known as the Critical Decision Method (Hoffman et al., 1998; Klein et al., 1989) was used in the study. The study showed that decision making in emergency ambulance command and control involves four major processes---assessment of the situation, assessment of resources, planning, and co-ordination and control. These four processes function within an awareness of goings-on in and around the sectors that the dispatchers operate in. This awareness is referred to as situation awareness and is reported elsewhere (Wong {\&} Blandford, submitted). The decision making process resembles that described by naturalistic decision making models (see (Zsambok {\&} Klein, 1997) for an extensive discussion of the topic) and is an extension of the Integrated Decision Model (Wong, 1999). The study also suggested that a lot of effort was directed at understanding and assessing the situation and at maintaining a constant awareness of the situation. These observations have significant implications for the design of information systems for command and control purposes; these implications will be discussed separately in another paper. The paper first introduces the domain of EMD at the LAS, then explains how the Critical Decision Method was used in the data collection and in the data analysis. It then describes how decisions are made, particularly during major incidents, and discusses the implications of those findings for the design of command and control systems.}, Address = {Dunedin, New Zealand}, Author = {William Wong and Ann Blandford}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:53 +1200}, Institution = {Department of Information Science, University of Otago}, Month = oct, Number = {2001/11}, Size = {204 KB}, Title = {Naturalistic decision making in emergency ambulance command and control}, Type = {Discussion paper}, Year = {2001}} @techreport{dp1999-09, Abstract = {Speech recognition has long been one of the most challenging fields facing scientists, and a complete solution is still far from reach. Substantial industrial funding is concentrated on related and supporting approaches to the final goal: successful speech recognisers free from the constraints of speakers, vocabularies or environment, ready to be applied to the enormous range of waiting applications. The task is not an easy one, owing to the interdisciplinary nature of the problem and because it requires speech perception to be embodied in the recogniser (Speech Understanding Systems), which in turn points strongly to the use of intelligence within the systems. The bare techniques of recognisers (without intelligence) follow a wide variety of approaches, with different claims of success by each group of authors who put their faith in their favourite way. However, the sole technique that has gained acceptance among researchers as the state of the art is the Hidden Markov Model (HMM), agreed to be the most promising one. It might be used successfully with other techniques to improve performance, such as hybridising the HMM with Artificial Neural Network (ANN) algorithms. This does not mean that the HMM is free from approximations that are far from reality, such as the assumed independence of successive observations, but the results and potential of this algorithm are reliable.
Modifications to the HMM take on the burden of releasing it from these poorly representative approximations, in the hope of better results. In this report we describe the backbone of the HMM technique, with the main outlines for successful implementation. Representations and implementations of the HMM vary in one way or another, but the main idea is the same, as are the results and computation costs; choosing one is a matter of preference. Our preference here is that adopted by Ferguson and Rabiner et al. We first describe the Markov chain, and then investigate a very popular model in the speech recognition field (the left-right HMM topology). The mathematical formulations needed for implementation are fully explained, as they are crucial in building the HMM. The prominent factors in the design are also discussed. Finally, we conclude the report with some experimental results showing the practical outcomes of the implemented model.}, Address = {Dunedin, New Zealand}, Author = {Waleed Abdulla and Nikola Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:53 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {May}, Number = {99/09}, Size = {556 KB}, Title = {The concepts of hidden Markov model in speech recognition}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1995-11, Abstract = {Fuzzy concepts might have potential for protecting and preserving land which has special cultural or spiritual significance for indigenous peoples, because they might support any tangata whenua (indigenous peoples) desires for secrecy and confidentiality. These issues are examined in terms of New Zealand and from the technical perspective of Information Science. The various meanings of fuzzy are discussed. Some pertinent questions are: Is a fuzzy concept a useful tool to apply? Do the tangata whenua wish to make use of this tool?}, Address = {Dunedin, New Zealand}, Author = {Brian A. Ballantyne and George L. Benwell and Neil C. Sutherland}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:53 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {August}, Number = {95/11}, Title = {Fuzzy concepts, land and cultural confidentiality}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1993-02, Abstract = {This paper discusses the method of determining heights of mountains during the original geodetic survey of Victoria. From 1840 to 1875, more particularly in the 1860s, geodetic surveyors were charged with the responsibility of mapping the colony. The subject of this paper is their efforts to determine elevations by barometric heighting. A brief introduction to other methods is given, while particular attention is paid to the determination of the height of Mount Sabine in the Otway Ranges, Victoria, by Surveyor Irwin in 1865. Attempts are made to recompute his original observations.}, Address = {Dunedin, New Zealand}, Author = {George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:54 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {November}, Number = {93/2}, Size = {770 KB}, Title = {Recomputing historical barometric heighting}, Type = {Discussion paper}, Year = {1993}}
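%% The barometric heighting recomputed in entry dp1993-02 above rests on the
%% hypsometric formula, which is simple enough to state in code. A minimal
%% sketch (the constants are standard; the pressure and temperature readings
%% below are invented for illustration and are not Surveyor Irwin's 1865
%% observations):
%%
%%   import math
%%
%%   R_DRY_AIR = 287.05   # specific gas constant for dry air, J/(kg K)
%%   G = 9.80665          # standard gravity, m/s^2
%%
%%   def height_difference(p_lower, p_upper, mean_temp_c):
%%       """Height difference (m) between two pressure readings, assuming an
%%       isothermal layer at the mean of the two stations' temperatures."""
%%       t_kelvin = mean_temp_c + 273.15
%%       return (R_DRY_AIR * t_kelvin / G) * math.log(p_lower / p_upper)
%%
%%   # Example: 1013.2 hPa at the base station, 948.1 hPa at the summit,
%%   # mean air temperature 12 degrees C:
%%   print(round(height_difference(1013.2, 948.1, 12.0)))   # ~554 m
%%
%% Nineteenth-century practice baked the same relation into tabulated
%% corrections for temperature and humidity, so recomputing old observations
%% is essentially a matter of re-evaluating this formula with modern constants.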
@techreport{dp1994-06, Abstract = {This paper describes the creation of a system development methodology suitable for spatial information systems. The concept is substantiated by the fact that spatial systems are similar to information systems in general; the subtle difference is that spatial systems are not yet readily supported by large digital databases. This fact has diverted attention away from system development to data collection. A spatial system development methodology is derived, based on a historical review of information systems methodologies, coupled with a data collection and integration methodology for spatially referenced digital data.}, Address = {Dunedin, New Zealand}, Author = {George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:54 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {February}, Number = {94/6}, Size = {1.3 MB}, Title = {A system development methodology for geomatics as derived from informatics}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1996-25a, Abstract = {There is continuing pressure to develop spatial information systems. This paper develops two concepts that could emerge. The first is a new spatial paradigm---an holistic model---which is less of an abstraction from reality than current models. The second is the concept of federated databases for improved and transparent access to data by disparate users. The latter concept is hardly new and is included in this paper to emphasize its growing importance. These two developments are presented after an introductory discussion of the present state of the discipline of geographical information systems and spatial analysis.}, Address = {Dunedin, New Zealand}, Author = {George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 17:08:00 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Note = {(Not in electronic version.)}, Number = {96/25a}, Title = {Spatial databases---{C}reative future concepts and use}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1994-03, Abstract = {Petri nets, as a modelling formalism, are utilised for the analysis of processes, whether for explicit understanding, database design or business process re-engineering. The formalism, however, can be represented on a virtual continuum from highly graphical to largely algorithmic. The use and understanding of the formalism will, in part, therefore depend on the resultant complexity and power of the representation and on the graphical or algorithmic preference of the user. This paper develops a metric which will indicate the graphical or algorithmic tendency of hierarchical coloured Petri nets.}, Address = {Dunedin, New Zealand}, Author = {George L. Benwell and Stephen G. MacDonell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:20:30 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {February}, Number = {94/3}, Size = {427 KB}, Title = {Assessing the graphical and algorithmic structure of hierarchical coloured {P}etri net models}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1996-01, Abstract = {The creation of spatial information systems can be viewed from many directions. One such view is to see the creation in terms of data collection, data modelling, codifying spatial processes, information management, analysis and presentation. The amount of effort to create such systems is frequently under-estimated; this is true for each aspect of the above view.
The accuracy of the assessment of effort will vary for each aspect. This paper concentrates on the effort required to create the code for spatial processes and analysis. Recent experience has indicated that this is an area where considerable under-estimation is occurring. The function point analysis presented in this paper provides a reliable metric for spatial systems developers to assess required effort based on spatial data models.}, Address = {Dunedin, New Zealand}, Author = {George L. Benwell and Stephen G. MacDonell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:54 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {January}, Number = {96/01}, Title = {Using data models to estimate required effort in creating a spatial information system}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1995-14, Abstract = {In New Zealand the management of the environment is now largely embodied in the Resource Management Act. Within this context there is a clear need to support regionally significant decisions. Furthermore, it is important that such decisions are scale invariant, that is, they are appropriately implementable at the micro and macro levels. This demands that decision makers at these diametrically opposed levels are cognisant of the influence of their domain on other domains---a difficult concept. It also implies that there is consensus on what the significant regional decisions are, and on how decisions and consequences interact across all scales and, possibly, even regions. As a region is a scale-dependent term, it is important that the different views can be perceived and conveyed to the different proponents and opponents. This paper develops the case that it is important to make appropriate use of technology when attempting to make decisions at the regional level. This is particularly so in the fragile environments of the high country of southern New Zealand. Furthermore, this paper draws on the Six Thinking Hats concept of E. de Bono in developing a simulation modelling tool which presents interactive management scenarios of agricultural areas of the high country. The modelling concept is presented along with the reasons for adopting the de Bono concept.}, Address = {Dunedin, New Zealand}, Author = {George L. Benwell and Tim Fletcher and Carolyne B. Smith}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:55 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {September}, Number = {95/14}, Title = {Integrating modelling and simulation into a problem solving paradigm for improved regional and environmental decision making}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1996-22, Abstract = {The Soft Systems Methodology (SSM) was used to identify requirements for the development of one or more information systems for a local company. The outcome of using this methodology was the development of three multimedia information systems. This paper discusses the use of the SSM when developing for multimedia environments. Namely, this paper covers the problems with traditional methods of requirements analysis (which the SSM addresses), how the SSM can be used to elicit multimedia information system requirements, and our personal experience of the method. Our personal experience is discussed in terms of the systems we developed using the SSM.}, Address = {Dunedin, New Zealand}, Author = {Da'oud Z. Butt and Tim Fletcher and Stephen G.
MacDonell and Brian E. Norris and William B.L. Wong}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:55 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {multimedia information systems, Soft Systems methodology, systems development lifecycle}, Month = {October}, Number = {96/22}, Title = {Applying soft systems methodology to multimedia systems requirements analysis}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1995-04, Abstract = {The design of spatial information systems has traditionally been carried out independently of mainstream database developments. It is contended that the adoption of mainstream database design techniques is important to progress in the spatial information systems development field. An accepted approach to the development of information systems is through an integrated development environment with a design repository at its core. This paper proposes a skeleton model for the design of a repository to store spatial metadata. An object oriented modelling approach is adopted in preference to an entity relationship approach because of its ability to model functional and dynamic aspects of the repository.}, Address = {Dunedin, New Zealand}, Author = {S.K. Sophie Cockcroft}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:15:36 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {95/4}, Size = {184 KB}, Title = {An object repository model for the storage of spatial metadata}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1996-02, Abstract = {Database schemas currently used to define spatial databases are deficient in that they do not incorporate facilities to specify business rules/integrity constraints. This shortcoming has been noted by G{\"u}nther and Lamberts [G{\"u}nther {\&} Lamberts, 1994] who commented that geographical information systems (GIS) do not generally offer any functionality to preserve semantic integrity. It is desirable that this functionality be incorporated for reasons of consistency and so that an estimate of the accuracy of data entry can be made. Research into constraints upon spatial relationships at the conceptual level is well documented. A number of researchers have shown that the transition from conceptual to logical spatial data models is possible [Firns, 1994; Hadzilacos {\&} Tryfona, 1995]. The algorithmic accomplishment of this transition is a subject of current research. This paper presents one approach to incorporating spatial business rules in spatially referenced database schemas by means of a repository. It is demonstrated that the repository has an important role to play in spatial data management and in particular automatic schema generation for spatially referenced databases.}, Address = {Dunedin, New Zealand}, Author = {S.K. Sophie Cockcroft}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:39:16 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {February}, Number = {96/02}, Size = {188 KB}, Title = {The use of a metadata repository in spatial database development}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1997-05, Abstract = {Spatial data quality has become an issue of increasing concern to researchers and practitioners in the field of Spatial Information Systems (SIS). Clearly the results of any spatial analysis are only as good as the data on which it is based. 
There are a number of significant areas for data quality research in SIS. These include topological consistency; consistency between spatial and attribute data; and consistency between spatial objects' representation and their true representation on the ground. The last category may be subdivided into spatial accuracy and attribute accuracy. One approach to improving data quality is the imposition of constraints upon data entered into the database. This paper presents a taxonomy of integrity constraints as they apply to spatial database systems. Taking a cross-disciplinary approach, it aims to clarify some of the terms used in the database and SIS fields for data integrity management. An overview of spatial data quality concerns is given, and each type of constraint is assessed regarding its approach to addressing these concerns. Some indication of an implementation method is also given for each.}, Address = {Dunedin, New Zealand}, Author = {S.K. Sophie Cockcroft}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:56 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {database constraints, spatial data quality, system development, rules}, Month = {May}, Number = {97/05}, Size = {128 KB}, Title = {A taxonomy of spatial data integrity constraints}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1996-25b, Abstract = {Integrated software engineering environments (ISEE) for traditional non-spatial information systems are well developed, incorporating Database Management Systems (DBMS) and Computer Aided Software Engineering (CASE) tools. The core component of the ISEE is the repository. It brings all the other components together and provides a common area to which all tools can link. In this fashion it also provides a central point for control. No such facility exists for the management of spatial data. This paper describes the development of such a facility in the form of a spatial metadata repository.}, Address = {Dunedin, New Zealand}, Author = {Sophie Cockcroft}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 17:10:26 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Note = {(Not in electronic version.)}, Number = {96/25b}, Title = {First experiences in implementing a spatial metadata repository}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1998-01, Abstract = {The application of business rules as a means of ensuring data quality is an accepted approach in information systems development. Rules, defined by the user, are stored and manipulated by a repository or data dictionary. The repository stores the system design, including rules which result from constraints in the user's environment, and enforces these rules at runtime.
The work presented here represents the application of this approach to spatial information system design using an integrated spatial software engineering tool (ISSET) with a repository at its core.}, Address = {Dunedin, New Zealand}, Author = {Sophie Cockcroft}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:56 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {spatial information systems development, integrity constraints, business rules, topological relationships}, Month = {March}, Number = {98/01}, Title = {User defined spatial business rules: {S}torage, management and implementation---{A} pipe network case study}, Type = {Discussion paper}, Year = {1998}} @techreport{dp1999-25, Abstract = {Web-based approaches to tracking students on placement are receiving much interest in the field of medical education. The work presented here describes a web-based solution to the problem of managing data collection from student encounters with patients whilst on placement. The solution has been developed by postgraduate students under the direction of staff of the health informatics diploma. Specifically, the system allows undergraduate students on placement or in the main hospital to access a web-based front end to a database designed to store the data that they are required to gather. The system also has the important effect of providing a rationale for the provision of electronic communication to the undergraduate students within the context of healthcare delivery. We believe that an additional effect will be to expose practising healthcare providers to electronic information systems, along with the undergraduates who are trained to use them, and increase the skill base of the practitioners.}, Address = {Dunedin, New Zealand}, Author = {Sophie Cockcroft and David Parry and Alice Breton and David Abernethy and John Gillies}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:48:55 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {99/25}, Size = {92 KB}, Title = {Infiltrating {IT} into primary care: {A} case study}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-18, Abstract = {This paper discusses the problem of integrated planning and execution for tasks that involve the consumption, production and alteration of relational information. Unlike information retrieval problems, the information processing domain requires explicit modelling of the changing information state of the domain and how the validity of resources changes as actions are performed. A solution to this problem is presented in the form of a specialised hierarchical task network planning model. A distinction is made between the information processing effects of an action (modelled in terms of constraints relating the domain information before and after the action) and the action's preconditions and effects, which are expressed in terms of required, produced and invalidated resources. The information flow between tasks is explicitly represented in methods and plans, including any required information-combining operations such as selection and union.
The paper presents the semantics of this model and discusses implementation issues arising from the extension of an existing HTN planner (SHOP) to support this model of planning.}, Address = {Dunedin, New Zealand}, Author = {Stephen Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:48:17 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {HTN planning, information processing, integrated planning and execution}, Month = {September}, Number = {99/18}, Size = {188 KB}, Title = {{HTN} planning for information processing tasks}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1994-16, Abstract = {Recently there has been a resurgence of interest in the deductive approach to planning. There are many benefits of this approach but one shortcoming is the difficulty of performing nonlinear planning in this framework. This paper argues that these problems are caused by a flaw in the partial order approach---the lack of structure in such a representation---and proposes an alternative, dynamic programming style approach based on a more structured representation of plans.}, Address = {Dunedin, New Zealand}, Author = {Stephen J.S. Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:57 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {November}, Number = {94/16}, Title = {Towards the deductive synthesis of nonlinear plans}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1997-01, Abstract = {In today's open, distributed environments, there is an increasing need for systems to assist the interoperation of tools and information resources. This paper describes a multi-agent system, DALEKS, that supports such activities for the information processing domain. With this system, information processing tasks are accomplished by the use of an agent architecture incorporating task planning and information agent matchmaking components. We discuss the characteristics of planning in this domain and describe how information processing tools are specified for the planner. We also describe the manner in which planning, agent matchmaking, and information task execution are interleaved in the DALEKS system. An example application taken from the domain of university course administration is provided to illustrate some of the activities performed in this system.}, Address = {Dunedin, New Zealand}, Author = {Stephen J.S. Cranefield and Aurora Diaz and Martin K. Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:57 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {February}, Number = {97/01}, Size = {84 KB}, Title = {Planning and matchmaking for the interoperation of information processing agents}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1995-07, Abstract = {The concept of an intelligent software agent has emerged from its origins in artificial intelligence laboratories to become an important basis for the development of distributed systems in the mainstream computer science community. This paper provides a review of some of the ideas behind the intelligent agent approach and addresses the question ``what is an agent?'' Some principal application areas for agent-based computing are outlined and related research programmes at the University of Otago are discussed.}, Address = {Dunedin, New Zealand}, Author = {Stephen J.S. Cranefield and Paul Gorman and Martin K. 
Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:16:39 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {July}, Number = {95/7}, Size = {188 KB}, Title = {Communicating agents: {A}n emerging approach for distributed heterogeneous systems}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1995-15, Abstract = {Agent-Based Software Integration (ABSI) entails the development of intelligent software agents and knowledge-sharing protocols that enhance interoperability of multiple software packages. Although some past ABSI projects reported in the literature have been concerned with the integration of relatively large software frameworks from separate engineering disciplines, the discussion in this paper concerns the integration of general-purpose software utilities and hand-crafted tools. With such smaller-scale ABSI projects, it may be difficult to justify the expense of constructing an overall ontology for the application. There are cases, however, when the project involves general-purpose tools that manipulate the same general entity types (such as files) but at different levels of abstraction. In such cases it is appropriate to have ontologies suited to the general usage of each tool, and constraint descriptions that enable the ontological specifications to be mapped across the various levels of abstraction. This paper discusses issues associated with this type of ABSI project and describes an example information management application associated with university course administration. For the information management application presented, the key issues are the provision of standard agent wrappers for standard desktop information management tools and the design of standard ontologies describing information stored in relational databases as well as in structured text files. Examples of a conceptual model describing such a database ontology are presented in connection with the example application. It is also suggested that a general planning agent, distinct from the notion of a facilitator agent, be employed in this context to assist in the use of various agents to manipulate information and move items from one data format to another.}, Address = {Dunedin, New Zealand}, Author = {Stephen J.S. Cranefield and Martin K. Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:57 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {October}, Number = {95/15}, Title = {Agent-based integration of general-purpose tools}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1996-24, Abstract = {This paper presents a practical multi-agent architecture for assisting users to coordinate the use of both special and general purpose software tools for performing tasks in a given problem domain. The architecture is open and extensible, being based on the techniques of agent-based software interoperability (ABSI), where each tool is encapsulated by a KQML-speaking agent. The work reported here adds facilities for the user to describe the problem domain, the tasks that are commonly performed in that domain and the ways in which various software tools are commonly used by the user. Together, these features provide the computer with a degree of autonomy in the user's problem domain in order to help the user achieve tasks through the coordinated use of disparate software tools.
This research focuses on the representational and planning capabilities required to extend the existing benefits of the ABSI architecture to include domain-level problem-solving skills. In particular, the paper proposes a number of standard ontologies that are required for this type of problem, and discusses a number of issues related to planning the coordinated use of agent-encapsulated tools.}, Address = {Dunedin, New Zealand}, Author = {Stephen J.S. Cranefield and Martin K. Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:41:01 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {96/24}, Size = {72 KB}, Title = {An agent-based architecture for software tool coordination}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1998-07, Abstract = {This paper describes an agent-based architecture designed to provide automation support for users who perform information processing tasks using a collection of distributed and disparate software tools and on-line resources. The architecture extends previous work on agent-based software interoperability. The unique features of the information processing domain compared to distributed information retrieval are discussed and a novel extension of hierarchical task network (HTN) planning to support this domain is presented.}, Address = {Dunedin, New Zealand}, Author = {Stephen Cranefield and Bryce McKinlay and Emanuela Moreale and Martin K. Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-11 09:58:54 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {98/07}, Size = {172 KB}, Title = {Automating information processing tasks: {A}n agent-based architecture}, Type = {Discussion paper}, Year = {1998}} @techreport{dp1999-01, Abstract = {Current tools and techniques for ontology development are based on the traditions of AI knowledge representation research. This research has led to popular formalisms such as KIF and KL-ONE style languages. However, these representations are little known outside AI research laboratories. In contrast, commercial interest has resulted in ideas from the object-oriented programming community maturing into industry standards and powerful tools for object-oriented analysis, design and implementation. These standards and tools have a wide and rapidly growing user community. This paper examines the potential for object-oriented standards to be used for ontology modelling, and in particular presents an ontology representation language based on a subset of the Unified Modeling Language together with its associated Object Constraint Language.}, Address = {Dunedin, New Zealand}, Author = {Stephen Cranefield and Martin K. Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:47:08 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {January}, Number = {99/01}, Size = {204 KB}, Title = {{UML} as an ontology modelling language}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1994-15, Abstract = {This paper deals with matters relating to toponymy. The concept of indigenous place names is discussed. A view is presented, based on empirical evidence, that current processes for the official recording of names are detrimental to a fair and reasonable representation of indigenous names. Historical events in Aotearoa are examined as well as the existing place name recording process. 
Research is outlined as to what can be done to examine and redress this situation. A proposition is tendered whereby names can be recorded via a process which is people-based and not government-based. Research matters surrounding this concept are discussed.}, Address = {Dunedin, New Zealand}, Author = {Iaean J. Cranwell and George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:22:23 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {August}, Number = {94/15}, Size = {1.1 MB}, Title = {Recording, placement and presentation of {M}{\=a}ori place names in a spatial information system}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1997-06, Abstract = {Computer users employ a collection of software tools to support their day-to-day work. Often the software environment is dynamic, with new tools being added as they become available and removed as they become obsolete or outdated. In today's systems, the burden of coordinating the use of these disparate tools, remembering the correct sequence of commands, and incorporating new and modified programs into the daily work pattern lies with the user. This paper describes a multi-agent system, DALEKS, that assists users in utilizing diverse software tools for their everyday work. It manages work and information flow by providing a coordination layer that selects the appropriate tool(s) to use for each of the user's tasks and automates the flow of information between them. This enables the user to be concerned more with what has to be done, rather than with the specifics of how to access tools and information. Here we describe the system architecture of DALEKS and illustrate it with an example in university course administration.}, Address = {Dunedin, New Zealand}, Author = {Aurora Diaz and Stephen J.S. Cranefield and Martin K. Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:58 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {agent architecture, software interoperability}, Month = {June}, Number = {97/06}, Size = {72 KB}, Title = {Planning and matchmaking in a multi-agent system for software integration}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1993-05, Abstract = {This paper argues that the introduction of western cadastral concepts into communities with different land tenure systems has involved ``cultural costs.'' The paper discusses these cultural costs and concludes that cadastral reformers need to re-design their product to fit the communities.}, Address = {Dunedin, New Zealand}, Author = {I. Chukwudozie Ezigbalike and George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 14:46:52 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {93/5}, Size = {1.3 MB}, Title = {Cadastral ``reform''---{A}t what cost to developing countries?}, Type = {Discussion paper}, Year = {1993}} @techreport{dp1993-03, Abstract = {Semantic data models comprise formally defined abstractions for representing real world relationships and aspects of the structure of real world phenomena so as to aid database design.
While previous research in spatial database design has shown that semantic data models are amenable to explicitly representing some spatial concepts, this paper shows that semantic data models may usefully be applied to the design of spatial databases even without explicitly representing spatial concepts. Specifically, an entity-relationship model comprising only ``is-associated-with'' relationships is used as the basis from which to define thematic layers for a layer based spatial database.}, Address = {Dunedin, New Zealand}, Author = {Peter G. Firns}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:59 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {November}, Number = {93/3}, Size = {1.2 MB}, Title = {The derivation of thematic map layers from entity-relationship data models}, Type = {Discussion paper}, Year = {1993}} @techreport{dp1994-14, Abstract = {Semantic data models comprise abstractions used, in conceptual database design, to represent real world relationships and aspects of the structure of real world phenomena. Such abstractions have previously been applied to the modelling of spatial concepts, but in the process their semantics are implicitly extended. This paper explicitly extends the semantics of the entity relationship model, defining two specific types of entity set to enable the notion of a thematic layer to be incorporated in entity relationship schemas. It places this in the context of a conceptual modelling framework to be used in the design of spatially referenced databases.}, Address = {Dunedin, New Zealand}, Author = {Peter G. Firns}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:58:59 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {August}, Number = {94/14}, Size = {1.1 MB}, Title = {A conceptual data modelling framework incorporating the notion of a thematic layer}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1996-21, Abstract = {The development of multimedia information systems must be managed and controlled just as it is for other generic system types. This paper proposes an approach for assessing multimedia component and system characteristics with a view to ultimately using these features to estimate the associated development effort. Given the different nature of multimedia systems, existing metrics do not appear to be entirely useful in this domain; however, some general principles can still be applied in analysis. Some basic assertions concerning the influential characteristics of multimedia systems are made and a small preliminary set of data is evaluated.}, Address = {Dunedin, New Zealand}, Author = {Tim Fletcher and William B.L. Wong and Stephen G. MacDonell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2011-01-24 17:25:34 +1300}, Institution = {Department of Information Science, University of Otago}, Keywords = {multimedia, management, metrics}, Month = {October}, Number = {96/21}, Size = {220 KB}, Title = {Early experiences in measuring multimedia systems development effort}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1994-10, Abstract = {This paper investigates statistical models for the understanding of the behaviour of scrubweeds in Southland and Otago. Data pertaining to eight scrubweed species have been collected along four transects together with the environmental factors, altitude, slope, aspect and land use classification. 
Each transect is approximately 80km by 2km, with data being held for every 1ha so that there are approximately 16,000 pixels for each transect. It is important to understand the relationship between the species so that interpolation and extrapolation can be performed. The initial survey, completed in 1992, will be repeated in 1995 and 1998. These surveys will then form the baseline for an understanding of the spread or contraction of the species in farmlands of the South Island. This in turn will assist policy makers in formulating management plans which relate eradication to farmland productivity. This paper deals in detail with one of the transects---Balclutha to Katiki Point.}, Address = {Dunedin, New Zealand}, Author = {Liliana Gonzalez and George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:21:15 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {canonical correlation, kriging, log-linear models, logistic regression, spatial correlation, variogram analysis}, Month = {May}, Number = {94/10}, Title = {Stochastic models of the behaviour of scrubweeds in {S}outhland and {O}tago}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1999-20, Abstract = {One problem faced by managers who are using project management models is the elicitation of numerical inputs. Obtaining these with any degree of confidence early in a project is not always feasible. Related to this difficulty is the risk of precisely specified outputs from models leading to overcommitment. These problems can be seen as the collective failure of software measurements to represent the inherent uncertainties in managers' knowledge of the development products, resources, and processes. It is proposed that fuzzy logic techniques can help to overcome some of these difficulties by representing the imprecision in inputs and outputs, as well as providing a more expert-knowledge-based approach to model building. The use of fuzzy logic for project management, however, should not be the same throughout the development life cycle. Different levels of available information and desired precision suggest that it can be used differently depending on the current phase, although a single model can be used for consistency.}, Address = {Dunedin, New Zealand}, Author = {Andrew Gray and Stephen MacDonell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:00 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {September}, Number = {99/20}, Size = {148 KB}, Title = {Fuzzy logic for software metric models throughout the development life-cycle}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-11, Abstract = {Whilst some software measurement research has been unquestionably successful, other research has struggled to enable expected advances in project and process management. Contributing to this lack of advancement has been the incidence of inappropriate or non-optimal application of various model-building procedures. This obviously raises questions over the validity and reliability of any results obtained, as well as the conclusions that may have been drawn regarding the appropriateness of the techniques in question. In this paper we investigate the influence of various data set characteristics and the purpose of analysis on the effectiveness of four model-building techniques---three statistical methods and one neural network method.
In order to illustrate the impact of data set characteristics, three separate data sets, drawn from the literature, are used in this analysis. In terms of predictive accuracy, it is shown that no one modeling method is best in every case. Some consideration of the characteristics of data sets should therefore occur before analysis begins, so that the most appropriate modeling method is then used. Moreover, issues other than predictive accuracy may have a significant influence on the selection of model-building methods. These issues are also addressed here, and a series of guidelines for selecting among and implementing these and other modeling techniques is discussed.}, Address = {Dunedin, New Zealand}, Author = {Andrew Gray and Stephen MacDonell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:00 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {software metrics, analysis, statistical methods, connectionist methods}, Month = {June}, Number = {99/11}, Size = {292 KB}, Title = {Software metrics data analysis---{E}xploring the relative performance of some commonly used modeling techniques}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-16, Abstract = {Software metric-based estimation of project development effort is most often performed by expert judgment rather than by using an empirically derived model (although such a model may be used by the expert to assist their decision). One question that can be asked about these estimates is how stable they are with respect to characteristics of the development process and product. This stability can be assessed in relation to the degree to which the project has advanced over time, the type of module for which the estimate is being made, and the characteristics of that module. In this paper we examine a set of expert-derived estimates for the effort required to develop a collection of modules from a large health-care system. Statistical tests are used to identify relationships between the type (screen or report) and characteristics of modules and the likelihood of the associated development effort being under-estimated, approximately correct, or over-estimated. Distinct relationships are found that suggest that the estimation process being examined was not unbiased with respect to such characteristics.}, Address = {Dunedin, New Zealand}, Author = {Andrew Gray and Stephen MacDonell and Martin Shepperd}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:00 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {99/16}, Size = {236 KB}, Title = {Factors systematically associated with errors in subjective estimates of software development effort: {T}he stability of expert judgment}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1996-05, Abstract = {The almost exclusive use of regression analysis to derive predictive equations for software development metrics found in papers published before 1990 has recently been complemented by increasing numbers of studies using non-traditional methods, such as neural networks, fuzzy logic models, case-based reasoning systems, rule-based systems, and regression trees. There has also been an increasing level of sophistication in the regression-based techniques used, including robust regression methods, factor analysis, resampling methods, and more effective and efficient validation procedures.
This paper examines the implications of using these alternative methods and provides some recommendations as to when they may be appropriate. A comparison between standard linear regression, robust regression, and the alternative techniques is also made in terms of their modelling capabilities, with specific reference to software metrics.}, Address = {Dunedin, New Zealand}, Author = {Andrew R. Gray and Stephen G. MacDonell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:00 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {96/05}, Title = {A comparison of alternatives to regression analysis as model building techniques to develop predictive equations for software metrics}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1997-10, Abstract = {Software metrics are measurements of the software development process and product that can be used as variables (both dependent and independent) in models for project management. The most common types of these models are those used for predicting the development effort for a software system based on size, complexity, developer characteristics, and other metrics. Despite the financial benefits from developing accurate and usable models, there are a number of problems that have not been overcome using the traditional techniques of formal and linear regression models. These include the non-linearities and interactions inherent in complex real-world development processes, the lack of stationarity in such processes, over-commitment to precisely specified values, the small quantities of data often available, and the inability to use whatever knowledge is available where exact numerical values are unknown. The use of alternative techniques, especially fuzzy logic, is investigated and some usage recommendations are made.}, Address = {Dunedin, New Zealand}, Author = {Andrew R. Gray and Stephen G. MacDonell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:00 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {July}, Number = {97/10}, Size = {88 KB}, Title = {Applications of fuzzy logic to software metric models for development effort estimation}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1997-14, Abstract = {The number of occurrences and severity of computer-based attacks such as viruses and worms, logic bombs, trojan horses, computer fraud, and plagiarism of code have become of increasing concern. In an attempt to deal better with these problems, it is proposed that methods for examining the authorship of computer programs are necessary. This field is referred to here as software forensics. This involves the areas of author discrimination, identification, and characterisation, as well as intent analysis. Borrowing extensively from the existing fields of linguistics and software metrics, software forensics can be seen as a new and exciting area for forensics to extend into.}, Address = {Dunedin, New Zealand}, Author = {Andrew R. Gray and Philip J. Sallis and Stephen G.
MacDonell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:01 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {authorship analysis, computer programming, malicious programs, software forensics, software metrics, source code}, Month = {December}, Number = {97/14}, Title = {Software forensics: {E}xtending authorship analysis techniques to computer programs}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1996-25c, Abstract = {Decision support systems, statistics and expert systems were some of the mainstay techniques used for modelling environmental phenomena. Now modelling systems utilise artificial intelligence (AI) techniques for the extra computational analysis they provide. By operating in a toolbox environment and adopting AI techniques, geographic information system (GIS) modellers have greater options available for solving problems. This paper outlines a new approach to applying artificial intelligence techniques to solve spatial problems. The approach combines case-based reasoning (CBR) with geographic information systems and allows both techniques to be applied to solve spatial problems. More specifically, this paper examines techniques applied to the problem of soil classification. Spatial cases are defined and analysed using the case-based reasoning techniques of retrieve, reuse, revise and retain. Once the structure of cases is defined, a case base is compiled. When the case base is of sufficient size, the problem of soil classification is tested using this new approach. The problem is solved by searching the case base for another spatial phenomenon similar to that which exists. Then the knowledge from that retrieved case is used to formulate an answer to the problem. A comparison of the results obtained by this approach and a traditional method of soil classification is then undertaken. This paper also documents the data-saving concept in translating from decision trees to CBR. The logistics of the problems that are characteristic of case-based reasoning systems are discussed, for example: how should the spatial domain of an environmental phenomenon best be represented in a case base? What are the constraints of CBR, what data are lost, and what functions are gained? Finally, the following question is posed: ``to what real world level can the environment be modelled using GIS and case-based reasoning techniques?''}, Address = {Dunedin, New Zealand}, Author = {Alec Holt}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:35:27 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {96/25c}, Size = {1 MB}, Title = {Incorporating a new computational reasoning approach to spatial modelling}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1995-16, Abstract = {This paper outlines a unique approach applying artificial intelligence techniques to the solving of environmental problems. The approach combines case-based reasoning with spatial information systems, enabling technologies and techniques from each domain to be applied to environmental problems. This paper defines a possible case-based reasoning/spatial information system hybrid that would allow spatial cases to be defined and analysed by both technologies. The example used in this paper involves soil series classification which, using case-based reasoning, is performed according to spatial criteria.
Evaluations and spatial criteria are then used to predict properties of new cases based on similar previous spatial cases.}, Address = {Dunedin, New Zealand}, Author = {Alec Holt and George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:01 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {95/16}, Title = {Applying case-based reasoning to spatial phenomena}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1995-09, Abstract = {This paper places emphasis on the plausible concept of case-based reasoning being integrated with spatial information systems, and on the adaptation of artificial intelligence techniques to improve the analytical strength of spatial information systems. This adaptation of artificial intelligence techniques may include examples of expert systems, fuzzy logic, hybrid connectionist systems and neural networks, all integrated with spatial information systems. The unique process of case-based reasoning is described. The research into the possible integration of case-based reasoning and spatial information systems is outlined. The benefits of a case-based reasoning spatial information hybrid system are discussed.}, Address = {Dunedin, New Zealand}, Author = {Alec Holt and George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:01 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {August}, Number = {95/9}, Title = {Case-based reasoning and spatial analysis}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1998-08, Abstract = {This research continues current innovative geocomputational research trends that aim to provide enhanced spatial analysis tools. The coupling of case-based reasoning (CBR) with GIS provides the focus of this paper. This coupling allows the retrieval, reuse, revision and retention of previous similar spatial cases. CBR is therefore used to develop more complex spatial data modelling methods (by using the CBR modules for improved spatial data manipulation) and provide enhanced exploratory geographical analysis tools (to find and assess certain patterns and relationships that may exist in spatial databases). This paper details the manner in which spatial similarity is assessed, for the purpose of re-using previous spatial cases. The authors consider similarity assessment a useful concept for retrieving and analysing spatial information, as it may help researchers describe and explore a certain phenomenon, its immediate environment and its relationships to other phenomena. This paper will address the following questions: What makes phenomena similar? What is the definition of similarity? What principles govern similarity? How can similarity be measured? Generally, phenomena are similar when they share common attributes and circumstances. The degree of similarity depends on the type and number of commonalities they share. Within this research, similarity is examined from a spatial perspective. Spatial similarity is broadly defined by the authors as spatial matching and ranking according to a specific context and scale. More specifically, similarity is governed by context (function, use, reason, goal, the user's frame of mind), scale (coarse or fine level), repository (the application, local domain, site and data specifics), techniques (the available technology for searching, retrieving and recognising data) and measure and ranking systems.
The degree of match is the score between a source and a target. In spatial matching a source and a target could be a pixel, region or coverage. The principles that govern spatial similarity are not just the attributes but also the relationships between two phenomena. This is one reason why coupling CBR with a GIS is advantageous. A GIS is used symbiotically to extract spatial variables that can be used by CBR to determine similar spatial relations between phenomena. These spatial relations are used to assess the similarity between two phenomena (for example, proximity and neighbourhood analysis). Developing the concept of spatial similarity could assist with analysing spatial databases by developing techniques to match similar areas. This would help maximise the information that could be extracted from spatial databases. From an exploratory perspective, spatial similarity serves as an organising principle by which spatial phenomena are classified, relationships identified and generalisations made from previous bona fide experiences or knowledge. This paper will investigate the spatial similarity concept.}, Address = {Dunedin, New Zealand}, Author = {Alec Holt and Stephen MacDonell and George Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-11 09:58:44 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {98/08}, Size = {456 KB}, Title = {Spatial isomorphism}, Type = {Discussion paper}, Year = {1998}} @techreport{dp1996-09, Abstract = {This paper explores two different methods for improved learning in multimodular fuzzy neural network systems for classification. It demonstrates these methods on a case study of satellite image classification using 3 spectral inputs and 10 coastal vegetation covertype outputs. The classification system is a multimodular one; it has one fuzzy neural network per output. All the fuzzy neural networks are trained in parallel for a small number of iterations. Then, the system performance is tested on new data to determine the types of interclass confusion. Two strategies are developed to improve classification performance. First, the individual modules are additionally trained for a very small number of iterations on a subset of the data to decrease the false positive and the false negative errors. The second strategy is to create new units, `experts', which are individually trained to discriminate only the ambiguous classes. So, if the main system classifies a new input into one of the ambiguous classes, then the new input is passed to the `experts' for final classification. Two learning techniques are presented and applied to both classification performance enhancement strategies; the first one reduces omission, or false negative, error; the second reduces commission, or false positive, error. Considerable improvement is achieved by using these learning techniques, thus making it feasible to incorporate them into a real adaptive system that improves during operation.}, Address = {Dunedin, New Zealand}, Author = {Steven A. Israel and Nikola K.
Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:39:57 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {96/09}, Size = {440 KB}, Title = {Improved learning strategies for multimodular fuzzy neural network systems: {A} case study on image classification}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1998-03, Abstract = {The paper introduces one paradigm of neuro-fuzzy techniques and an approach to building on-line, adaptive intelligent systems. This approach is called evolving connectionist systems (ECOS). ECOS evolve through incremental, on-line learning, both supervised and unsupervised. They can accommodate new input data, including new features, new classes, etc. The ECOS framework is presented and illustrated on a particular type of evolving neural networks---evolving fuzzy neural networks. ECOS are three to six orders of magnitude faster than the multilayer perceptrons, or the fuzzy neural networks, trained with the backpropagation algorithm, or with a genetic programming technique. ECOS belong to the new generation of adaptive intelligent systems. This is illustrated on several real world problems for adaptive, on-line classification, prediction, decision making and control: phoneme-based speech recognition; moving person identification; wastewater flow time-series prediction and control; intelligent agents; financial time series prediction and control. The principles of recurrent ECOS and reinforcement learning are outlined.}, Address = {Dunedin, New Zealand}, Author = {Nikola Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:46:38 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {evolving neuro-fuzzy systems, fuzzy neural networks, on-line adaptive control, on-line decision making, intelligent agents}, Month = {March}, Number = {98/03}, Title = {Looking for a new {AI} paradigm: {E}volving connectionist and fuzzy connectionist systems---{T}heory and applications for adaptive, on-line intelligent systems}, Type = {Discussion paper}, Year = {1998}} @techreport{dp1999-02, Abstract = {The paper introduces evolving connectionist systems (ECOS) as an effective approach to building on-line, adaptive intelligent systems. ECOS evolve through incremental, hybrid (supervised/unsupervised), on-line learning. They can accommodate new input data, including new features, new classes, etc. through local element tuning. New connections and new neurons are created during the operation of the system. The ECOS framework is presented and illustrated on a particular type of evolving neural networks---evolving fuzzy neural networks (EFuNNs). EFuNNs can learn spatial-temporal sequences in an adaptive way, through one pass learning. Rules can be inserted and extracted at any time of the system operation. 
The characteristics of ECOS and EFuNNs are illustrated on several case studies that include: adaptive pattern classification; adaptive, phoneme-based spoken language recognition; adaptive dynamic time-series prediction; intelligent agents.}, Address = {Dunedin, New Zealand}, Author = {Nikola Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:02 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {evolving connectionist systems, evolving fuzzy neural networks, on-line learning, spatial-temporal adaptation, adaptive speech recognition}, Month = {March}, Number = {99/02}, Size = {944 KB}, Title = {Evolving connectionist systems for on-line, knowledge-based learning: {P}rinciples and applications}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-08, Abstract = {The chapter presents a new methodology for building adaptive, incremental learning systems for image pattern classification. The systems are based on dynamically evolving fuzzy neural networks, which are neural architectures that realise connectionist learning, fuzzy logic inference, and case-based reasoning. The methodology and the architecture are applied to two sets of real data---one of satellite image data, and the other of fruit image data. The proposed method and architecture encourage fast learning, life-long learning and on-line learning when the system operates in a changing environment of image data.}, Address = {Dunedin, New Zealand}, Author = {Nikola Kasabov and Steven Israel and Brendon Woodford}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:02 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {image classification, evolving fuzzy neural networks, case-based reasoning}, Month = {May}, Number = {99/08}, Size = {1.3 MB}, Title = {Adaptive, evolving, hybrid connectionist systems for image pattern recognition}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1994-12, Abstract = {A new type of generalised fuzzy rule and generalised fuzzy production system and a corresponding reasoning method are developed. They are implemented in a connectionist architecture and are called connectionist fuzzy production systems. They combine all the features of symbolic AI production systems, fuzzy production systems and connectionist systems. A connectionist method for learning generalised fuzzy productions from raw data is also presented. The main conclusion reached is that connectionist fuzzy production systems are very powerful as fuzzy reasoning machines, and they may well inspire new methods of plausible representation of inexact knowledge and new inference techniques for approximate reasoning.}, Address = {Dunedin, New Zealand}, Author = {Nikola K. Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:03 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {94/12}, Title = {Connectionist fuzzy production systems}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1994-17, Abstract = {The paper presents the major principles of building complex hybrid systems for knowledge engineering, where at the centre of the design process is the task of learning (extracting) fuzzy rules from data. An experimental environment, FuzzyCOPE, which facilitates this process, is described.
It consists of a fuzzy rules extraction module, a neural networks module, a fuzzy inference methods module and a production rules module. Such an environment makes possible the use of the three paradigms, i.e. fuzzy rules, neural networks and symbolic production rules, in one system. Automatic rules extraction from data and selection of the most appropriate reasoning mechanism are also provided. Using FuzzyCOPE for building hybrid systems for decision making and speech recognition is discussed and illustrated.}, Address = {Dunedin, New Zealand}, Author = {Nikola K. Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:03 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {November}, Number = {94/17}, Title = {Hybrid fuzzy connectionist rule-based systems and the role of fuzzy rules extraction}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1994-11, Abstract = {The paper presents a novel approach towards solving different speech recognition tasks, i.e. phoneme recognition, ambiguous word recognition, continuous speech-to-text conversion, and learning fuzzy rules for language processing. The model uses a standard connectionist system for initial recognition and a connectionist rule-based system for higher-level recognition. The higher level is realised as a Connectionist Fuzzy Production System (CFPS) which makes it possible to introduce different parameters into the higher-level production rules, such as degrees of importance, dynamic sensitivity factors, noise tolerance factors, certainty degrees and reactiveness factors. It provides different approximate chain reasoning techniques. The CFPS helps to solve many of the ambiguities in speech recognition tasks. Experiments on phoneme recognition in the English language are reported. This approach facilitates a connectionist implementation of the whole process of speech recognition (at a low level and at a higher logical level), which used to be performed in hybrid environments. It also facilitates the process of learning fuzzy rules for language processing. All the language processing tasks and subtasks are realised in a homogeneous connectionist environment. This brings all the benefits of connectionist systems to practical applications in the speech recognition area.}, Address = {Dunedin, New Zealand}, Author = {Nikola K. Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:03 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {94/11}, Title = {Towards using hybrid connectionist fuzzy production systems for speech recognition}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1996-23, Abstract = {Fuzzy neural networks have several features that make them well suited to a wide range of knowledge engineering applications. These strengths include fast and accurate learning, good generalisation capabilities, excellent explanation facilities in the form of semantically meaningful fuzzy rules, and the ability to accommodate both data and existing expert knowledge about the problem under consideration. This paper investigates adaptive learning, rule extraction and insertion, and neural/fuzzy reasoning for a particular model of a fuzzy neural network called FuNN. As well as providing for representing a fuzzy system with an adaptable neural architecture, FuNN also incorporates a genetic algorithm in one of its adaptation modes.
A version of FuNN---FuNN/2, which employs triangular membership functions and correspondingly modified learning and adaptation algorithms, is also presented in the paper.}, Address = {Dunedin, New Zealand}, Author = {Nikola K. Kasabov and Jaesoo Kim and Michael J. Watts and Andrew R. Gray}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:42:44 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {96/23}, Title = {{FuNN}/2---{A} fuzzy neural network architecture for adaptive learning and knowledge acquisition}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1994-05, Abstract = {A Content-Addressable Model of Production Systems, `CAMPUS', has been developed. The main idea is to achieve high execution performance in production systems by exploiting the potential fine-grain data parallelism. The facts and the rules of a production system are uniformly represented as CAM tables. CAMPUS differs from other CAM-inspired models in that it is based on a non-state-saving and `lazy' matching algorithm. The production system execution cycle is represented by a small number of associative search operations over the CAM tables, a number that does not depend, or depends only slightly, on the number of rules and facts in the production system. The model makes efficient implementation of large production systems on fast CAM possible. An experimental CAMPUS realisation of the production language CLIPS is also reported. The production system execution time for a large number of processed facts is about 1,000 times less than the corresponding CLIPS execution time on a standard computer architecture.}, Address = {Dunedin, New Zealand}, Author = {Nikola K. Kasabov and S.H. Lavington and S. Lin and C. Wang}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:20:43 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {associative matching, content-addressable memory (CAM), production systems}, Month = {February}, Number = {94/5}, Size = {301 KB}, Title = {A model for exploiting associative matching in {AI} production systems}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1994-04, Abstract = {Neural networks (NN) have been intensively used for speech processing. This paper describes a series of experiments on using a single Kohonen Self Organizing Map (KSOM), a hierarchically organised KSOM, a backpropagation-type neural network with fuzzy inputs and outputs, and a fuzzy system, for continuous speech recognition. Experiments have been carried out with different non-linear transformations applied to the signal before using a KSOM. The results obtained by using different techniques on the case study of phonemes in the Bulgarian language are compared.}, Address = {Dunedin, New Zealand}, Author = {Nikola K. Kasabov and E.
Peev}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:04 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {February}, Number = {94/4}, Size = {258 KB}, Title = {Phoneme recognition with hierarchical self-organised neural networks and fuzzy systems}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1996-03, Abstract = {General Characteristics of the Theme: * Emerging technology with rapidly growing practical applications * Nationally and internationally recognised leadership of the University of Otago * Already established organisation for research and working teams * Growing number of postgraduate students working on the theme * Growing number of research projects in this area * Growing number of publications by members of the team}, Address = {Dunedin, New Zealand}, Author = {Nikola K. Kasabov and Martin K. Purvis and Philip J. Sallis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:39:27 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {February}, Number = {96/03}, Size = {180 KB}, Title = {Connectionist-based information systems: {A} proposed research theme}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1996-08, Abstract = {This paper proposes neuro-fuzzy engineering as a novel approach to spatial data analysis and to building decision-making systems based on spatial information processing, and presents the authors' development of this approach. It has been implemented as a software environment and is illustrated on a case study problem.}, Address = {Dunedin, New Zealand}, Author = {Nikola K. Kasabov and Martin K. Purvis and Feng Zhang and George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:39:49 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {96/08}, Size = {376 KB}, Title = {Neuro-fuzzy engineering for spatial information processing}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1994-18, Abstract = {The paper presents a framework of an integrated environment for speech recognition and a methodology for using such an environment. The integrated environment includes a signal processing unit, neural networks and fuzzy rule-based systems. Neural networks are used for ``blind'' pattern recognition of the phonemic labels of the segments of the speech. Fuzzy rules are used for reducing the ambiguities of the correctly recognised phonemic labels, for final recognition of the phonemes, and for language understanding. The fuzzy system part is organised as a multi-level, hierarchical structure. As an illustration, a model for phoneme recognition of New Zealand English is developed which exploits the advantages of the integrated environment. The model is illustrated on a small set of phonemes.}, Address = {Dunedin, New Zealand}, Author = {Nikola K. Kasabov and Catherine I.
Watson and Stephen Sinclair and Richard Kilgour}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:04 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {November}, Number = {94/18}, Title = {Integrating neural networks and fuzzy systems for speech recognition}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1999-07, Abstract = {This paper discusses the problem of adaptation in automatic speech recognition systems (ASRS) and suggests several strategies for adaptation in a modular architecture for speech recognition. The architecture allows for adaptation at different levels of the recognition process, where modules can be adapted individually based on their performance and the performance of the whole system. Two realisations of this architecture are presented along with experimental results from small-scale experiments. The first realisation is a hybrid system for speaker-independent phoneme-based spoken word recognition, consisting of neural networks for recognising English phonemes and fuzzy systems for modelling acoustic and linguistic knowledge. This system is adjustable by additional training of individual neural network modules and by tuning the fuzzy systems. The increased accuracy of the recognition through appropriate adjustment is also discussed. The second realisation of the architecture is a connectionist system that uses fuzzy neural networks (FuNNs) to accommodate both a priori linguistic knowledge and data from a speech corpus. A method for on-line adaptation of FuNNs is also presented.}, Address = {Dunedin, New Zealand}, Author = {Nikola Kasabov and Richard Kilgour and Stephen Sinclair}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:05 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {pattern recognition, artificial intelligence, neural networks, speech recognition}, Month = {April}, Number = {99/07}, Title = {From hybrid adjustable neuro-fuzzy systems to adaptive connectionist-based systems for phoneme and word recognition}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-04, Abstract = {The paper introduces a new type of evolving fuzzy neural networks (EFuNNs), denoted as mEFuNNs, for on-line learning, and their applications for dynamic time series analysis and prediction. mEFuNNs evolve through incremental, hybrid (supervised/unsupervised), on-line learning, like the EFuNNs. They can accommodate new input data, including new features, new classes, etc. through local element tuning. New connections and new neurons are created during the operation of the system. At each time moment, the output vector of an mEFuNN is calculated based on the m-most activated rule nodes. Two approaches are proposed: (1) using weighted fuzzy rules of Zadeh-Mamdani type; (2) using Takagi-Sugeno fuzzy rules that utilise dynamically changing and adapting values for the inference parameters. It is proved that the mEFuNNs can effectively learn complex temporal sequences in an adaptive way and outperform EFuNNs, ANFIS and other neural network and hybrid models. Rules can be inserted, extracted and adjusted continuously during the operation of the system. The characteristics of the mEFuNNs are illustrated on two benchmark dynamic time series data sets, as well as on two real case studies for on-line adaptive control and decision making.
Aggregation of rule nodes in evolved mEFuNNs can be achieved through a fuzzy C-means clustering algorithm, which is also illustrated on the benchmark data sets. mEFuNNs that are regularly trained and aggregated in an on-line, self-organised mode perform as well as, or better than, mEFuNNs that use the fuzzy C-means clustering algorithm for off-line rule node generation on the same data set.}, Address = {Dunedin, New Zealand}, Author = {Nikola Kasabov and Qun Song}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:05 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {dynamic evolving fuzzy neural networks, on-line learning, adaptive control, dynamic time series prediction, fuzzy clustering}, Month = {March}, Number = {99/04}, Size = {2 MB}, Title = {Dynamic evolving fuzzy neural networks with `m-out-of-n' activation nodes for on-line adaptive systems}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-03, Abstract = {The paper is a study on a new class of spatial-temporal evolving fuzzy neural network systems (EFuNNs) for on-line adaptive learning, and their applications for adaptive phoneme recognition. The systems evolve through incremental, hybrid (supervised/unsupervised) learning. They accommodate new input data, including new features, new classes, etc. through local element tuning. Both feature-based similarities and temporal dependencies that are present in the input data are learned and stored in the connections, and adjusted over time. This is an important requirement for the task of adaptive, speaker-independent spoken language recognition, where new pronunciations and new accents need to be learned in an on-line, adaptive mode. Experiments with EFuNNs, and also with multi-layer perceptrons and fuzzy neural networks (FuNNs), conducted on the whole set of New Zealand English phonemes, show the superiority and the potential of EFuNNs when used for the task. Spatial allocation of nodes and their aggregation in EFuNNs allow for similarity preserving and similarity observation within one phoneme data and across phonemes, while subtle temporal variations within one phoneme data can be learned and adjusted through temporal feedback connections. The experimental results support the claim that spatial-temporal organisation in EFuNNs can lead to a significant improvement in the recognition rate, especially for the diphthong and the vowel phonemes in English, which in many cases are problematic for a system to learn and adjust in an adaptive way.}, Address = {Dunedin, New Zealand}, Author = {Nikola Kasabov and Michael Watts}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:05 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {99/03}, Size = {560 KB}, Title = {Spatial-temporal adaptation in evolving fuzzy neural networks for on-line adaptive phoneme recognition}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-19, Abstract = {In an introductory course in information technology at the University of Otago, the acquisition of practical skills is considered to be a prime objective. An effective way of assessing the achievement of this objective is by means of a `practical test', in which students are required to accomplish simple tasks in a controlled environment. The assessment of such work demands a high level of expertise, is very labour intensive and can suffer from marker inconsistency, particularly with large candidatures.
This paper describes the results of a trial in which the efforts of one thousand students in a practical test of word processing were scored by means of a program written in MediaTalk. Details of the procedure are given, including sampling strategies for the purpose of validation and examples of problems that were encountered. It was concluded that the approach was useful and, once properly validated, gave rise to considerable savings in time and effort.}, Address = {Dunedin, New Zealand}, Author = {Geoffrey Kennedy}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:05 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {computer-aided learning, automated scoring, computer education, test validation}, Month = {September}, Number = {99/19}, Size = {216 KB}, Title = {Automated scoring of practical tests in an introductory course in information technology}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-05, Abstract = {In this paper, an adaptive neuro-fuzzy system, called HyFIS, is proposed to build and optimise fuzzy models. The proposed model introduces the learning power of neural networks into the fuzzy logic systems and provides linguistic meaning to the connectionist architectures. Heuristic fuzzy logic rules and input-output fuzzy membership functions can be optimally tuned from training examples by a hybrid learning scheme composed of two phases: the phase of rule generation from data, and the phase of rule tuning by using the error backpropagation learning scheme for a neural fuzzy system. In order to illustrate the performance and applicability of the proposed neuro-fuzzy hybrid model, extensive simulation studies of nonlinear complex dynamics are carried out. The proposed method can be applied to on-line incremental adaptive learning for the purpose of prediction and control of non-linear dynamical systems.}, Address = {Dunedin, New Zealand}, Author = {Jaesoo Kim and Nikola Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:05 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {neuro-fuzzy systems, neural networks, fuzzy logic, parameter and structure learning, knowledge acquisition, adaptation, time series}, Month = {March}, Number = {99/05}, Title = {Hybrid neuro-fuzzy inference systems and their application for on-line adaptive learning of nonlinear dynamical systems}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1998-04, Abstract = {Variation in fruit maturation can influence harvest timing and duration, post-harvest fruit attributes and consumer acceptability. Present methods of managing and identifying lines of fruit with specific attributes, both in commercial fruit production systems and breeding programs, are limited by a lack of suitable tools to characterise fruit attributes at different stages of development in order to predict fruit behaviour at harvest, during storage or in relation to consumer acceptance. With visible-near infrared (VNIR) reflectance spectroscopy, a vast array of analytical information is collected rapidly with a minimum of sample pre-treatment. VNIR spectra contain information about the amount and the composition of constituents within fruit. This information can be obtained from intact fruit at different stages of development.
Spectroscopic data is processed using chemometrics techniques such as principal component analysis (PCA), discriminant analysis and/or connectionist approaches in order to extract qualitative and quantitative information for classification and predictive purposes. In this paper, we will illustrate the effectiveness of model-based, connectionist and hybrid approaches for fruit quality classification problems.}, Address = {Dunedin, New Zealand}, Author = {Jaesoo Kim and Nikola Kasabov and A. Mowat and P. Poole}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:06 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {98/04}, Title = {Connectionist methods for classification of fruit populations based on visible-near infrared spectrophotometry data}, Type = {Discussion paper}, Year = {1998}} @techreport{dp1998-05, Abstract = {Biological processes are among the most challenging to predict and control. It has been recognised that the development of an intelligent system for the recognition, prediction and control of process states in a complex, nonlinear biological process is difficult. Such unpredictable system behaviour requires an advanced, intelligent control system which learns from observations of the process dynamics and takes appropriate control action to avoid collapse of the biological culture. In the present study, a hybrid system called a fuzzy neural network is considered, where the role of the fuzzy neural network is to estimate the correct feed demand as a function of the process responses. The feed material is an organic and/or inorganic mixture of chemical compounds for the bacteria to grow on. Small amounts of the feed sources must be added and the response of the bacteria must be measured. This is no easy task because the process sensors used are non-specific and their response would vary during the developmental stages of the process. This hybrid control strategy retains the advantages of both neural networks and fuzzy control. These strengths include fast and accurate learning, good generalisation capabilities, excellent explanation facilities in the form of semantically meaningful fuzzy rules, and the ability to accommodate both numerical data and existing expert knowledge about the problem under consideration. The application to the estimation and prediction of the correct feed demand shows the power of this strategy as compared with conventional fuzzy control.}, Address = {Dunedin, New Zealand}, Author = {Jaesoo Kim and Robert Kozma and Nikola Kasabov and B. Gols and M. Geerink and T. Cohen}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:06 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {fuzzy neural networks, hybrid learning, knowledge extraction and insertion, estimation, biological process and control, bacterial system, total organic carbon (TOC)}, Month = {March}, Number = {98/05}, Title = {A fuzzy neural network model for the estimation of the feeding rate to an anaerobic waste water treatment process}, Type = {Discussion paper}, Year = {1998}} @techreport{dp1999-14, Abstract = {For some years software engineers have been attempting to develop useful prediction systems to estimate such attributes as the effort to develop a piece of software and the likely number of defects. Typically, prediction systems are proposed and then subjected to empirical evaluation.
Claims are then made with regard to the quality of the prediction systems. A wide variety of prediction quality indicators have been suggested in the literature. Unfortunately, we believe that a somewhat confusing state of affairs prevails and that this impedes research progress. This paper aims to provide the research community with a better understanding of the meaning of, and relationship between, these indicators. We critically review twelve different approaches by considering them as descriptors of the residual variable. We demonstrate that the two most popular indicators, MMRE and pred(25), are in fact indicators of the spread and shape, respectively, of prediction accuracy, where prediction accuracy is the ratio of estimate to actual (or actual to estimate). Next, we highlight the impact of the choice of indicator by comparing three prediction systems derived using four different simulated datasets. We demonstrate that the results of such a comparison depend upon the choice of indicator, the analysis technique, and the nature of the dataset used to derive the predictive model. We conclude that prediction systems cannot be characterised by a single summary statistic. We suggest that we need indicators of the central tendency and spread of accuracy as well as indicators of shape and bias. For this reason, boxplots of relative error or residuals are useful alternatives to simple summary metrics.}, Address = {Dunedin, New Zealand}, Author = {Barbara Kitchenham and Stephen MacDonell and Lesley Pickard and Martin Shepperd}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:06 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {prediction systems, estimation, empirical analysis, metrics, goodness-of-fit statistics}, Month = {June}, Number = {99/14}, Size = {304 KB}, Title = {Assessing prediction systems}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1996-20, Abstract = {A novel connectionist architecture based on an optical thin-film multilayer model (OTFM) is described. The architecture is explored as an alternative to the widely used neuron-inspired models, with the thin-film thicknesses serving as adjustable `weights' for the computation. The use of genetic algorithms for training the thin-film model, along with experimental results on the parity problem and the iris data classification, are presented.}, Address = {Dunedin, New Zealand}, Author = {Xiaodong Li and Martin K. Purvis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:40:45 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {October}, Number = {96/20}, Size = {448 KB}, Title = {Using genetic algorithms for an optical thin-film learning model}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1996-25d, Abstract = {How should geographic information systems be developed? There is a strong demand from users for enhanced functionality and power. Vendors can and do respond to these demands. But where will this lead? Will the result be one all-embracing and all-conquering program or geographic information system (GIS)? A GIS could grow to incorporate all statistical functions, all visualisation techniques, all data management functions etc. It is possible to perceive a scenario in which GIS is developed to `bloatware' proportions. An alternative scenario is one in which a GIS is interfaced with other software systems.
Embedding database bridges and other product-specific links, providing data import and export routines, and making system calls are all ways of interfacing GIS with other systems. GIS vendors could opt to produce a `linkware' GIS, interfaced to as many third party systems as possible. Given these two alternatives to GIS development, an interesting set of questions arises. How far do vendors go with enhancing their systems compared with interfacing with third party systems? Is there a balance? Or do GIS users just keep calling for `more', regardless of the solution set? There is a balance. GIS is likely to be developed by being enhanced AND by being interfaced with third party software. In a way, this is a third developmental track leading to an increasingly functional GIS whose ability to interact with other systems is greatly improved. This interoperable GIS allows flexible combinations of systems components while still providing a comprehensive range of spatial operations and analytical functions. Of these three developmental tracks, this paper presents an example of what can be achieved with the interoperable GIS. Expert systems are introduced along with the client/server and object-oriented paradigms. By using these paradigms, a generic, spatial, rule-based toolbox called SES (spatial expert shell) has been created. SES is described using examples and contrasted with other documented expert system--GIS linkages. But first, integration is modelled in three dimensions to highlight the need for improvements in how GISs can interact with other systems.}, Address = {Dunedin, New Zealand}, Author = {Linda Lilburne and George Benwell and Roz Buick}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:36:38 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {96/25d}, Size = {564 KB}, Title = {{GIS}, expert systems and interoperability}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1999-15, Abstract = {This paper describes ongoing research directed at formulating a set of appropriate measures for assessing and ultimately predicting effort requirements for multimedia systems development. Whilst significant advances have been made in the determination of measures for both transaction-based and process-intensive systems, very little work has been undertaken in relation to measures for multimedia systems. A small preliminary empirical study is reviewed as a precursor to a more exploratory investigation of the factors that are considered by industry to be influential in determining development effort. This work incorporates the development and use of a goal-based framework to assist the measure selection process from a literature basis, followed by an industry questionnaire. The results provide a number of preliminary but nevertheless useful insights into contemporary project management practices with respect to multimedia systems.}, Address = {Dunedin, New Zealand}, Author = {Stephen MacDonell and Tim Fletcher}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:07 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {99/15}, Size = {228 KB}, Title = {Industry practices in project management for multimedia information systems}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1993-04, Abstract = {This paper reports the results of a recent national survey which considered the use of CASE tools and 4GLs in commercial software development.
Responses from just over 750 organisations show a high degree of product penetration, along with extensive use of package solutions. Use of 3GLs in general, and of COBOL in particular, is still relatively widespread, however. In terms of systems analysis and design techniques under a CASE/4GL environment, screen and report definition is the most preferred technique, although both dataflow analysis and data modelling also feature strongly.}, Address = {Dunedin, New Zealand}, Author = {Stephen G. MacDonell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2011-01-19 14:30:30 +1300}, Institution = {Department of Information Science, University of Otago}, Month = {November}, Number = {93/4}, Size = {225 KB}, Title = {Software development, {CASE} tools and {4GLs}---{A} survey of {N}ew {Z}ealand usage. Part 1: 750 {N}ew {Z}ealand organisations}, Type = {Discussion paper}, Year = {1993}} @techreport{dp1994-08, Abstract = {Budgetary constraints are placing increasing pressure on project managers to effectively estimate development effort requirements at the earliest opportunity. With the rising impact of automation on commercial software development, the attention of researchers developing effort estimation models has recently been focused on functional representations of systems, in response to the assertion that development effort is a function of specification content. A number of such models exist---several, however, have received almost no research or industry attention. Project managers wishing to implement a functional assessment and estimation programme are therefore unlikely to be aware of the various methods or how they compare. This paper therefore attempts to provide this information, as well as forming a basis for the development and improvement of new methods.}, Address = {Dunedin, New Zealand}, Author = {Stephen G. MacDonell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:07 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {94/8}, Size = {259 KB}, Title = {A comparative review of functional complexity assessment methods for effort estimation}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1995-05, Abstract = {Advances in software process technology have rendered many existing methods of size assessment and effort estimation inapplicable. The use of automation in the software process, however, provides an opportunity for the development of more appropriate software size-based effort estimation models. A specification-based size assessment method has therefore been developed and tested in relation to process effort on a preliminary set of systems. The results of the analysis confirm the assertion that, within the automated environment class, specification size indicators (that may be automatically and objectively derived) are strongly related to process effort requirements.}, Address = {Dunedin, New Zealand}, Author = {Stephen G.
MacDonell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:16:17 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {CASE, process effort, software metrics}, Month = {July}, Number = {95/5}, Size = {264 KB}, Title = {Establishing relationships between specification size and software process effort in {CASE} environments}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1996-06, Abstract = {The controlled management of software processes, an area of ongoing research in the business systems domain, is equally important in the development of geographical information systems (GIS). Appropriate software processes must be defined, used and managed in order to ensure that, as much as possible, systems are developed to quality standards on time and within budget. However, specific characteristics of geographical information systems, in terms of their inherent need for graphical output, render some process management tools and techniques less appropriate. This paper examines process management activities that are applicable to GIS, and suggests that it may be possible to extend such developments into the visual programming domain. A case study concerned with development effort estimation is presented as a precursor to a discussion of the implications of system requirements for significant graphical output.}, Address = {Dunedin, New Zealand}, Author = {Stephen G. MacDonell and George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:39:36 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {96/06}, Size = {180 KB}, Title = {Process management for geographical information system development}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1996-17, Abstract = {The use of `standard' regression analysis to derive predictive equations for software development has recently been complemented by increasing numbers of analyses using less common methods, such as neural networks, fuzzy logic models, and regression trees. This paper considers the implications of using these methods and provides some recommendations as to when they may be appropriate. A comparison of techniques is also made in terms of their modelling capabilities with specific reference to function point analysis.}, Address = {Dunedin, New Zealand}, Author = {Stephen G. MacDonell and Andrew R. Gray}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:40:20 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {September}, Number = {96/17}, Size = {232 KB}, Title = {Alternatives to regression models for estimating software projects}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1996-19, Abstract = {This paper brings together a set of commonsense recommendations relating to the delivery of software quality, with some emphasis on the adoption of realistic perspectives for software process/product stakeholders in the area of process improvement. The use of software measurement is regarded as an essential component for a quality development program, in terms of prediction, control, and adaptation as well as the communication necessary for stakeholders' realistic perspectives. Some recipes for failure are briefly considered so as to enable some degree of contrast between what is currently perceived to be good and bad practices. 
This is followed by an evaluation of the quality-at-all-costs model, including a brief pragmatic investigation of quality in other, more mature, disciplines. Several programs that claim to assist in the pursuit of quality are examined, with some suggestions made as to how they may best be used in practice.}, Address = {Dunedin, New Zealand}, Author = {Stephen G. MacDonell and Andrew R. Gray}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:40:37 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {September}, Number = {96/19}, Size = {240 KB}, Title = {Software process engineering for measurement-driven software quality programs---{R}ealism and idealism}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1996-15, Abstract = {There is comparatively little work, other than function points, that tackles the problem of building prediction systems for software that is dominated by data considerations, in particular systems developed using 4GLs. We describe an empirical investigation of 70 such systems. Various easily obtainable counts were extracted from data models (e.g. number of entities) and from specifications (e.g. number of screens). Using simple regression analysis, prediction systems of implementation size with accuracy of MMRE=21% were constructed. Our work shows that it is possible to develop simple and effective prediction systems based upon metrics easily derived from functional specifications and data models.}, Address = {Dunedin, New Zealand}, Author = {Stephen G. MacDonell and Martin J. Shepperd and Philip J. Sallis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:40:12 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {metrics, entity-relationship models, 4GL, empirical, prediction}, Month = {August}, Number = {96/15}, Size = {200 KB}, Title = {Measurement of database systems: {A}n empirical study}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1999-13, Abstract = {There has been increasing interest in recent times in using fuzzy logic techniques to represent software metric models, especially those predicting development effort. The use of fuzzy logic for this application area offers several advantages when compared to other commonly used techniques. These include the use of a single model with different levels of precision for inputs and outputs used throughout the development life cycle, the possibility of model development with little or no data, and its effectiveness when used as a communication tool. The use of fuzzy logic in any applied field, however, requires that suitable tools are available for both practitioners and researchers---satisfying both interface- and functionality-related requirements. After outlining some of the specific needs of the software metrics community, including results from a survey of software developers on this topic, the paper describes the use of a set of tools called FULSOME (Fuzzy Logic for Software Metrics). The development of a simple fuzzy logic system by a software metrician and subsequent tuning are then discussed using a real-world set of software metric data.
The automatically generated fuzzy model performs acceptably when compared to regression-based models.}, Address = {Dunedin, New Zealand}, Author = {Stephen MacDonell and Andrew Gray and James Calvert}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:09 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {99/13}, Size = {236 KB}, Title = {{FULSOME}: {F}uzzy logic for software metric practitioners and researchers}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-12, Abstract = {Software forensics is a research field that, by treating pieces of program source code as linguistically and stylistically analyzable entities, attempts to investigate aspects of computer program authorship. This can be performed with the goal of identification, discrimination, or characterization of authors. In this paper we extract a set of 26 standard authorship metrics from 351 programs by 7 different authors. The use of feed-forward neural networks, multiple discriminant analysis, and case-based reasoning is then investigated in terms of classification accuracy for the authors on both training and testing samples. The first two techniques produce remarkably similar results, with the best results coming from the case-based reasoning models. All techniques have high prediction accuracy rates, supporting the feasibility of the task of discriminating program authors based on source-code measurements.}, Address = {Dunedin, New Zealand}, Author = {Stephen MacDonell and Andrew Gray and Grant MacLennan and Philip Sallis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:09 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {99/12}, Size = {148 KB}, Title = {Software forensics for discriminating between program authors using case-based reasoning, feed-forward neural networks and multiple discriminant analysis}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1996-25e, Abstract = {This paper first describes the difficulties inherent in supporting a class of environmental problems, those involved in Regional Environmental Decision Making. A set of conceptual criteria are presented along with discussion on how the criteria might be approached. It is shown that a major obstacle is the need for a system that integrates components of Geographic Information Systems with process modelling functions. A new approach, Spatial Process Modelling is proposed. More detailed design criteria for this system are developed which are then used to develop a prototype system. The system is described and benefits and limitations discussed.}, Address = {Dunedin, New Zealand}, Author = {Samuel Mann}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:37:52 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {96/25e}, Size = {348 KB}, Title = {Environmental decisions with spatial process modelling}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1995-12, Abstract = {Resource management in New Zealand is fraught with debate and controversy. Regional Councils often seem stuck in the middle of two opposing groups, the farmers and the environmentalists. There are areas, however, where the Regional Councils could be seen to be hindering progress towards resolution of problems. 
By avoiding policy formulations on certain issues, e.g. vegetation burning, Councils are creating difficulties for their own staff, landholders and environmental groups. This paper examines one debate that could be greatly simplified by a few policy direction decisions.}, Address = {Dunedin, New Zealand}, Author = {Samuel A. Mann}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:09 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {September}, Number = {95/12}, Title = {A case study in environmental decision making}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1997-02, Abstract = {Analyses of landscape structure are used to test the hypothesis that remotely sensed images can be used as indicators of ecosystem conservation status. Vegetation types based on a classified SPOT satellite image were used in a comparison of paired reserve (conservation area) and adjacent, more human-modified, areas (controls). Ten reserves (average size 965 ha) were selected from upland tussock grasslands in Otago, New Zealand. While there were equal numbers of vegetation types and the size and shape distribution of patches within the overall landscapes were not significantly different, there was less `target' vegetation in controls. This was in smaller patches and fewer of these patches contained `core areas'. These control `target' patches were also less complex in shape than those in the adjacent reserves. These measures showed that remotely sensed images can be used to derive large-scale indicators of landscape conservation status. An index is proposed for assessing landscape change, and conservation management issues are raised.}, Address = {Dunedin, New Zealand}, Author = {Samuel Mann and George L. Benwell and William G. Lee}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2011-01-17 14:34:32 +1300}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {97/02}, Size = {108 KB}, Title = {Landscape structure and ecosystem conservation: {A}n assessment using remote sensing}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1995-13, Abstract = {This paper draws together existing data with recent survey results and compares the development of local government GIS with the evolution of Information Systems (IS). These comparisons are made using the philosophy that organisational GIS can be modelled. Using this model, various stages of GIS maturity are evaluated.}, Address = {Dunedin, New Zealand}, Author = {Andrew J. Marr and George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:16:57 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {September}, Number = {95/13}, Size = {268 KB}, Title = {Local government {GIS} in {N}ew {Z}ealand since 1989}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1996-25f, Abstract = {This paper discusses the concept of maturity in the use of GIS and then formulates a computational method for measuring an organisation's maturity level from the construction of a surrogate indicator. Generation of this model is made under the proposition that maturity is linked to the level to which GIS has been integrated and utilised on an organisation-wide basis in day-to-day activities.
The research focuses on New Zealand local government and incorporates parallel studies of conventional information technology (IT) with recently collected data to provide support for the concepts and techniques used. It is postulated that, due to similarities of function found in other local authorities, the model has the potential, with further research, for wide application.}, Address = {Dunedin, New Zealand}, Author = {Andrew J. Marr and George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:36:04 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {96/25f}, Size = {500 MB}, Title = {{GIS} maturity and integration}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1998-09, Abstract = {This paper proposes a structure for the development of a generic graphical system for modelling spatial processes (SMSP). This system seeks to integrate the spatial data handling operations of a GIS with specialist numerical modelling functionality, by the description of the processes involved. A conceptual framework is described, the foundation of which comprises six defined modules (or services) that are considered a minimum requirement for basic system operation. The services are identified following description of the three key components of systems integration, and the examination of the preferred integrating structure. The relationship of the integration components to sample commentary on the future requirements of integration is discussed, and the benefits and deficiencies of an implemented system for modelling spatial processes are noted.}, Address = {Dunedin, New Zealand}, Author = {Andrew Marr and Richard Pascoe and George Benwell and Samuel Mann}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-11 09:58:36 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {98/09}, Size = {644 KB}, Title = {Development of a generic system for modelling spatial processes}, Type = {Discussion paper}, Year = {1998}} @techreport{dp1996-25g, Abstract = {This paper describes the use of a prototype spatial information system to facilitate exploratory analyses of 60 years of scientific observation data concerning a breeding population of royal albatrosses at Taiaroa Head, on the east coast of New Zealand's South Island. This system shall form the basis of an on-going data collection, management and analysis effort. Incorporation of breeding records with spatial and landscape data permits the investigation of spatial interactions between the location of nest sites and other phenomena. Three example analyses that explore these interactions are described and discussed.}, Address = {Dunedin, New Zealand}, Author = {B.R. McLennan and Martin K. Purvis and C.J.R. Robertson}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 17:10:34 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Note = {(Not in electronic version.)}, Number = {96/25g}, Title = {Wildlife population analysis with {GIS}: {C}onservation management of royal albatross}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1996-13, Abstract = {This paper describes a tool being developed to allow users to visualise the ripening characteristics of fruit. These characteristics, such as sugar, acid and moisture content, can be measured using non-destructive Near Infrared Reflectance (NIR) analysis techniques.
The four-dimensional nature of the NIR data introduces some interesting visualisation problems. The display device only provides two dimensions, making it necessary to design two-dimensional methods for representing the data. In order to help the user fully understand the dataset, a graphical display system is created with an interface that provides flexible visualisation tools.}, Address = {Dunedin, New Zealand}, Author = {Hayden Munro and Kevin Novins and George L. Benwell and Alistair Moffat}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:41:35 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {NIR spectroscopy, Polhemus FasTrak{\texttrademark}, interaction, interactive graphics, interfaces, visualisation, scientific visualisation}, Month = {July}, Number = {96/13}, Title = {Interactive visualisation tools for analysing {NIR} data}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1997-03, Abstract = {Multimedia technology allows a variety of presentation formats to portray instructions for performing a task. These formats include text, graphics, video, audio and photographs, used singly or in combination (Kawin, 1992; Hills, 1984; Newton, 1990; Bailey, 1996). As part of research at the Multimedia Systems Research Laboratory to identify a syntax for the use of multimedia elements, an experiment was conducted to determine whether the use of text or video representations of task instructions was more effective at communicating those instructions (Norris, 1996). This paper reports on the outcome of that study. The repair and assembly environment of a local whiteware manufacturer provided the study domain. The task chosen for the study was the replacement of a heating element in a cooktop oven. As there were no task instructions available from the manufacturer, the study was conducted in two phases: Phase I was a cognitive task analysis of service technicians to determine the steps as well as the cues and considerations of the assembly task; and in Phase II we evaluated the text and video representations of the task instructions. The next sections briefly describe the methodology and the results from the experiment.}, Address = {Dunedin, New Zealand}, Author = {Brian E. Norris and William B.L. Wong}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:11 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {May}, Number = {97/03}, Size = {44 KB}, Title = {Supporting task performance: {I}s text or video better?}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1999-21, Abstract = {This paper reports on an investigation into wayfinding principles and their effectiveness within a virtual environment. To investigate these principles, a virtual environment of an actual museum was created using QuickTime Virtual Reality. Wayfinding principles used in the real world were identified and used to design the interaction of the virtual environment. The initial findings suggest that real-world navigation principles, such as the use of map and landmark principles, can significantly help navigation within this virtual environment.
However, navigation difficulties were discovered through an Activity Theory-based Cognitive Task Analysis.}, Address = {Dunedin, New Zealand}, Author = {Brian Norris and Da'oud Rashid and William Wong}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:48:32 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {wayfinding, navigation, QTVR, virtual environments, activity theory}, Month = {September}, Number = {99/21}, Title = {Wayfinding/navigation within a {QTVR} virtual environment: {P}reliminary results}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-10, Abstract = {More and more medical information is appearing on the Internet, but it is not easy to get at the nuggets amongst all the spoil. Bruce McKenzie's editorial in the December 1997 edition of SIM Quarterly dealt very well with the problems of quality, but I would suggest that the problem of accessibility is as much of a challenge. As ever-greater quantities of high quality medical information are published electronically, the need to be able to find it becomes imperative. There are a number of tools to find what you want on the Internet---search engines, agents, indexing and classification schemes and hyperlinks, but their use requires care, skill and experience.}, Address = {Dunedin, New Zealand}, Author = {David Parry}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:12 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {99/10}, Size = {196 KB}, Title = {Finding medical information on the {I}nternet: {W}ho should do it and what should they know}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-27, Abstract = {Since July 1998 we have been teaching an Internet-based distance learning course in health informatics (http://basil.otago.ac.nz:800). The development of this course and the experiences we have had running it are described in this paper. The course was delivered using paper materials, a face-to-face workshop, a CD-ROM and Internet communication tools. We currently have about 30 students around New Zealand, a mixture of physicians, nurses and other health staff. Some teaching methods have worked, some haven't, but in the process we have learned a number of valuable lessons.}, Address = {Dunedin, New Zealand}, Author = {David Parry and Alice Breton and David Abernethy and Sophie Cockcroft and John Gillies}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:49:10 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {distance learning, healthcare, Internet, CD-ROM}, Month = {December}, Number = {99/27}, Size = {80 KB}, Title = {Using the {I}nternet to teach health informatics}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-24, Abstract = {Since 1997 the authors have been involved in the development of a distance learning course in health informatics. The course is delivered via CD-ROM and the Internet. During this process we have learned valuable lessons about computer-assisted collaboration and cooperative work. In particular we have developed methods of using the software tools available for communication and education. 
We believe that electronic distance learning offers a realistic means of providing education in health informatics and other fields to students who, for reasons of geography or work commitments, would not be able to participate in a conventional course.}, Address = {Dunedin, New Zealand}, Author = {David Parry and Sophie Cockcroft and Alice Breton and David Abernethy and John Gillies}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:12 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {99/24}, Size = {496 KB}, Title = {The development of an electronic distance learning course in health informatics}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1999-26, Abstract = {The rate of induction of labour (IOL) is increasing, despite no obvious increase in the incidence of the major indications. However, the rate varies widely between different centres and practitioners, and this does not seem to be due to variations in patient populations. The IOL decision-making process of six clinicians was recorded and examined using hypothetical scenarios presented on a computer. Several rules were identified from a rough sets analysis of the data. These rules were compared to the actual practice of these clinicians in 1994. Initial tests of these rules show that they may form a suitable set for developing an expert system for the induction of labour.}, Address = {Dunedin, New Zealand}, Author = {David Parry and Wai Kiang Yeap and Neil Pattison}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:12 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {rough sets, obstetrics, knowledge acquisition}, Month = {December}, Number = {99/26}, Size = {108 KB}, Title = {Using rough sets to study expert behaviour in induction of labour}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1998-06, Abstract = {The aim of the study was to compare the two management protocols for postterm pregnancy: elective induction of labour at 42 weeks' gestation, and continuing the pregnancy with fetal monitoring while awaiting spontaneous labour. A retrospective observational study compared a cohort of 360 pregnancies where labour was induced with 486 controls. All pregnancies were postterm (>294 days) by an early ultrasound scan. Induction of labour was achieved with either prostaglandin vaginal pessaries or gel, or forewater rupture and Syntocinon infusion. The control group consisted of women with postterm pregnancies who were not induced routinely and who usually had twice-weekly fetal assessment with cardiotocography and/or ultrasound. Women who had their labour induced differed from those who awaited spontaneous labour. Nulliparas (OR 1.54; 95% CI 1.24-1.83) and married women (OR 1.76; 95% CI 1.45-2.06) were more likely to have their labour induced. There was no association between the type of caregiver and induction of labour. Induction of labour was associated with a reduction in the incidence of normal vaginal delivery (OR 0.63, 95% CI 0.43-0.92) and an increased incidence of operative vaginal delivery (OR 1.46; 95% CI 1.34-2.01). There was no difference in the overall rate of Caesarean section. There was no difference in fetal or neonatal outcomes. Parity had a major influence on delivery outcomes from a policy of induction of labour. Nulliparas in the induced group had worse outcomes, with only 43% achieving a normal vaginal delivery (OR 0.78, 95% CI 0.65-0.95).
In contrast, for multiparas, the induced group had better outcomes, with fewer Caesarean sections (OR 0.88, 95% CI 0.81-0.96). This retrospective observational study of current clinical practice shows that induction of labour for postterm pregnancy appears to be favoured by nulliparous married women. It suggests that induction of labour may improve delivery outcomes for multigravidas but has an adverse effect for nulliparas.}, Address = {Dunedin, New Zealand}, Author = {Emma Parry and David Parry and Neil Pattison}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:13 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {98/06}, Title = {Induction of labour for post term pregnancy: {A}n observational study}, Type = {Discussion paper}, Year = {1998}} @techreport{dp1996-25h, Abstract = {Sharing geographical data sets is highly desirable for economic and technical reasons. In this paper the author describes the development of an agency for sharing geographical data, which is based on the use of the ISODE implementation of the X.500 Directory Service and a collection of software agents which collaborate with each other to perform the various tasks associated with sharing data.}, Address = {Dunedin, New Zealand}, Author = {Richard T. Pascoe}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 17:10:37 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Note = {(Not in electronic version.)}, Number = {96/25h}, Title = {Data sharing using the {X}.500 directory}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1996-16, Abstract = {This paper discusses the preliminary development of a Diploma in Medical Informatics, which will comprise courses offered entirely through the Internet in the form of World Wide Web documents and electronic mail. The proposed use of such educational technology for the delivery of these courses within a distance learning environment is based upon a conversational framework developed by Laurillard (1993) and an associated classification of this technology according to the extent to which elements within the conversational framework are supported.}, Address = {Dunedin, New Zealand}, Author = {Richard T. Pascoe and David Abernethy}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:41:59 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {Diploma in Medical Informatics, World Wide Web (WWW), distance learning, educational technology}, Month = {September}, Number = {96/16}, Title = {Teaching a diploma in medical informatics using the {W}orld {W}ide {W}eb}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1995-01, Abstract = {This paper develops an approach to data modelling for the design of hypermedia databases. First, the use of data modelling for the design of hypermedia database systems is investigated. A specific example, that of a car parts database, is used as a means of illustrating a generic problem, namely the difficulty associated with interrogating a large database when the exact data element being sought is unknown. The use of hypermedia as a basis for data retrieval in such situations is then discussed. The data contained within hypermedia database systems is typically unstructured, which has led to systems being developed using ad hoc design approaches with little regard for formal data modelling techniques.
Hence, the main contribution of the paper is the illustration of a hybrid data modelling approach of suitable semantic richness to capture the complexities of hypermedia databases.}, Address = {Dunedin, New Zealand}, Author = {Russell J. Pegler and Peter G. Firns}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:13 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {95/1}, Title = {Semantic data modelling for hypermedia database applications}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1999-17, Abstract = {This paper describes an architecture for building distributed information systems from existing information resources, based on distributed object and software agent technologies. This architecture is being developed as part of the New Zealand Distributed Information Systems (NZDIS) project. An agent-based architecture is used: information sources are encapsulated as information agents that accept messages in an agent communication language (the FIPA ACL). A user agent assists users to browse ontologies appropriate to their domain of interest and to construct queries based on terms from one or more ontologies. One or more query processing agents are then responsible for discovering (from a resource broker agent) which data source agents are relevant to the query, decomposing the query into subqueries suitable for those agents (including the translation of the query into the specific ontologies implemented by those agents), executing the subqueries and translating and combining the subquery results into the desired result set. Novel features of this system include the use of standards from the object-oriented community such as the Common Object Request Broker Architecture (CORBA) (as a communications infrastructure), the Unified Modeling Language (used as an ontology representation language), the Object Data Management Group's Object Query Language (used for queries) and the Object Management Group's Meta Object Facility (used as the basis for an ontology repository agent). Query results need not be returned within an ACL message, but may instead be represented by a CORBA object reference which may be used to obtain the result set.}, Address = {Dunedin, New Zealand}, Author = {Martin K. Purvis and Stephen Cranefield and Geoff Bush and Dan Carter and Bryce McKinlay and Mariusz Nowostawski and Roy Ward}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:14 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {August}, Number = {99/17}, Size = {212 KB}, Title = {The {NZDIS} project: {A}n agent-based distributed information systems architecture}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1995-08, Abstract = {With the increasing size, complexity and interconnectedness of systems and organisations, there is a growing need for high level modelling approaches that span the range of application domains. Causal agent modelling offers an intuitive and powerful approach for the development of dynamic models for any application area. This paper outlines some of the basic ideas behind the nature of causal agent models, why they are fundamental to the modelling enterprise, and compares developments in this area to those in the related field of coordination theory. It also describes some research activities using causal agent models at the University of Otago.}, Address = {Dunedin, New Zealand}, Author = {Martin K. 
Purvis and Stephen J.S. Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:16:48 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {August}, Number = {95/8}, Size = {180 KB}, Title = {Causal agent modelling: {A} unifying paradigm for systems and organisations}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1996-04, Abstract = {The use of intelligent software agents is a modelling paradigm that is gaining increasing attention in applications of distributed systems. This paper identifies essential characteristics of agents and shows how they can be mapped into a coloured Petri net representation so that the coordination of activities both within agents and between interacting agents can be visualised and analysed. The detailed structure and behaviour of an individual agent in terms of coloured Petri nets are presented, as well as a description of how such agents interact. A key notion is that the essential functional components of an agent are explicitly represented by means of coloured Petri net constructs in this representation.}, Address = {Dunedin, New Zealand}, Author = {Martin K. Purvis and Stephen J.S. Cranefield}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:39:00 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {96/04}, Title = {Agent modelling with {P}etri nets}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1999-06, Abstract = {The increasing availability and variety of large environmental data sets are opening new opportunities for data mining and useful cross-referencing of disparate environmental data sets distributed over a network. In order to take advantage of these opportunities, environmental information systems will need to operate effectively in a distributed, open environment. In this paper, we describe the New Zealand Distributed Information System (NZDIS) software architecture for environmental information systems. In order to optimise extensibility, openness, and flexible query processing, the architecture is organised into collaborating software agents that communicate by means of a standard declarative agent communication language. The metadata of environmental data sources are stored as part of agent ontologies, which represent information models of the domain of the data repository. The agents and the associated ontological framework are designed as much as possible to take advantage of standard object-oriented technology, such as CORBA, UML, and OQL, in order to enhance the openness and accessibility of the system.}, Address = {Dunedin, New Zealand}, Author = {Martin K. Purvis and Stephen Cranefield and Mariusz Nowostawski}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:14 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {April}, Number = {99/06}, Size = {208 KB}, Title = {A distributed architecture for environmental information systems}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1998-10, Abstract = {This paper describes combined approaches of data preparation, neural network analysis, and fuzzy inferencing techniques (which we collectively call neuro-fuzzy engineering) to the problem of environmental modelling. The overall neuro-fuzzy architecture is presented, and specific issues associated with environmental modelling are discussed.
A case study that shows how these techniques can be combined is presented for illustration. We also describe our current software implementation that incorporates neuro-fuzzy analytical tools into commercially available geographical information system software.}, Address = {Dunedin, New Zealand}, Author = {Martin K. Purvis and Nikola Kasabov and George Benwell and Qingqing Zhou and Feng Zhang}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-11 09:58:29 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {98/10}, Size = {384 KB}, Title = {Neuro-fuzzy methods for environmental modelling}, Type = {Discussion paper}, Year = {1998}} @techreport{dp1996-14, Abstract = {A novel connectionist architecture that differs from conventional architectures based on the neuroanatomy of biological organisms is described. The proposed scheme is based on the model of multilayered optical thin-films, with the thicknesses of the individual thin-film layers serving as adjustable `weights' for the training. A discussion of training techniques for this model and some sample simulation calculations in the area of pattern recognition are presented. These results are compared with those obtained when the same training data are used in connection with a feed-forward neural network with back propagation training. A physical realization of this architecture could largely take advantage of existing optical thin-film deposition technology.}, Address = {Dunedin, New Zealand}, Author = {Martin K. Purvis and Xiaodong Li}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:15 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {August}, Number = {96/14}, Title = {A connectionist computational architecture based on an optical thin-film model}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1994-07, Abstract = {Almost by definition, any engineering discipline has quantitative measurement at its foundation. In adopting an engineering approach to software development, the establishment and use of software metrics has therefore seen extensive discussion. The degree to which metrics are actually used, however, particularly in New Zealand, is unclear. Four surveys, conducted over the last eight years, are therefore reviewed in this paper, with a view to determining trends in the use of metrics. According to the findings presented, it would appear that no more than one third of organisations involved in software development utilise software metrics.}, Address = {Dunedin, New Zealand}, Author = {Martin K. Purvis and Stephen G. MacDonell and Jason Westland}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:20:57 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {94/7}, Title = {Software metrics in {N}ew {Z}ealand: {R}ecent trends}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1995-10, Abstract = {A single piece of legislation, the Resource Management Act, governs the management of environmental resources in New Zealand. It establishes procedural requirements and time constraints for all decision-making activities related to governmental environmental management.
The present paper describes a model, based on coloured Petri nets, that is under development to facilitate understanding of the Act and to examine performance characteristics of legal processes defined in the Act.}, Address = {Dunedin, New Zealand}, Author = {Martin K. Purvis and Maryam A. Purvis and George L. Benwell}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:37:22 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {August}, Number = {95/10}, Title = {Modelling and simulation of the {N}ew {Z}ealand {R}esource {M}anagement {A}ct}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1993-01, Abstract = {Despite the many qualitative elements of software time-to-build estimating, some observable features can be quantified, even if the resulting set of variables observed is arbitrary. Such is the case when estimating the expected duration for database re-engineering. If we assume that for any extant database, an entity-relationship model (ERM) can be produced from which a new normalised schema is generated, then our estimating task needs to quantify both the complexity of the ensuing ERM and also the data modelling knowledge of the `re-engineer'. Whilst there may be additional variables to be considered, a set of primary elements required for estimating the duration of the task has been identified. The formula proposed in this paper is arbitrary, but it is intended as an instrument for measuring ER model complexity, such that time-to-build estimates can be made for the task of re-engineering extant non-relational databases into relational form.}, Address = {Dunedin, New Zealand}, Author = {Philip J. Sallis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:16 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {November}, Number = {93/1}, Size = {773 KB}, Title = {A data complexity formula for deriving time-to-build estimates from non-relational to relational databases}, Type = {Discussion paper}, Year = {1993}} @techreport{dp1994-01, Abstract = {This paper is the result of some research in computational stylistics; in particular, the analysis of a document corpus that has attracted the attention of scholars from several disciplines for hundreds of years. This corpus, the Epistles of Saint Ignatius of Antioch, was originally written in Greek, but this analysis is of a single English translation. The analysis has been undertaken using a conventional approach in computational stylistics but has employed a number of contemporary software packages, such as a grammar checker, normally used for text and document creation. Research in this field predominantly characterises authorship style by the use of document statistics, such as word frequency, sentence and paragraph length, and in some cases the recurrence of certain phrases. During the research described here it was considered appropriate to use a grammar checker to identify the existence of a `new' set of characteristics. These include comparing the use of passive voice across the corpus being analysed, the percentage use of prepositions, as well as document statistics such as sentence and paragraph length, and the application of text readability formulas as indicators of writing style. The corpus analysed in this paper consists of the seven Epistles of Ignatius of Antioch, together with the Epistle of Polycarp to the Philippians.
The latter epistle has traditionally been held to authenticate the Ignatian writings. It has been suggested by some church historians that Ignatius was not the author of these epistles and may not, in fact, have existed as a person at all. Further, they suggest that two paragraphs in the Polycarp Epistle may have been added later by a second author to authenticate the Ignatian corpus. In order to contribute to the ongoing debate, this paper first examines the Ignatian corpus to determine single authorship of the seven epistles. Second, it seeks to determine whether or not the two disputed paragraphs in Polycarp's Epistle to the Philippians vary in authorship style from the rest of that epistle. Third, it compares authorship style in the two inserted paragraphs of Polycarp's Epistle with that of the Ignatian corpus in order to make some observations on the hypothesis that a single author was responsible for both.}, Address = {Dunedin, New Zealand}, Author = {Philip J. Sallis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:20:07 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {January}, Number = {94/1}, Size = {885 KB}, Title = {A comparison of authorship style in the document corpus of the {E}pistles of {S}t.\ {I}gnatius of {A}ntioch}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1994-02, Abstract = {Whilst change is an inherent characteristic of the IT industry, the difficulty of frequent and timely change in tertiary curricula is a constraint on the ability of universities to adequately meet the requirements of knowledge and expertise expected of new graduates. In this paper, some recently published research concerning the top ten issues for managers of information technology in the USA, Europe and Australia is evaluated in terms of its impact on IS teaching and research. The paper concludes that the top ten issues perceived by IS managers were probably in large part due to change resulting not only from advances in technology but also in response to past failures or inadequacies in the process of delivering high quality information system products to corporate consumers. The need for business and education to be aware of the motivations for change and the constraints that are attendant on it in both environments is emphasised if harmonious progress is to prevail in the production and utilisation of new IS graduates.}, Address = {Dunedin, New Zealand}, Author = {Philip J. Sallis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:19:50 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {January}, Number = {94/2}, Size = {599 KB}, Title = {Management perceptions of {IS} research and development issues}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1997-13, Address = {Dunedin, New Zealand}, Author = {Philip J. Sallis and Diana A. Kassabova}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-11 09:53:34 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {97/13}, Title = {Computer-mediated communication: {E}xperiments with e-mail readability}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1997-11, Abstract = {The paper explores building profiles of Newsgroups from a corpus of Usenet E-mail messages, employing some standard statistical techniques as well as fuzzy clustering methods.
A large set of data from a number of Newsgroups has been analysed to elicit some text attributes, such as number of words, length of sentences and other stylistic characteristics. Readability scores have also been obtained by using recognised assessment methods. These text attributes were used for building Newsgroups' profiles. Three newsgroups, each with a similar number of messages, were selected from the processed sample for the analysis of two types of one-dimensional profiles, one by length of texts and the second by readability scores. Those profiles are compared with corresponding profiles of the whole sample and also with those of a group of frequent participants in the newsgroups. Fuzzy clustering is used for creating two-dimensional profiles of the same groups. An attempt is made to identify the newsgroups by defining centres of data clusters. It is contended that this approach to Newsgroup profile analysis could facilitate a better understanding of computer-mediated communication (CMC) on the Usenet, which is a growing medium of informal business and personal correspondence.}, Address = {Dunedin, New Zealand}, Author = {Philip J. Sallis and Diana A. Kassabova}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:17 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {97/11}, Title = {Usenet newsgroups' profile analysis utilising standard and non-standard statistical methods}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1995-02, Abstract = {In September 1994, the government of New Zealand published a document entitled Education for the 21st Century. The document sets out targets and challenges for the education system in New Zealand to meet by 2001. One of the targets, and the associated fiscal challenge, is to improve the access of New Zealand students to information technology, so that by 2001 there is at least one computer for every five students at all levels of school education. This bold policy statement follows a chain of reports and government initiatives extending over approximately 15 years. This paper describes government policy initiatives, the reports which gave rise to them, and the changes in curriculum, teacher, and classroom practice which have taken place since computers were first used in New Zealand classrooms in the 1970s. The short history of educational computing in New Zealand has spanned a period of massive political and economic reform, and enormous structural change in the education system. The authors have been observers of, and contributors to, aspects of the development of New Zealand's use of information technology in education over the whole of this period.}, Address = {Dunedin, New Zealand}, Author = {Philip J. Sallis and Tim McMahon}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:17 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {April}, Number = {95/2}, Title = {Pursuing a national policy for information technology in school education: {A} {N}ew {Z}ealand odyssey}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1999-23, Abstract = {Smith's method (Smith, 1985) is a formal technique for deriving a set of normalised relations from a functional dependency diagram (FDD). Smith's original rules for deriving these relations are incomplete, as they do not fully address the issue of determining the foreign key links between relations.
In addition, one of the rules for deriving foreign keys can produce incorrect results, while the other rule is difficult to automate. This paper describes solutions to these issues.}, Address = {Dunedin, New Zealand}, Author = {Nigel Stanger}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2010-11-10 12:06:17 +1300}, Institution = {Department of Information Science, University of Otago}, Keywords = {normalisation, functional dependencies, relational model, data model translation}, Month = {December}, Number = {99/23}, Size = {184 KB}, Title = {Modifications to {S}mith's method for deriving normalised relations from a functional dependency diagram}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1997-07, Abstract = {Modelling the structure of data is an important part of any system analysis project. One problem that can arise is that there may be many differing viewpoints among the various groups that are involved in a project. Each of these viewpoints describes a perspective on the phenomenon being modelled. In this paper, we focus on the representation of developer viewpoints, and in particular on how multiple viewpoint representations may be used for database design. We examine the issues that arise when transforming between different viewpoint representations, and describe an architecture for implementing a database design environment based on these concepts.}, Address = {Dunedin, New Zealand}, Author = {Nigel J. Stanger and Richard T. Pascoe}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:18 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {97/07}, Size = {232 KB}, Title = {Environments for viewpoint representations}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1997-08, Abstract = {In this paper, we describe the implementation of a database design environment (Swift) that incorporates several novel features: Swift's data modelling approach is derived from viewpoint-oriented methods; Swift is implemented in Java, which allows us to easily construct a client/server based environment; the repository is implemented using PostgreSQL, which allows us to store the actual application code in the database; and the combination of Java and PostgreSQL reduces the impedance mismatch between the application and the repository.}, Address = {Dunedin, New Zealand}, Author = {Nigel J. Stanger and Richard T.
Pascoe}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:18 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {97/08}, Size = {108 KB}, Title = {Exploiting the advantages of object-oriented programming in the implementation of a database design environment}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1996-25, Abstract = {A collection of papers authored by members of the Information Science department and presented at the 1st International Conference on GeoComputation, Leeds, United Kingdom.}, Address = {Dunedin, New Zealand}, Author = {{Multiple authors}}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:48:51 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {96/25}, Size = {2.9 MB}, Title = {Special issue: {G}eo{C}omputation '96}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1994-19, Abstract = {The facility to display features of speech in a visual speech aid does not by itself guarantee that the aid will be effective in speech therapy. An effective visual speech aid must provide a visual representation of an utterance from which a judgement on the ``goodness'' of the utterance can be made. Two things are required for an aid to be effective. Firstly, the clusters of acceptable utterances must be separate from the unacceptable utterances in display space. Secondly, the acoustic features which distinguish acceptable utterances from unacceptable utterances must be evident in the displays of the speech aid. A two-part test, called the Visual Display Test (VDT), has been developed to assess a visual speech aid's capacity to fulfil these requirements.}, Address = {Dunedin, New Zealand}, Author = {Catherine I. Watson}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:18 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {November}, Number = {94/19}, Size = {257 KB}, Title = {The visual display test: {A} test to assess the usefulness of a visual speech aid}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1999-22, Abstract = {Building predictive time series models for freshwater systems is important both for understanding the dynamics of these natural systems and in the development of decision support and management software. This work describes the application of a machine learning technique, namely genetic programming (GP), to the prediction of chlorophyll-a. The system endeavoured to evolve several mathematical time series equations, based on limnological and climate variables, which could predict the dynamics of chlorophyll-a on unseen data. The predictive accuracy of the genetic programming approach was compared with an artificial neural network and a deterministic algal growth model. The GP system evolved some solutions which were improvements over the neural network and showed that the transparent nature of the solutions may allow inferences about underlying processes to be made. This work demonstrates that non-linear processes in natural systems may be successfully modelled through the use of machine learning techniques.
Further, it shows that genetic programming may be used as a tool for exploring the driving processes underlying freshwater system dynamics.}, Address = {Dunedin, New Zealand}, Author = {Peter Whigham and Friedrich Recknagel}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:19 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {September}, Number = {99/22}, Size = {264 KB}, Title = {Predictive modelling of plankton dynamics in freshwater lakes using genetic programming}, Type = {Discussion paper}, Year = {1999}} @techreport{dp1996-25i, Abstract = {Geographic information systems are an important tool for the field of geocomputing. A key component of every system is the data---spatial data has traditionally been labour-intensive to collect, and hence expensive. This paper establishes a new method of acquiring spatial data from motion video. The proposed method is based upon the principles of photogrammetry, but allows position to be calculated with feature tracking rather than point correspondence. By doing so, it avoids many constraints imposed by previous solutions. The new method is demonstrated with linear and rotational motion.}, Address = {Dunedin, New Zealand}, Author = {Mark Williams}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:37:20 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {96/25i}, Size = {808 KB}, Title = {Spatial data acquisition from motion video}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1994-09, Abstract = {It is virtually impossible to know everything about any facet of computing as it changes on almost a daily basis. Having said that, I believe that it is worth sharing some of the knowledge that I have gained as a result of 5 years of study and experimentation with viruses and virus defense strategies, as well as having personally tested nearly 50 anti-virus products.}, Address = {Dunedin, New Zealand}, Author = {Henry B. Wolfe}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:21:06 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {94/9}, Title = {Viruses: {W}hat can we really do?}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1996-12, Address = {Dunedin, New Zealand}, Author = {Henry B. Wolfe}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:55:13 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {96/12}, Size = {72 KB}, Title = {Information warfare: {W}here are the threats?}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1996-10, Abstract = {Cryptography is the art or science, depending on how you look at it, of keeping messages secure. It has been around for a couple of thousand years in various forms. The Spartan Lysander and even Caesar made use of cryptography in some of their communications. Others in history include Roger Bacon, Edgar Allan Poe, Geoffrey Chaucer, and many more. By today's standards, cryptographic techniques through the ages, right up to the end of World War I, were pretty primitive. With the development of electro-mechanical devices, cryptography came of age. The subsequent evolution of the computer has raised the level of security that cryptography can provide in communications and data storage.}, Address = {Dunedin, New Zealand}, Author = {Henry B.
Wolfe}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:20 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {96/10}, Title = {Politics and techniques of data encryption}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1996-11, Abstract = {In today's world most businesses, large and small, depend on their computer(s) to provide vital functions consistently and without interruption. In many organizations the loss of the computer function could mean the difference between continued operation and shutdown. Reliability and continuity, therefore, become the critical aspects of any computer system(s) currently in use. This paper attempts to describe some of the most important issues any organization should address in order to reduce their risk of computer-related failure.}, Address = {Dunedin, New Zealand}, Author = {Henry B. Wolfe}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:20 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {96/11}, Title = {Reasonable security safeguards for small to medium organisations}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1997-09, Abstract = {Privacy is one of the most fundamental of human rights. It is not a privilege granted by some authority or state. It is, in fact, necessary for each human being's normal development and survival. Those nations that have in the past followed, and currently follow, the notion that they have the authority and/or moral high ground to grant or deny privacy to their citizens are notable for their other human rights violations. This paper is centered around the above premise and will offer the reader some good news and some bad news. But most importantly, it will put the reader on notice that our privacy is constantly under attack from one vested interest or another and that each and every one of us must be vigilant in the protection of our private matters. It is common in New Zealand to assume that anything secret is bad. This is an extremely na{\"\i}ve position to take for any intelligent individual. The old phrase ``if you haven't got anything to hide, then you shouldn't mind{\ldots}'' is often used to intimidate, manipulate or coerce an individual to ``confess'' or share information that he/she initially believes to be confidential, private or otherwise not for sharing with others. Secrecy is neither bad nor good in and of itself. It is merely a factual description of the condition of some information. Now for some good news. There are a number of technological devices and procedures that can be used to enhance one's privacy. The bad news is that most, if not all, can be easily defeated with other technological advances.}, Address = {Dunedin, New Zealand}, Author = {Henry B. Wolfe}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:20 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {July}, Number = {97/09}, Title = {Privacy enhancing technology}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1998-02, Abstract = {Electronic security in this day and age covers a wide variety of techniques. One of the most important areas that must be addressed is that of commerce on the Internet. The Internet is an insecure medium, to say the least.
Every message sent must pass through many computers that are most likely controlled by unrelated and untrusted organizations before it ultimately reaches the final destination. At any one of these relays the information within the message can be scrutinized, analyzed and/or copied for later reference. There are documented and suspected instances of surveillance of Internet traffic. It has been suggested that several of the major communication switches (through which 90% or more of Internet traffic must pass) have permanent surveillance in place. Another insidious but less obvious fact about Internet use is that messages, once sent, are not discarded, nor do they disappear forever. Usually, at one or more relays, copies of messages are archived and kept for differing time periods. Most ordinary users are not aware that messages sent six months ago may be retrievable. That fact could have serious legal ramifications for the sender. At this time cryptography is really the only effective method that can be used to protect Internet transactions and communications from unauthorized interception. Unauthorized means anyone to whom you have not expressly given permission to read your private communications. Cryptography is the art or science of hidden writing. Plain text (your message in readable form) is modified using an algorithm (like a mathematical equation) that requires at least one special variable (your special private key that no one else knows) to create ciphered text (your message in unreadable form). At the destination, the person for whom the message is meant must have the ``special key'' in order to unlock the ciphered message. Not all encryption is created equal, nor does it necessarily provide equivalent security. It would be wrong to intimate that merely using ``encryption'' to protect your communication is enough. There are other factors at work here as well and they have to do with the politics of privacy. I have often heard it said in New Zealand that ``if you have nothing to hide then it shouldn't matter who reads your communications''. Of course, that opinion is na{\"\i}ve and does not represent reality in any meaningful way.}, Address = {Dunedin, New Zealand}, Author = {Henry B. Wolfe}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:46:29 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {March}, Number = {98/02}, Title = {Electronic security}, Type = {Discussion paper}, Year = {1998}} @techreport{dp1994-13, Abstract = {This paper reviews the research and practice of how computer-based output information has been presented in nine different information display formats and the suitability of their use in environments ranging from static, reference-type situations, to complex, dynamic situations. The review, while not generating conclusive results, suggests that displays are more than a platform to place information. Instead, care should be taken to organise, lay out, and pre-process the information so that it enhances the communication between computer and human. The information on the screen should also be designed to augment human cognitive limitations. For instance, human weakness in integrating information across time and multiple sources could be assisted by display formats that integrate the information in the display rather than having the user attempt to integrate that information mentally.
If this is the desired outcome, information designers must start to consider performing analyses that help them understand the demands on the human information processing system and hence how information can be presented to augment this weakness. This would have to be further investigated in subsequent research.}, Address = {Dunedin, New Zealand}, Author = {William B.L. Wong}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 15:21:25 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {information display design, output information, visual display formats}, Month = {July}, Number = {94/13}, Size = {1.6 MB}, Title = {Information display design: {A} survey of visual display formats}, Type = {Discussion paper}, Year = {1994}} @techreport{dp1997-12, Address = {Dunedin, New Zealand}, Author = {William B.L. Wong}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-11 09:53:29 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {97/12}, Size = {60 KB}, Title = {The ecological approach to interface design in intentional domains}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1996-07, Abstract = {The purpose of this paper is to report on an experiment conducted to evaluate the feasibility of an empirical approach for translating a cognitive schema into a display structure. This experiment is part of a series of investigations aimed at determining how information about dynamic environments should be portrayed to facilitate decision making. Studies to date have generally derived an information display organisation that is largely based on a designer's experience, intuition and understanding of the processes. In this study we report on how we attempted to formalise this design process so that if the procedures were adopted, other less experienced designers would still be able to objectively formulate a display organisation that is just as effective. This study is based on the first stage of the emergency dispatch management process, the call-taking stage. The participants in the study were ambulance dispatch officers from the Dunedin-based Southern Regional Communications Centre of the St. John's Ambulance Service in New Zealand.}, Address = {Dunedin, New Zealand}, Author = {William B.L. Wong and David P. O'Hare and Philip J. Sallis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:21 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {96/07}, Title = {Experimental transformation of a cognitive schema into a display structure}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1996-18, Abstract = {This paper reports on how the Critical Decision Method, a cognitive task analysis technique, was employed to identify the goal states of tasks performed by dispatchers in a dynamic environment, the Sydney Ambulance Co-ordination Centre. The analysis identified five goal states: Notification; Situation awareness; Planning resource to task compatibility; Speedy response; Maintain history of developments. These goals were then used to guide the development of display concepts that support decision strategies invoked by dispatchers in this task environment.}, Address = {Dunedin, New Zealand}, Author = {William B.L. Wong and David P. O'Hare and Philip J.
Sallis}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:40:28 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {critical decision method (CDM), cognitive task analysis, cognitive engineering, ambulance dispatch, command and control, information portrayal, display design, decision support}, Month = {September}, Number = {96/18}, Size = {148 KB}, Title = {A goal-oriented approach for designing decision support displays in dynamic environments}, Type = {Discussion paper}, Year = {1996}} @techreport{dp1995-06, Abstract = {This paper reports on preliminary findings of a cognitive task analysis conducted at an ambulance despatch control center. The intense and dynamic nature of the decision making environment is first described, and the decision process modelled in an attempt to identify decision strategies used by the Communications Officers. Some information portrayal requirements stemming from one of the decision processes are then discussed, and these requirements are then translated into a proposed display solution.}, Address = {Dunedin, New Zealand}, Author = {William B.L. Wong and Philip J. Sallis and David P. O'Hare}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-10 16:16:29 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {information portrayal, information display design, decision support design, decision modelling, naturalistic decision making, critical decision method, ambulance service}, Month = {July}, Number = {95/6}, Size = {244 KB}, Title = {Information portrayal for decision support in dynamic intentional process environments}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1995-03, Abstract = {It is increasingly recognised that the manner in which information required by a decision maker is portrayed is as important as providing appropriate information. In dynamic intentional process environments such as emergency dispatch control, where the problems are non-trivial and time is tightly constrained, it is important to portray items of information that are used together close to one another, or appropriately integrated. This is important in speeding up the decision maker's interpretation of the information and assessment of the state of the situation. But how should information be portrayed so that the information may be assimilated quickly in such situations? To answer this question, a framework for analysis was developed to guide the investigation. This framework brings together the decisions made, the information used, the source and accessibility of the source, and how the information is used in each decision, thereby identifying the information portrayal requirements. This framework will be presented in this paper. However, before discussing the framework, it is necessary to introduce the concept of decision making in naturalistic environments as it is within this context of dynamic decision making that the problem of information portrayal is studied. The paper will examine the characteristics of dynamic intentional processes, and then briefly describe one example of an intentional process environment, an emergency control center, which formed the basis of the study.
The cognitive task analysis techniques used to elicit the decision processes and the information portrayal requirements will also be described, and finally the initial results of the study will be presented.}, Address = {Dunedin, New Zealand}, Author = {William B.L. Wong and Philip J. Sallis and David P. O'Hare}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:22 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {June}, Number = {95/3}, Title = {Information portrayal for intentional processes: {A} framework for analysis}, Type = {Discussion paper}, Year = {1995}} @techreport{dp1997-04, Abstract = {This study is part of research that is investigating the notion that human performance in dynamic and intentional decision making environments, such as ambulance dispatch management, can be improved if information is portrayed in a manner that supports the decision strategies invoked to achieve the goal states of the process being controlled. Hence, in designing interfaces to support real-time dispatch management decisions, it is suggested that it would be necessary to first discover the goal states and the decision strategies invoked during the process, and then portray the required information in a manner that supports such a user group's decision making goals and strategies. The purpose of this paper is to report on the experiences gleaned from the use of a cognitive task analysis technique called the Critical Decision Method as an elicitation technique for determining information portrayal requirements. This paper firstly describes how the technique was used in a study to identify the goal states and decision strategies invoked during the dispatch of ambulances at the Sydney Ambulance Co-ordination Centre. The paper then describes how the interview data was analysed within and between cases in order to reveal the goal states of the ambulance dispatchers. A brief description of the resulting goal states follows, although a more detailed description of the goal states and their resulting display concepts has been reported elsewhere (Wong et al., 1996b). Finally, the paper concludes with a set of observations and lessons learnt from the use of the Critical Decision Method for developing display design concepts in dynamic intentional environments.}, Address = {Dunedin, New Zealand}, Author = {William B.L. Wong and Philip J. Sallis and David P. O'Hare}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:23 +1200}, Institution = {Department of Information Science, University of Otago}, Keywords = {display design, cognitive task analysis, Critical Decision Method, ambulance dispatch management}, Month = {May}, Number = {97/04}, Size = {100 KB}, Title = {Eliciting information portrayal requirements: {E}xperiences with the critical decision method}, Type = {Discussion paper}, Year = {1997}} @techreport{dp1997-15, Abstract = {Fuzzy neural networks provide for the extraction of fuzzy rules from artificial neural network architectures. In this paper we describe a general method, based on statistical analysis of the training data, for the selection of fuzzy membership functions to be used in connection with fuzzy neural networks. The technique is first described and then illustrated by means of two experimental examinations.}, Address = {Dunedin, New Zealand}, Author = {Qingqing Zhou and Martin K. Purvis and Nikola K.
Kasabov}, Date-Added = {2009-06-08 14:20:00 +1200}, Date-Modified = {2009-06-09 16:59:23 +1200}, Institution = {Department of Information Science, University of Otago}, Month = {December}, Number = {97/15}, Size = {172 KB}, Title = {A membership function selection method for fuzzy neural networks}, Type = {Discussion paper}, Year = {1997}}